path: root/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
blob: 3a084ce8ff5e16a9e818c5f4b2346812b81a5784
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_BREADCRUMBS_TYPES__
#define __INTEL_BREADCRUMBS_TYPES__

#include <linux/irq_work.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Rather than have every client wait upon all user interrupts,
 * with the herd waking after every interrupt and each doing the
 * heavyweight seqno dance, we delegate the task (of being the
 * bottom-half of the user interrupt) to the first client. After
 * every interrupt, we wake up one client, who does the heavyweight
 * coherent seqno read and either goes back to sleep (if incomplete),
 * or wakes up all the completed clients in parallel, before then
 * transferring the bottom-half status to the next client in the queue.
 *
 * Compared to walking the entire list of waiters in a single dedicated
 * bottom-half, we reduce the latency of the first waiter by avoiding
 * a context switch, but incur additional coherent seqno reads when
 * following the chain of request breadcrumbs. Since it is most likely
 * that we have a single client waiting on each seqno, reducing
 * the overhead of waking that client is much preferred.
 */
struct intel_breadcrumbs {
	atomic_t active;

	spinlock_t signalers_lock; /* protects the list of signalers */
	struct list_head signalers;
	struct llist_head signaled_requests;
	atomic_t signaler_active;

	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
	struct irq_work irq_work; /* for use from inside irq_lock */
	unsigned int irq_enabled;
	bool irq_armed;

	/* Not all breadcrumbs are attached to physical HW */
	struct intel_engine_cs *irq_engine;
};

#endif /* __INTEL_BREADCRUMBS_TYPES__ */
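As a minimal illustration of the bookkeeping described above, the sketch below shows how the fields of struct intel_breadcrumbs might be initialised before any signaler is attached. The helper example_breadcrumbs_init and its irq_work callback example_signal_irq_work are hypothetical names for this example only; they are not part of the i915 driver, whose real setup and signaling code live in intel_breadcrumbs.c.

/*
 * Hypothetical sketch, not the driver's implementation: initialise every
 * field of struct intel_breadcrumbs to its idle state.
 */
#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/spinlock.h>

#include "intel_breadcrumbs_types.h"

struct intel_engine_cs;

static void example_signal_irq_work(struct irq_work *work)
{
	/*
	 * Placeholder: per the comment in the header, the irq_work is the
	 * delegated bottom half that performs the coherent seqno read and
	 * wakes the clients whose requests have completed.
	 */
}

static void example_breadcrumbs_init(struct intel_breadcrumbs *b,
				     struct intel_engine_cs *irq_engine)
{
	atomic_set(&b->active, 0);

	spin_lock_init(&b->signalers_lock);
	INIT_LIST_HEAD(&b->signalers);
	init_llist_head(&b->signaled_requests);
	atomic_set(&b->signaler_active, 0);

	spin_lock_init(&b->irq_lock);
	init_irq_work(&b->irq_work, example_signal_irq_work);
	b->irq_enabled = 0;
	b->irq_armed = false;

	/* NULL when the breadcrumbs are not attached to physical HW */
	b->irq_engine = irq_engine;
}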