/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
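
/*
 * DECLARE_EWMA(runtime, 3, 8) (see <linux/average.h>) generates
 * struct ewma_runtime plus ewma_runtime_init/add/read(): an
 * exponentially weighted moving average kept in fixed point with
 * 3 fractional bits, weighting each new sample by 1/8. It backs
 * stats.runtime.avg below.
 */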
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

#define COPS_RUNTIME_CYCLES_BIT 1
#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
		       unsigned int preempt_timeout_ms);

	void (*close)(struct intel_context *ce);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*update_stats)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
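
/*
 * A sketch of the order in which the core context code drives these
 * hooks (illustrative, derived from intel_context.c rather than a
 * contract stated here):
 *
 *	alloc()                   once, before first pin
 *	pre_pin() -> pin()        on each pin
 *	enter() ... exit()        around periods of request activity
 *	unpin() -> post_unpin()   on each unpin
 *	destroy()                 from the final kref_put()
 */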

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};
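
	/*
	 * Note: ref shares storage with rcu, so once the last reference
	 * is dropped the memory is reused as the rcu_head for deferred
	 * freeing; a concurrent kref_get_unless_zero() would read that
	 * rcu_head as if it were a refcount, hence the warning above.
	 */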

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
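
	/*
	 * Example (illustrative only): the low 3 bits of ce->inflight
	 * hold a submission count and the masked high bits the engine,
	 * so after ptr_pack_bits(engine, 2, 3) is stored here,
	 * intel_context_inflight(ce) yields the engine pointer and
	 * intel_context_inflight_count(ce) yields 2.
	 */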

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12
#define CONTEXT_EXITING			13
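
	/*
	 * The CONTEXT_* values above are bit numbers into ce->flags,
	 * manipulated with test_bit()/set_bit()/clear_bit() and friends.
	 */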

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
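
	/*
	 * The union lets the 64-bit HW context descriptor be read as a
	 * whole (desc) or, on a little-endian build, as its two 32-bit
	 * halves: the logical ring context address (lrca) and the
	 * context ID (ccid).
	 */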
	u32 tag; /* cookie passed to HW to track this context on submission */

	/** @stats: Context GPU engine busyness tracking. */
	struct intel_context_stats {
		u64 active;

		/* Time on GPU as tracked by the hw. */
		struct {
			struct ewma_runtime avg;
			u64 total;
			u32 last;
			I915_SELFTEST_DECLARE(u32 num_underflow);
			I915_SELFTEST_DECLARE(u32 max_underflow);
		} runtime;
	} stats;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */

	/**
	 * @active: Active tracker for the request activity (including
	 * external) on this intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** @sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * @pinned_contexts_link: List link for the engine's pinned contexts.
	 * Only used for perma-pinned kernel contexts; the list is assumed to
	 * be manipulated only during driver load or unload, so it currently
	 * has no mutex protection.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current GuC priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
		/**
		 * @sched_disable_delay_work: delayed worker used to disable
		 * scheduling on this context
		 */
		struct delayed_work sched_disable_delay_work;
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id; transitions
		 * in and out of zero are protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts;
	 * on the list while the context is pending destruction (deregistration
	 * with the GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of child contexts;
			 * needs no protection as it is immutable after
			 * context creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence used
		 * when doing parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for the composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: cached head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: cached tail pointer in work queue */
			u16 wqi_tail;
			/** @wq_head: pointer to the actual head in work queue */
			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
			u32 *wq_tail;
			/** @wq_status: pointer to the status in work queue */
			u32 *wq_status;

			/**
			 * @parent_page: page in the context state (ce->state)
			 * used by the parent for the work queue and process
			 * descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;
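
	/*
	 * Illustrative example: a parallel context of width 2 has one
	 * parent (number_children == 1) whose child_list links the single
	 * child (child_index == 0), all sharing the work queue in the
	 * parent's context state described by the guc sub-struct above.
	 */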

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */