/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* Legacy HW Engine ID */

#define RCS0_HW		0
#define VCS0_HW		1
#define BCS0_HW		2
#define VECS0_HW	3
#define VCS1_HW		4
#define VCS2_HW		6
#define VCS3_HW		7
#define VECS1_HW	12

/* Gen11+ HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define MAX_ENGINE_CLASS	4
#define MAX_ENGINE_INSTANCE	3

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
struct intel_ring;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load ctx workarounds so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *    if we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
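
/*
 * Illustrative sketch (assumption, not part of the original header): since
 * the offsets above are in dwords, mapping them onto a CPU pointer to the
 * workaround page looks roughly like:
 *
 *	u32 *page = vaddr_of(wa_ctx->vma);	// hypothetical helper
 *	u32 *batch = page + wa_ctx->indirect_ctx.offset;
 *	u32 *batch_end = batch + wa_ctx->indirect_ctx.size;
 */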

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
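
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * _VCS()/_VECS() helpers map an instance number onto these ids, and an
 * intel_engine_mask_t is built from them with BIT(), e.g.:
 *
 *	intel_engine_mask_t mask = BIT(_VCS(0)) | BIT(_VCS(1));
 *
 *	if (mask & ALL_ENGINES) { ... }
 */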

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
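	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * on the CS event acknowledging submission, the backend promotes
	 * @pending into @inflight, roughly:
	 *
	 *	memcpy(execlists->inflight, execlists->pending,
	 *	       execlists_num_ports(execlists) *
	 *	       sizeof(*execlists->pending));
	 *	WRITE_ONCE(execlists->pending[0], NULL);
	 *	WRITE_ONCE(execlists->active, execlists->inflight);
	 */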

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @switch_priority_hint: Second context priority.
	 *
	 * We submit multiple contexts to the HW simultaneously and would
	 * like to occasionally switch between them to emulate timeslicing.
	 * To know when timeslicing is suitable, we track the priority of
	 * the context submitted second.
	 */
	int switch_priority_hint;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request we wanted to preempt but which has since completed, at
	 * the time of dequeuing the priority hint may no longer match the
	 * highest available request priority.
	 */
	int queue_priority_hint;
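
	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * the submission path compares a newly queued priority against
	 * this hint to decide whether to kick the hardware:
	 *
	 *	if (prio > engine->execlists.queue_priority_hint) {
	 *		engine->execlists.queue_priority_hint = prio;
	 *		tasklet_hi_schedule(&engine->execlists.tasklet);
	 *	}
	 */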

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	/**
	 * @virtual: virtual engines awaiting execution, in priority order
	 */
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int hw_id;
	unsigned int guc_id;

	intel_engine_mask_t mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	unsigned int context_tag;
#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct {
		spinlock_t lock;
		struct list_head requests;
	} active;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;
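
	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * the user interrupt handler only needs to kick the irq_work above;
	 * the heavyweight coherent seqno reads then run in its callback
	 * rather than in the hardirq handler:
	 *
	 *	if (READ_ONCE(engine->breadcrumbs.irq_armed))
	 *		irq_work_queue(&engine->breadcrumbs.irq_work);
	 */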

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to &enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to &enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct intel_engine_pool pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32             irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void		(*park)(struct intel_engine_cs *engine);
	void		(*unpark)(struct intel_engine_cs *engine);

	void		(*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int		(*request_alloc)(struct i915_request *rq);

	int		(*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
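	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * requesting a full barrier around a batch:
	 *
	 *	err = rq->engine->emit_flush(rq, EMIT_BARRIER);
	 *	if (err)
	 *		return err;
	 */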
	int		(*emit_bb_start)(struct i915_request *rq,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
						 u32 *cs);
	unsigned int	emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct i915_request *rq);

	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void            (*bond_execute)(struct i915_request *rq,
					struct dma_fence *signal);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void		(*schedule)(struct i915_request *request,
				    const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void		(*cancel_requests)(struct intel_engine_cs *engine);

	void		(*destroy)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL       BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
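
	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * decoding a command's length from its header when no table entry
	 * matches, where LENGTH_BIAS is a hypothetical constant:
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *	if (!mask)
	 *		return -EINVAL;
	 *	len = (cmd_header & mask) + LENGTH_BIAS;
	 */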

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, and active as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time, not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
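
	/*
	 * Illustrative sketch (assumption, not from the original header):
	 * a reader samples the running total under @lock, extending it by
	 * the current busy block when active:
	 *
	 *	ktime_t total = stats->total;
	 *
	 *	if (stats->active)
	 *		total = ktime_add(total,
	 *				  ktime_sub(ktime_get(), stats->start));
	 */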

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props;
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
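
/*
 * Illustrative sketch (assumption, not from the original header): these
 * predicates gate submission paths, e.g. only emitting a semaphore wait
 * when the engine supports it (emit_semaphore_wait() is hypothetical):
 *
 *	if (intel_engine_has_semaphores(rq->engine))
 *		err = emit_semaphore_wait(rq, signal);
 */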

#define instdone_has_slice(dev_priv___, sseu___, slice___) \
	((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))

#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
	(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
	 intel_sseu_has_subslice(sseu__, 0, subslice__))

#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
	for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
	     (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
	     (slice_) += ((subslice_) == 0)) \
		for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
			    (instdone_has_subslice(dev_priv_, sseu_, slice_, \
						    subslice_)))
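
/*
 * Illustrative sketch (assumption, not from the original header): dumping
 * the per-subslice sampler values captured in struct intel_instdone,
 * assuming an intel_engine_get_instdone()-style helper:
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
 *		pr_info("sampler[%d][%d]: 0x%08x\n", slice, subslice,
 *			instdone.sampler[slice][subslice]);
 */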
#endif /* __INTEL_ENGINE_TYPES_H__ */