Commit 422d7df4 authored by Chris Wilson

drm/i915: Replace engine->timeline with a plain list



To continue the onslaught of removing the assumption of a global
execution ordering, another casualty is the engine->timeline. Without an
actual timeline to track, it is overkill and we can replace it with a
much less grand plain list. We still need a list of inflight requests,
for the simple purpose of finding them (for retiring, resetting,
preemption, etc.).
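
A rough sketch of the resulting shape (illustrative only; the local
names below are not from the patch itself): each engine carries a small
"active" sub-struct, and inflight requests are chained through
rq->sched.link and walked only under engine->active.lock, mirroring the
loops in intel_engine_find_active_request() and reset_ring() in the
diff below:

	struct {
		spinlock_t lock;
		struct list_head requests;
	} active;

	/* e.g. locating the oldest incomplete request on an engine */
	struct i915_request *rq, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (i915_request_completed(rq))
			continue;
		found = rq;
		break;
	}
	spin_unlock_irqrestore(&engine->active.lock, flags);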

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-3-chris@chris-wilson.co.uk
parent 9db0c5ca
@@ -565,4 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
void intel_engine_init_active(struct intel_engine_cs *engine,
unsigned int subclass);
#define ENGINE_PHYSICAL 0
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
#endif /* _INTEL_RINGBUFFER_H_ */
@@ -617,14 +617,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
if (err)
return err;
err = i915_timeline_init(engine->i915,
&engine->timeline,
engine->status_page.vma);
if (err)
goto err_hwsp;
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
@@ -637,10 +630,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
return 0;
err_hwsp:
cleanup_status_page(engine);
return err;
}
/**
@@ -797,6 +786,27 @@ static int pin_context(struct i915_gem_context *ctx,
return 0;
}
void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
/*
* Due to an interesting quirk in lockdep's internal debug tracking,
* after setting a subclass we must ensure the lock is used. Otherwise,
* nr_unused_locks is incremented once too often.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
local_irq_disable();
lock_map_acquire(&engine->active.lock.dep_map);
lock_map_release(&engine->active.lock.dep_map);
local_irq_enable();
#endif
}
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -860,6 +870,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->active.requests));
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -874,8 +886,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_context_unpin(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
i915_timeline_fini(&engine->timeline);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
intel_wa_list_free(&engine->whitelist);
@@ -1482,16 +1492,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tRequests:\n");
rq = list_first_entry(&engine->timeline.requests,
struct i915_request, link);
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tfirst ");
rq = list_last_entry(&engine->timeline.requests,
struct i915_request, link);
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1572,7 +1572,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
@@ -1598,7 +1598,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
unlock:
write_sequnlock(&engine->stats.lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
return err;
}
@@ -1683,22 +1683,22 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
list_for_each_entry(request, &engine->timeline.requests, link) {
spin_lock_irqsave(&engine->active.lock, flags);
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
if (!i915_request_started(request))
break;
continue;
/* More than one preemptible request may match! */
if (!match_ring(request))
break;
continue;
active = request;
break;
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
@@ -288,7 +288,11 @@ struct intel_engine_cs {
struct intel_ring *buffer;
struct i915_timeline timeline;
struct {
spinlock_t lock;
struct list_head requests;
} active;
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
@@ -298,8 +298,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
if (!list_is_last(&rq->link, &engine->timeline.requests) &&
rq_prio(list_next_entry(rq, link)) > last_prio)
if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
if (rb) {
@@ -434,11 +434,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
struct list_head *uninitialized_var(pl);
int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->timeline.lock);
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline.requests,
link) {
&engine->active.requests,
sched.link) {
struct intel_engine_cs *owner;
if (i915_request_completed(rq))
@@ -465,7 +465,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
}
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_add(&rq->sched.link, pl);
list_move(&rq->sched.link, pl);
active = rq;
} else {
rq->engine = owner;
@@ -933,11 +933,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq;
spin_lock(&ve->base.timeline.lock);
spin_lock(&ve->base.active.lock);
rq = ve->request;
if (unlikely(!rq)) { /* lost the race to a sibling */
spin_unlock(&ve->base.timeline.lock);
spin_unlock(&ve->base.active.lock);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
rb = rb_first_cached(&execlists->virtual);
@@ -950,13 +950,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (rq_prio(rq) >= queue_prio(execlists)) {
if (!virtual_matches(ve, rq, engine)) {
spin_unlock(&ve->base.timeline.lock);
spin_unlock(&ve->base.active.lock);
rb = rb_next(rb);
continue;
}
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.timeline.lock);
spin_unlock(&ve->base.active.lock);
return; /* leave this rq for another engine */
}
@@ -1011,7 +1011,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = rq;
}
spin_unlock(&ve->base.timeline.lock);
spin_unlock(&ve->base.active.lock);
break;
}
@@ -1068,8 +1068,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
@@ -1170,7 +1168,7 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
lockdep_assert_held(&engine->timeline.lock);
lockdep_assert_held(&engine->active.lock);
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1330,7 +1328,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->timeline.lock);
lockdep_assert_held(&engine->active.lock);
process_csb(engine);
if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
@@ -1351,15 +1349,16 @@ static void execlists_submission_tasklet(unsigned long data)
!!intel_wakeref_active(&engine->wakeref),
engine->execlists.active);
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
GEM_BUG_ON(!list_empty(&node->link));
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
@@ -1390,7 +1389,7 @@ static void execlists_submit_request(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
@@ -1399,7 +1398,7 @@ static void execlists_submit_request(struct i915_request *request)
submit_queue(engine, rq_prio(request));
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void __execlists_context_fini(struct intel_context *ce)
@@ -2050,8 +2049,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static bool lrc_regs_ok(const struct i915_request *rq)
@@ -2094,11 +2093,11 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
static struct i915_request *active_request(struct i915_request *rq)
{
const struct list_head * const list = &rq->engine->timeline.requests;
const struct list_head * const list = &rq->engine->active.requests;
const struct intel_context * const context = rq->hw_context;
struct i915_request *active = NULL;
list_for_each_entry_from_reverse(rq, list, link) {
list_for_each_entry_from_reverse(rq, list, sched.link) {
if (i915_request_completed(rq))
break;
@@ -2215,11 +2214,11 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
GEM_TRACE("%s\n", engine->name);
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, stalled);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void nop_submission_tasklet(unsigned long data)
@@ -2250,12 +2249,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -2286,7 +2285,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.timeline.lock);
spin_lock(&ve->base.active.lock);
if (ve->request) {
ve->request->engine = engine;
__i915_request_submit(ve->request);
@@ -2295,7 +2294,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
ve->base.execlists.queue_priority_hint = INT_MIN;
ve->request = NULL;
}
spin_unlock(&ve->base.timeline.lock);
spin_unlock(&ve->base.active.lock);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -2307,7 +2306,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -3010,12 +3009,18 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
return ret;
}
static struct list_head *virtual_queue(struct virtual_engine *ve)
{
return &ve->base.execlists.default_priolist.requests[0];
}
static void virtual_context_destroy(struct kref *kref)
{
struct virtual_engine *ve =
container_of(kref, typeof(*ve), context.ref);
unsigned int n;
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
GEM_BUG_ON(ve->request);
GEM_BUG_ON(ve->context.inflight);
@@ -3026,13 +3031,13 @@ static void virtual_context_destroy(struct kref *kref)
if (RB_EMPTY_NODE(node))
continue;
spin_lock_irq(&sibling->timeline.lock);
spin_lock_irq(&sibling->active.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
spin_unlock_irq(&sibling->timeline.lock);
spin_unlock_irq(&sibling->active.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
@@ -3040,8 +3045,6 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
kfree(ve->bonds);
i915_timeline_fini(&ve->base.timeline);
kfree(ve);
}
@@ -3161,16 +3164,16 @@ static void virtual_submission_tasklet(unsigned long data)
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
spin_lock(&sibling->timeline.lock);
spin_lock(&sibling->active.lock);
rb_erase_cached(&node->rb,
&sibling->execlists.virtual);
RB_CLEAR_NODE(&node->rb);
spin_unlock(&sibling->timeline.lock);
spin_unlock(&sibling->active.lock);
}
continue;
}
spin_lock(&sibling->timeline.lock);
spin_lock(&sibling->active.lock);
if (!RB_EMPTY_NODE(&node->rb)) {
/*
@@ -3214,7 +3217,7 @@ static void virtual_submission_tasklet(unsigned long data)
tasklet_hi_schedule(&sibling->execlists.tasklet);
}
spin_unlock(&sibling->timeline.lock);
spin_unlock(&sibling->active.lock);
}
local_irq_enable();
}
@@ -3231,9 +3234,13 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
GEM_BUG_ON(ve->request);
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
ve->base.execlists.queue_priority_hint = rq_prio(rq);
WRITE_ONCE(ve->request, rq);
list_move_tail(&rq->sched.link, virtual_queue(ve));
tasklet_schedule(&ve->base.execlists.tasklet);
}
@@ -3297,10 +3304,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL);
if (err)
goto err_put;
i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL);
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_execlists(&ve->base);
@@ -3311,6 +3315,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.submit_request = virtual_submit_request;
ve->base.bond_execute = virtual_bond_execute;
INIT_LIST_HEAD(virtual_queue(ve));
ve->base.execlists.queue_priority_hint = INT_MIN;
tasklet_init(&ve->base.execlists.tasklet,
virtual_submission_tasklet,
@@ -3465,11 +3470,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int count;
struct rb_node *rb;
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
last = NULL;
count = 0;
list_for_each_entry(rq, &engine->timeline.requests, link) {
list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (count++ < max - 1)
show_request(m, rq, "\t\tE ");
else
@@ -3532,7 +3537,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tV ");
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
void intel_lr_context_reset(struct intel_engine_cs *engine,
@@ -49,12 +49,12 @@ static void engine_skip_context(struct i915_request *rq)
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
lockdep_assert_held(&engine->timeline.lock);
lockdep_assert_held(&engine->active.lock);
if (!i915_request_is_active(rq))
return;
list_for_each_entry_continue(rq, &engine->timeline.requests, link)
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
}
@@ -130,7 +130,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
rq->fence.seqno,
yesno(guilty));
lockdep_assert_held(&rq->engine->timeline.lock);
lockdep_assert_held(&rq->engine->active.lock);
GEM_BUG_ON(i915_request_completed(rq));
if (guilty) {
@@ -785,10 +785,10 @@ static void nop_submit_request(struct i915_request *request)
engine->name, request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
@@ -730,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
struct i915_timeline *tl = &engine->timeline;
struct i915_request *pos, *rq;
unsigned long flags;
u32 head;
rq = NULL;
spin_lock_irqsave(&tl->lock, flags);
list_for_each_entry(pos, &tl->requests, link) {
spin_lock_irqsave(&engine->active.lock, flags);
list_for_each_entry(pos, &engine->active.requests, sched.link) {
if (!i915_request_completed(pos)) {
rq = pos;
break;
@@ -791,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
engine->buffer->head = intel_ring_wrap(engine->buffer, head);
spin_unlock_irqrestore(&tl->lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
@@ -877,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
@@ -889,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
@@ -1267,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
GEM_BUG_ON(timeline == &engine->timeline);
lockdep_assert_held(&engine->i915->drm.struct_mutex);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -229,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline.lock, flags);
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, sched.link) {
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}