drm/i915: Mark up racy read of active rq->engine
As a virtual engine may change the rq->engine to point to the active request in flight, we need to warn the compiler that an active request's engine is volatile.

[ 95.017686] write (marked) to 0xffff8881e8386b10 of 8 bytes by interrupt on cpu 2:
[ 95.018123]  execlists_dequeue+0x762/0x2150 [i915]
[ 95.018539]  __execlists_submission_tasklet+0x48/0x60 [i915]
[ 95.018955]  execlists_submission_tasklet+0xd3/0x170 [i915]
[ 95.018986]  tasklet_action_common.isra.0+0x42/0xa0
[ 95.019016]  __do_softirq+0xd7/0x2cd
[ 95.019043]  irq_exit+0xbe/0xe0
[ 95.019068]  irq_work_interrupt+0xf/0x20
[ 95.019491]  i915_request_retire+0x2c5/0x670 [i915]
[ 95.019937]  retire_requests+0xa1/0xf0 [i915]
[ 95.020348]  engine_retire+0xa1/0xe0 [i915]
[ 95.020376]  process_one_work+0x3b1/0x690
[ 95.020403]  worker_thread+0x80/0x670
[ 95.020429]  kthread+0x19a/0x1e0
[ 95.020454]  ret_from_fork+0x1f/0x30
[ 95.020476]
[ 95.020498] read to 0xffff8881e8386b10 of 8 bytes by task 8909 on cpu 3:
[ 95.020918]  __i915_request_commit+0x177/0x220 [i915]
[ 95.021329]  i915_gem_do_execbuffer+0x38c4/0x4e50 [i915]
[ 95.021750]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
[ 95.021784]  drm_ioctl_kernel+0xe4/0x120
[ 95.021809]  drm_ioctl+0x297/0x4c7
[ 95.021832]  ksys_ioctl+0x89/0xb0
[ 95.021865]  __x64_sys_ioctl+0x42/0x60
[ 95.021901]  do_syscall_64+0x6e/0x2c0
[ 95.021927]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310142403.5953-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
0690e504b6
commit
326611ddff
|
@@ -773,7 +773,7 @@ void intel_rps_park(struct intel_rps *rps)
|
|||
|
||||
void intel_rps_boost(struct i915_request *rq)
|
||||
{
|
||||
struct intel_rps *rps = &rq->engine->gt->rps;
|
||||
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
|
||||
unsigned long flags;
|
||||
|
||||
if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
|
||||
|
|
|
@@ -977,6 +977,8 @@ emit_semaphore_wait(struct i915_request *to,
|
|||
struct i915_request *from,
|
||||
gfp_t gfp)
|
||||
{
|
||||
const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
|
||||
|
||||
if (!intel_context_use_semaphores(to->context))
|
||||
goto await_fence;
|
||||
|
||||
|
@@ -984,7 +986,7 @@ emit_semaphore_wait(struct i915_request *to,
|
|||
goto await_fence;
|
||||
|
||||
/* Just emit the first semaphore we see as request space is limited. */
|
||||
if (already_busywaiting(to) & from->engine->mask)
|
||||
if (already_busywaiting(to) & mask)
|
||||
goto await_fence;
|
||||
|
||||
if (i915_request_await_start(to, from) < 0)
|
||||
|
@@ -997,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
|
|||
if (__emit_semaphore_wait(to, from, from->fence.seqno))
|
||||
goto await_fence;
|
||||
|
||||
to->sched.semaphores |= from->engine->mask;
|
||||
to->sched.semaphores |= mask;
|
||||
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
|
||||
return 0;
|
||||
|
||||
|
@@ -1338,7 +1340,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
|
|||
i915_seqno_passed(prev->fence.seqno,
|
||||
rq->fence.seqno));
|
||||
|
||||
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
|
||||
if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
|
||||
i915_sw_fence_await_sw_fence(&rq->submit,
|
||||
&prev->submit,
|
||||
&rq->submitq);
|
||||
|
|
Loading…
Reference in New Issue