drm/i915 fixes for v5.6-rc7:
- Track active elements during dequeue
- Fix failure to handle all MCR ranges
- Revert unnecessary workaround

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEFWWmW3ewYy4RJOWc05gHnSar7m8FAl5zJl4ACgkQ05gHnSar
7m+wJhAAreffDNdNJQ7aNspMJiysrSaknCGQGdFtgih0qb3f9t/A7VccpHBNN2GB
d5CKrBE72PWQT6r4PYux56Fg1mj0wq8ajkvyqvVImkG2NWGWN6hl2Vo7vwGBa35Z
INSCSKSMOfQfWzJGhl5SAp8ieF12gFTB251THFQ/4lt0DlMlYDfq48UxXyr7TdXs
1Cmrgch4zDzntqMXvCrSkX3WjoPrWXqpHJFIO3AuQ6+EA14Zb4TVB7VetbpKqm26
4TUtKPjDYaNQ2JHE9KW82vq4P1Hw55fuP0P4umL4l2M5NXkbGkWGCQiOaoaKfzM9
u6JRhSGtg5ho4Hj05L2ai7082tH2ME1/o4nC2Wi2DHR8FW6QnMl3XC6oSmK3lpDx
3P72D1jxW15lFpmarT/BTP9C4Puk8iyDtdkf6XRtFy2SkfEtfG8qBlBuXEcK8sz7
LEJbJDbBr1CAWSqVkQfno1hQWdRY4847OeksVgLgAufI4bhPNxQNrZvENv1IdOwx
78tf+y0L1dlArvYuCzNjTNotedsJcDBCgqIeMsBUYvlw7mJuWnWScU5eK7YRFviK
AxZ0ZLxCyj9mBZp1S9Y20awjUTjjR6O1TMVmpFWYoK7UZm97CfoIAkV5Fl1Bm1CT
epssLTY7Bbp7NdGY5bcEMRmslJj94Wm13VCtL+h7jY6rjE8h7IU=
=ELTv
-----END PGP SIGNATURE-----

Merge tag 'drm-intel-fixes-2020-03-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc7:
- Track active elements during dequeue
- Fix failure to handle all MCR ranges
- Revert unnecessary workaround

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/877dzgepvu.fsf@intel.com
commit 5366b96b19
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
-	while (*last && i915_request_completed(*last))
-		last++;
-
-	return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
 {
-	struct i915_request *rq;
-
-	rq = last_active(&engine->execlists);
 	if (!rq)
 		return 0;
 
@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;
 
 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine));
+		     active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
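The two hunks above change the timeout helpers to take the request the caller has already found, rather than re-deriving it through last_active(). Below is a minimal standalone sketch of that pattern, assuming invented names (pick_preempt_timeout_ms, struct request, the 640 ms default); it only loosely mirrors the driver's policy and is not the i915 code.

/*
 * Standalone sketch (not i915 code): the caller looks up the active
 * request once and hands it to the timeout policy, instead of the
 * policy helper walking engine state again on its own.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	bool banned;		/* stand-in for a terminated context */
};

static unsigned long pick_preempt_timeout_ms(const struct request *rq,
					     unsigned long default_ms)
{
	if (!rq)
		return 0;	/* nothing running: disarm the timer */
	if (rq->banned)
		return 1;	/* force a fast reset instead of the default */
	return default_ms;
}

int main(void)
{
	struct request rq = { .banned = false };
	const struct request *active = &rq;	/* computed once by the caller */

	printf("timeout = %lu ms\n", pick_preempt_timeout_ms(active, 640));
	printf("timeout (idle) = %lu ms\n", pick_preempt_timeout_ms(NULL, 640));
	return 0;
}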
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	while ((last = *active) && i915_request_completed(last))
+		active++;
+
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
@@ -2110,7 +2102,7 @@ done:
 	 * Skip if we ended up with exactly the same set of requests,
 	 * e.g. trying to timeslice a pair of ordered contexts
 	 */
-	if (!memcmp(execlists->active, execlists->pending,
+	if (!memcmp(active, execlists->pending,
 		    (port - execlists->pending + 1) * sizeof(*port))) {
 		do
 			execlists_schedule_out(fetch_and_zero(port));
@@ -2121,7 +2113,7 @@ done:
 		clear_ports(port + 1, last_port - port);
 
 		execlists_submit_ports(engine);
-		set_preempt_timeout(engine);
+		set_preempt_timeout(engine, *active);
 	} else {
 skip_submit:
 		ring_set_paused(engine, 0);
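The three execlists_dequeue() hunks above revolve around one idea: capture execlists->active once, skip the entries that have already completed, and then reuse that same snapshot both for the memcmp and for set_preempt_timeout(). Below is a minimal standalone sketch of that cursor idiom over a NULL-terminated array; struct fake_request and its helpers are invented for illustration and are not the i915 types.

/*
 * Standalone sketch: walk a NULL-terminated array of in-flight items,
 * skip the ones already completed, and keep both the cursor ("active")
 * and the first still-busy element ("last") for later reuse.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_request {
	int seqno;
	bool completed;
};

static bool fake_request_completed(const struct fake_request *rq)
{
	return rq->completed;
}

int main(void)
{
	struct fake_request r0 = { .seqno = 1, .completed = true };
	struct fake_request r1 = { .seqno = 2, .completed = false };
	/* NULL-terminated "execlists" array, oldest request first. */
	struct fake_request *ports[] = { &r0, &r1, NULL };

	struct fake_request * const *active = ports;
	struct fake_request *last;

	/* Advance the cursor past everything that has already completed. */
	while ((last = *active) && fake_request_completed(last))
		active++;

	if (last)
		printf("first still-active request: seqno %d\n", last->seqno);
	else
		printf("no active requests\n");

	/*
	 * Because "active" was captured once, the same snapshot can be
	 * reused for later decisions (compare, timeout) instead of
	 * re-reading state that may have advanced in the meantime.
	 */
	return 0;
}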
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
 		*cs++ = preparser_disable(false);
 		intel_ring_advance(request, cs);
-
-		/*
-		 * Wa_1604544889:tgl
-		 */
-		if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-			flags = 0;
-			flags |= PIPE_CONTROL_CS_STALL;
-			flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-			flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-			flags |= PIPE_CONTROL_QW_WRITE;
-
-			cs = intel_ring_begin(request, 6);
-			if (IS_ERR(cs))
-				return PTR_ERR(cs);
-
-			cs = gen8_emit_pipe_control(cs, flags,
-						    LRC_PPHWSP_SCRATCH_ADDR);
-			intel_ring_advance(request, cs);
-		}
 	}
 
 	return 0;
@@ -1529,15 +1529,34 @@ err_obj:
 	return ERR_PTR(err);
 }
 
+static const struct {
+	u32 start;
+	u32 end;
+} mcr_ranges_gen8[] = {
+	{ .start = 0x5500, .end = 0x55ff },
+	{ .start = 0x7000, .end = 0x7fff },
+	{ .start = 0x9400, .end = 0x97ff },
+	{ .start = 0xb000, .end = 0xb3ff },
+	{ .start = 0xe000, .end = 0xe7ff },
+	{},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+	int i;
+
+	if (INTEL_GEN(i915) < 8)
+		return false;
+
 	/*
-	 * Registers in this range are affected by the MCR selector
+	 * Registers in these ranges are affected by the MCR selector
 	 * which only controls CPU initiated MMIO. Routing does not
 	 * work for CS access so we cannot verify them on this path.
 	 */
-	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-		return true;
+	for (i = 0; mcr_ranges_gen8[i].start; i++)
+		if (offset >= mcr_ranges_gen8[i].start &&
+		    offset <= mcr_ranges_gen8[i].end)
+			return true;
 
 	return false;
 }
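The MCR fix above replaces the single hard-coded 0xb000-0xb4ff test with a sentinel-terminated table of ranges, so offsets such as 0x9400 that the old check missed are now also skipped on the CS verification path. A self-contained sketch of the same table-lookup pattern follows; it reuses the ranges from the hunk but the surrounding names and the main() harness are invented for illustration.

/*
 * Standalone sketch of the sentinel-terminated range table: scan until
 * an entry with .start == 0, returning true if the offset falls inside
 * any listed [start, end] range.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const struct {
	uint32_t start;
	uint32_t end;
} mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{ 0, 0 },	/* sentinel: terminates the scan */
};

static bool offset_in_mcr_range(uint32_t offset)
{
	int i;

	for (i = 0; mcr_ranges_gen8[i].start; i++)
		if (offset >= mcr_ranges_gen8[i].start &&
		    offset <= mcr_ranges_gen8[i].end)
			return true;

	return false;
}

int main(void)
{
	/* 0x9400 lies in a range the old single-range check did not cover. */
	printf("0x9400 -> %d\n", offset_in_mcr_range(0x9400));
	printf("0xb100 -> %d\n", offset_in_mcr_range(0xb100));
	printf("0xc000 -> %d\n", offset_in_mcr_range(0xc000));
	return 0;
}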