// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
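
/*
 * Forward a GuC interrupt vector to the GuC-to-host event handler; only
 * the GUC2HOST bit is acted upon here.
 */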
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}
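
/*
 * Select the bank/bit to be serviced and spin-wait for the shared
 * identity register to latch valid data, ~100us at most (the specs give
 * no bound, so the timeout is an educated guess). Returns 0 on timeout.
 */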
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}
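
/*
 * Interrupts from the "other" class: route the GuC instance to the GuC
 * handler and the GTPM instance to RPS; anything else is unexpected.
 */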
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}
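
/*
 * Route an engine-class interrupt to the intel_engine_cs matching the
 * decoded class/instance pair, warning on out-of-range instances.
 */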
static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return intel_engine_cs_irq(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}
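
/*
 * Crack the identity dword into class, instance and interrupt vector,
 * then fan out to the engine or "other" class handlers.
 */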
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}
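
/*
 * Service every pending bit in one GT interrupt dword bank. Reading
 * GT_INTR_DW locks the bank; it is released by writing the handled bits
 * back, which must happen after the shared IIRs have been served.
 */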
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
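
/*
 * Top-level gen11 GT interrupt dispatch: under gt->irq_lock, service
 * whichever of the two interrupt dword banks master_ctl flags as pending.
 */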
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}
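
/*
 * Clear a single pending bit in a GT interrupt dword bank, servicing the
 * selector/shared IIRs first as the hardware requires. Returns true if
 * the bit was pending.
 */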
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}
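
/* Disable and mask all gen11 GT engine, GPM/WGBOXPERF and GuC interrupts. */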
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Mask all irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
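
/*
 * Enable the GT interrupts wanted at init. The CS error, context switch
 * and semaphore wait vectors are only unmasked when GuC submission is
 * not in use; RPS and GuC vectors remain disabled until enabled on
 * demand.
 */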
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_RENDER_USER_INTERRUPT;
	u32 dmask;
	u32 smask;

	if (!intel_uc_wants_guc_submission(&gt->uc))
		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
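
/* Gen5 GT handler: only the render and BSD user interrupts are serviced. */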
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir);
}
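
/*
 * On parts with L3 dynamic parity (HAS_L3_DPF), mask further parity
 * interrupts, note which slice(s) erred and kick the deferred parity
 * error worker.
 */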
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}
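
/*
 * Gen6 GT handler: render, BSD and blitter user interrupts sit at fixed
 * shifts within gt_iir; command parser errors are merely logged.
 */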
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir >> 12);

	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
				    gt_iir >> 22);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}
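
/*
 * Gen8 GT handler: each GT IIR bank serves a pair of sources (two
 * engines, or the RPS and GuC vectors in IIR(2)); read, dispatch, then
 * clear the bank.
 */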
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
					    iir >> GEN8_RCS_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
					    iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
					    iir >> GEN8_VCS0_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
					    iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
					    iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
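
/*
 * Program the four gen8 GT IIR banks: engine vectors (user interrupt,
 * context switch, CS master error, semaphore wait) are unmasked at their
 * per-engine shifts, while the PM/GuC bank stays fully masked for now.
 */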
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
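
/*
 * Update the cached GTIMR: every bit in interrupt_mask is rewritten,
 * with enabled_irq_mask choosing which of those bits become unmasked.
 */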
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (GRAPHICS_VER(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}
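
/*
 * Program GTIMR/GTIER: parity errors (when L3-DPF is present) plus the
 * per-engine user interrupts, and on gen6+ the PM bank with only the
 * VEBOX user interrupt enabled up front (RPS interrupts are toggled on
 * demand).
 */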
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (GRAPHICS_VER(gt->i915) == 5)
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (GRAPHICS_VER(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}