staging: tidspbridge - fix mmufault support

With the changes for the iommu migration, MMU-fault reporting and the
DSP trace dump are broken; this patch fixes both.

Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit f265846db1
parent e7396e77d9
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -32,4 +32,6 @@ struct deh_mgr {
 	struct tasklet_struct dpc_tasklet;
 };
 
+int mmu_fault_isr(struct iommu *mmu);
+
 #endif /* _DEH_ */
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -58,6 +58,7 @@
 #include "_tiomap.h"
 #include "_tiomap_pwr.h"
 #include "tiomap_io.h"
+#include "_deh.h"
 
 /* Offset in shared mem to write to in order to synchronize start with DSP */
 #define SHMSYNCOFFSET 4		/* GPP byte offset */
@@ -367,6 +368,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	}
 	if (!status) {
 		dev_context->dsp_mmu = mmu;
+		mmu->isr = mmu_fault_isr;
 		sm_sg = &dev_context->sh_s;
 		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
 			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
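
Note: assigning mmu->isr here is what routes DSP MMU faults back into the bridge now that the MMU interrupt itself is owned by the generic iommu code; the old handler had a request_irq()-style signature, while the new one is a plain callback on struct iommu. A rough sketch, assuming how the iommu core in this tree dispatches such a hook (this is not part of the patch):

	/* Sketch only: assumes the iommu core's fault IRQ handler gives a
	 * client-installed ->isr hook first shot at the fault. */
	static irqreturn_t iommu_fault_irq(int irq, void *data)
	{
		struct iommu *obj = data;

		if (obj->isr && !obj->isr(obj))	/* e.g. mmu_fault_isr() */
			return IRQ_HANDLED;	/* client handled/deferred it */

		return IRQ_NONE;		/* fall back to default reporting */
	}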
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,7 +31,7 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/wdt.h>
 
-static u32 fault_addr;
+#define MMU_CNTL_TWL_EN (1 << 2)
 
 static void mmu_fault_dpc(unsigned long data)
 {
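
Dropping the static fault_addr means the fault address is no longer cached by the ISR; the later hunks read it from the MMU_FAULT_AD register at the point of use. The new MMU_CNTL_TWL_EN define names the table-walk-enable bit of MMU_CNTL, which mmu_fault_print_stack() below now clears by hand instead of calling hw_mmu_twl_disable(). A hypothetical helper, shown only to illustrate the read-on-demand pattern (not added by the patch):

	/* Hypothetical, not in the patch: fetch the fault address from the
	 * hardware register when it is actually needed. */
	static inline u32 dsp_mmu_fault_addr(struct iommu *mmu)
	{
		return iommu_read_reg(mmu, MMU_FAULT_AD);
	}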
@@ -43,43 +43,18 @@ static void mmu_fault_dpc(unsigned long data)
 	bridge_deh_notify(deh, DSP_MMUFAULT, 0);
 }
 
-static irqreturn_t mmu_fault_isr(int irq, void *data)
+int mmu_fault_isr(struct iommu *mmu)
 {
-	struct deh_mgr *deh = data;
-	struct cfg_hostres *resources;
-	u32 event;
+	struct deh_mgr *dm;
 
-	if (!deh)
-		return IRQ_HANDLED;
+	dev_get_deh_mgr(dev_get_first(), &dm);
 
-	resources = deh->hbridge_context->resources;
-	if (!resources) {
-		dev_dbg(bridge, "%s: Failed to get Host Resources\n",
-				__func__);
-		return IRQ_HANDLED;
-	}
+	if (!dm)
+		return -EPERM;
 
-	hw_mmu_event_status(resources->dw_dmmu_base, &event);
-	if (event == HW_MMU_TRANSLATION_FAULT) {
-		hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
-		dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
-				event, fault_addr);
-		/*
-		 * Schedule a DPC directly. In the future, it may be
-		 * necessary to check if DSP MMU fault is intended for
-		 * Bridge.
-		 */
-		tasklet_schedule(&deh->dpc_tasklet);
-
-		/* Disable the MMU events, else once we clear it will
-		 * start to raise INTs again */
-		hw_mmu_event_disable(resources->dw_dmmu_base,
-				HW_MMU_TRANSLATION_FAULT);
-	} else {
-		hw_mmu_event_disable(resources->dw_dmmu_base,
-				HW_MMU_ALL_INTERRUPTS);
-	}
-	return IRQ_HANDLED;
+	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
+	tasklet_schedule(&dm->dpc_tasklet);
+	return 0;
 }
 
 int bridge_deh_create(struct deh_mgr **ret_deh,
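
The new ISR is deliberately minimal: it masks further MMU interrupts by writing 0 to MMU_IRQENABLE, schedules the existing DPC tasklet, and returns 0 to signal that the fault was taken; it looks the DEH manager up via dev_get_deh_mgr(dev_get_first(), &dm) because the iommu callback carries no bridge context. For reference, the tasklet body whose tail appears as context at the top of this hunk looks roughly like this (unchanged by the patch):

	static void mmu_fault_dpc(unsigned long data)
	{
		struct deh_mgr *deh = (void *)data;

		if (!deh)
			return;

		bridge_deh_notify(deh, DSP_MMUFAULT, 0);
	}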
@@ -161,42 +136,45 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
 {
-	struct cfg_hostres *resources;
-	struct hw_mmu_map_attrs_t map_attrs = {
-		.endianism = HW_LITTLE_ENDIAN,
-		.element_size = HW_ELEM_SIZE16BIT,
-		.mixed_size = HW_MMU_CPUES,
-	};
-	void *dummy_va_addr;
-
-	resources = dev_context->resources;
-	dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
+	void *dummy_addr;
+	u32 fa, tmp;
+	struct iotlb_entry e;
+	struct iommu *mmu = dev_context->dsp_mmu;
+	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
 
 	/*
 	 * Before acking the MMU fault, let's make sure MMU can only
 	 * access entry #0. Then add a new entry so that the DSP OS
 	 * can continue in order to dump the stack.
 	 */
-	hw_mmu_twl_disable(resources->dw_dmmu_base);
-	hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
-
-	hw_mmu_tlb_add(resources->dw_dmmu_base,
-			virt_to_phys(dummy_va_addr), fault_addr,
-			HW_PAGE_SIZE4KB, 1,
-			&map_attrs, HW_SET, HW_SET);
+	tmp = iommu_read_reg(mmu, MMU_CNTL);
+	tmp &= ~MMU_CNTL_TWL_EN;
+	iommu_write_reg(mmu, tmp, MMU_CNTL);
+	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
+	e.da = fa & PAGE_MASK;
+	e.pa = virt_to_phys(dummy_addr);
+	e.valid = 1;
+	e.prsvd = 1;
+	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
+	e.endian = MMU_RAM_ENDIAN_LITTLE;
+	e.elsz = MMU_RAM_ELSZ_32;
+	e.mixed = 0;
+
+	load_iotlb_entry(dev_context->dsp_mmu, &e);
 
 	dsp_clk_enable(DSP_CLK_GPT8);
 
 	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
 
 	/* Clear MMU interrupt */
-	hw_mmu_event_ack(resources->dw_dmmu_base,
-			HW_MMU_TRANSLATION_FAULT);
+	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
+	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
+
 	dump_dsp_stack(dev_context);
 	dsp_clk_disable(DSP_CLK_GPT8);
 
-	hw_mmu_disable(resources->dw_dmmu_base);
-	free_page((unsigned long)dummy_va_addr);
+	iopgtable_clear_entry(mmu, fa);
+	free_page((unsigned long)dummy_addr);
 }
 #endif
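
The rewritten dump path keeps the original idea: disable the table walker, pin a preserved 4 KiB TLB entry that maps the faulting DSP address onto a scratch page so the DSP can keep running long enough to dump its stack, ack the fault, dump, and tear the mapping down again; it just goes through the iommu register and TLB helpers now. The same steps, condensed and annotated (comments added here; treating MMU_IRQSTATUS as write-1-to-clear is an assumption):

	tmp = iommu_read_reg(mmu, MMU_CNTL);
	tmp &= ~MMU_CNTL_TWL_EN;		/* stop hardware table walks */
	iommu_write_reg(mmu, tmp, MMU_CNTL);

	fa = iommu_read_reg(mmu, MMU_FAULT_AD);	/* faulting DSP address */
	e.da = fa & PAGE_MASK;			/* map its 4 KiB page ... */
	e.pa = virt_to_phys(dummy_addr);	/* ... onto a scratch page */
	e.prsvd = 1;				/* keep the entry locked in the TLB */
	/* remaining iotlb_entry fields set exactly as in the hunk above */
	load_iotlb_entry(mmu, &e);

	/* assumed write-1-to-clear: writing back what was read acks the fault */
	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);

	dump_dsp_stack(dev_context);
	iopgtable_clear_entry(mmu, fa);		/* remove the temporary mapping */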
@@ -215,6 +193,7 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
 {
 	struct bridge_dev_context *dev_context;
 	const char *str = event_to_string(event);
+	u32 fa;
 
 	if (!deh)
 		return;
@@ -232,8 +211,8 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
 #endif
 		break;
 	case DSP_MMUFAULT:
-		dev_err(bridge, "%s: %s, addr=0x%x", __func__,
-				str, fault_addr);
+		fa = iommu_read_reg(dev_context->dsp_mmu, MMU_FAULT_AD);
+		dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, fa);
 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 		print_dsp_trace_buffer(dev_context);
 		dump_dl_modules(dev_context);
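
Taken together, the fault path after this patch reads as follows (a summary of the hunks above; the first hop through the iommu core's fault handler is inferred from the mmu->isr assignment and is not part of this diff):

	/*
	 * DSP MMU fault (IRQ)
	 *   -> iommu core invokes mmu->isr == mmu_fault_isr()
	 *        writes 0 to MMU_IRQENABLE, tasklet_schedule(&dm->dpc_tasklet)
	 *   -> mmu_fault_dpc()
	 *        bridge_deh_notify(deh, DSP_MMUFAULT, 0)
	 *   -> bridge_deh_notify(), DSP_MMUFAULT case
	 *        reads MMU_FAULT_AD, logs the address and, with
	 *        CONFIG_TIDSPBRIDGE_BACKTRACE, dumps the trace buffer and DL
	 *        modules (mmu_fault_print_stack() covers the DSP stack itself)
	 */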