iommu/mediatek: Only adjust code about register base
No functional change. Use "base" instead of data->base. This avoids touching too many lines in the next patches. Signed-off-by: Yong Wu <yong.wu@mediatek.com> Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> Reviewed-by: Matthias Brugger <matthias.bgg@gmail.com> Link: https://lore.kernel.org/r/20220503071427.2285-25-yong.wu@mediatek.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
ef68a193c7
commit
887cf6a74a
|
@ -227,12 +227,12 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
|
|||
|
||||
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
|
||||
{
|
||||
void __iomem *base = data->base;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&data->tlb_lock, flags);
|
||||
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
|
||||
data->base + data->plat_data->inv_sel_reg);
|
||||
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
|
||||
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
|
||||
writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
|
||||
wmb(); /* Make sure the tlb flush all done */
|
||||
spin_unlock_irqrestore(&data->tlb_lock, flags);
|
||||
}
|
||||
|
@ -243,6 +243,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
|
|||
struct list_head *head = data->hw_list;
|
||||
bool check_pm_status;
|
||||
unsigned long flags;
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
u32 tmp;
|
||||
|
||||
|
@ -269,23 +270,23 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
|
|||
continue;
|
||||
}
|
||||
|
||||
base = data->base;
|
||||
|
||||
spin_lock_irqsave(&data->tlb_lock, flags);
|
||||
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
|
||||
data->base + data->plat_data->inv_sel_reg);
|
||||
base + data->plat_data->inv_sel_reg);
|
||||
|
||||
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
|
||||
data->base + REG_MMU_INVLD_START_A);
|
||||
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
|
||||
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
|
||||
data->base + REG_MMU_INVLD_END_A);
|
||||
writel_relaxed(F_MMU_INV_RANGE,
|
||||
data->base + REG_MMU_INVALIDATE);
|
||||
base + REG_MMU_INVLD_END_A);
|
||||
writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
|
||||
|
||||
/* tlb sync */
|
||||
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
|
||||
ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
|
||||
tmp, tmp != 0, 10, 1000);
|
||||
|
||||
/* Clear the CPE status */
|
||||
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
|
||||
writel_relaxed(0, base + REG_MMU_CPE_DONE);
|
||||
spin_unlock_irqrestore(&data->tlb_lock, flags);
|
||||
|
||||
if (ret) {
|
||||
|
@ -305,23 +306,25 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
|
|||
struct mtk_iommu_domain *dom = data->m4u_dom;
|
||||
unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
|
||||
u32 int_state, regval, va34_32, pa34_32;
|
||||
const struct mtk_iommu_plat_data *plat_data = data->plat_data;
|
||||
void __iomem *base = data->base;
|
||||
u64 fault_iova, fault_pa;
|
||||
bool layer, write;
|
||||
|
||||
/* Read error info from registers */
|
||||
int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
|
||||
int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
|
||||
if (int_state & F_REG_MMU0_FAULT_MASK) {
|
||||
regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
|
||||
fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
|
||||
fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
|
||||
regval = readl_relaxed(base + REG_MMU0_INT_ID);
|
||||
fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
|
||||
fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
|
||||
} else {
|
||||
regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
|
||||
fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
|
||||
fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
|
||||
regval = readl_relaxed(base + REG_MMU1_INT_ID);
|
||||
fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
|
||||
fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
|
||||
}
|
||||
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
|
||||
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
|
||||
if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
|
||||
if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
|
||||
va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
|
||||
fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
|
||||
fault_iova |= (u64)va34_32 << 32;
|
||||
|
@ -329,12 +332,12 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
|
|||
pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
|
||||
fault_pa |= (u64)pa34_32 << 32;
|
||||
|
||||
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
|
||||
if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
|
||||
fault_port = F_MMU_INT_ID_PORT_ID(regval);
|
||||
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM_2BITS)) {
|
||||
if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
|
||||
fault_larb = F_MMU_INT_ID_COMM_ID(regval);
|
||||
sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
|
||||
} else if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM_3BITS)) {
|
||||
} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
|
||||
fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
|
||||
sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
|
||||
} else {
|
||||
|
@ -353,9 +356,9 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
/* Interrupt clear */
|
||||
regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
|
||||
regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
|
||||
regval |= F_INT_CLR_BIT;
|
||||
writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
|
||||
writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);
|
||||
|
||||
mtk_iommu_tlb_flush_all(data);
|
||||
|
||||
|
|
Loading…
Reference in New Issue