Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Joerg Roedel 2015-12-22 11:26:37 +01:00
commit 6d6c7e56a2
5 changed files with 119 additions and 173 deletions

drivers/iommu/arm-smmu-v3.c

@@ -40,7 +40,10 @@
#define IDR0_ST_LVL_SHIFT 27
#define IDR0_ST_LVL_MASK 0x3
#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL (3 << 24)
#define IDR0_STALL_MODEL_SHIFT 24
#define IDR0_STALL_MODEL_MASK 0x3
#define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT 21
#define IDR0_TTENDIAN_MASK 0x3
#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
@@ -253,6 +256,9 @@
#define STRTAB_STE_1_STRW_EL2 2UL
#define STRTAB_STE_1_STRW_SHIFT 30
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -378,7 +384,6 @@
#define PRIQ_0_SID_MASK 0xffffffffUL
#define PRIQ_0_SSID_SHIFT 32
#define PRIQ_0_SSID_MASK 0xfffffUL
#define PRIQ_0_OF (1UL << 57)
#define PRIQ_0_PERM_PRIV (1UL << 58)
#define PRIQ_0_PERM_EXEC (1UL << 59)
#define PRIQ_0_PERM_READ (1UL << 60)
@@ -855,15 +860,17 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
};
dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
cerror_str[idx]);
idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");
switch (idx) {
case CMDQ_ERR_CERROR_ILL_IDX:
break;
case CMDQ_ERR_CERROR_ABT_IDX:
dev_err(smmu->dev, "retrying command fetch\n");
case CMDQ_ERR_CERROR_NONE_IDX:
return;
case CMDQ_ERR_CERROR_ILL_IDX:
/* Fallthrough */
default:
break;
}
/*
@@ -1042,6 +1049,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
: STRTAB_STE_0_CFG_BYPASS;
dst[0] = cpu_to_le64(val);
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
dst[2] = 0; /* Nuke the VMID */
if (ste_live)
arm_smmu_sync_ste_for_sid(smmu, sid);
@@ -1056,12 +1065,14 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
STRTAB_STE_1_S1C_CACHE_WBRA
<< STRTAB_STE_1_S1COR_SHIFT |
STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
@@ -1123,8 +1134,8 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
GFP_KERNEL);
desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
GFP_KERNEL | __GFP_ZERO);
if (!desc->l2ptr) {
dev_err(smmu->dev,
"failed to allocate l2 stream table for SID %u\n",
@@ -1250,50 +1261,50 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
u32 gerror, gerrorn;
u32 gerror, gerrorn, active;
struct arm_smmu_device *smmu = dev;
gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
gerror ^= gerrorn;
if (!(gerror & GERROR_ERR_MASK))
active = gerror ^ gerrorn;
if (!(active & GERROR_ERR_MASK))
return IRQ_NONE; /* No errors pending */
dev_warn(smmu->dev,
"unexpected global error reported (0x%08x), this could be serious\n",
gerror);
active);
if (gerror & GERROR_SFM_ERR) {
if (active & GERROR_SFM_ERR) {
dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
arm_smmu_device_disable(smmu);
}
if (gerror & GERROR_MSI_GERROR_ABT_ERR)
if (active & GERROR_MSI_GERROR_ABT_ERR)
dev_warn(smmu->dev, "GERROR MSI write aborted\n");
if (gerror & GERROR_MSI_PRIQ_ABT_ERR) {
if (active & GERROR_MSI_PRIQ_ABT_ERR) {
dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
arm_smmu_priq_handler(irq, smmu->dev);
}
if (gerror & GERROR_MSI_EVTQ_ABT_ERR) {
if (active & GERROR_MSI_EVTQ_ABT_ERR) {
dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
arm_smmu_evtq_handler(irq, smmu->dev);
}
if (gerror & GERROR_MSI_CMDQ_ABT_ERR) {
if (active & GERROR_MSI_CMDQ_ABT_ERR) {
dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
arm_smmu_cmdq_sync_handler(irq, smmu->dev);
}
if (gerror & GERROR_PRIQ_ABT_ERR)
if (active & GERROR_PRIQ_ABT_ERR)
dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
if (gerror & GERROR_EVTQ_ABT_ERR)
if (active & GERROR_EVTQ_ABT_ERR)
dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
if (gerror & GERROR_CMDQ_ERR)
if (active & GERROR_CMDQ_ERR)
arm_smmu_cmdq_skip_err(smmu);
writel(gerror, smmu->base + ARM_SMMU_GERRORN);
@@ -1335,7 +1346,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool leaf, void *cookie)
size_t granule, bool leaf, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1354,7 +1365,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
do {
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
cmd.tlbi.addr += granule;
} while (size -= granule);
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
@@ -1429,7 +1443,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
if (cfg->cdptr) {
dma_free_coherent(smmu_domain->smmu->dev,
dmam_free_coherent(smmu_domain->smmu->dev,
CTXDESC_CD_DWORDS << 3,
cfg->cdptr,
cfg->cdptr_dma);
@@ -1457,8 +1471,9 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
if (IS_ERR_VALUE(asid))
return asid;
cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
&cfg->cdptr_dma, GFP_KERNEL);
cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
&cfg->cdptr_dma,
GFP_KERNEL | __GFP_ZERO);
if (!cfg->cdptr) {
dev_warn(smmu->dev, "failed to allocate context descriptor\n");
ret = -ENOMEM;
@@ -1804,13 +1819,13 @@ static int arm_smmu_add_device(struct device *dev)
smmu = arm_smmu_get_for_pci_dev(pdev);
if (!smmu) {
ret = -ENOENT;
goto out_put_group;
goto out_remove_dev;
}
smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
if (!smmu_group) {
ret = -ENOMEM;
goto out_put_group;
goto out_remove_dev;
}
smmu_group->ste.valid = true;
@@ -1826,20 +1841,20 @@ static int arm_smmu_add_device(struct device *dev)
for (i = 0; i < smmu_group->num_sids; ++i) {
/* If we already know about this SID, then we're done */
if (smmu_group->sids[i] == sid)
return 0;
goto out_put_group;
}
/* Check the SID is in range of the SMMU and our stream table */
if (!arm_smmu_sid_in_range(smmu, sid)) {
ret = -ERANGE;
goto out_put_group;
goto out_remove_dev;
}
/* Ensure l2 strtab is initialised */
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
ret = arm_smmu_init_l2_strtab(smmu, sid);
if (ret)
goto out_put_group;
goto out_remove_dev;
}
/* Resize the SID array for the group */
@@ -1849,15 +1864,19 @@ static int arm_smmu_add_device(struct device *dev)
if (!sids) {
smmu_group->num_sids--;
ret = -ENOMEM;
goto out_put_group;
goto out_remove_dev;
}
/* Add the new SID */
sids[smmu_group->num_sids - 1] = sid;
smmu_group->sids = sids;
return 0;
out_put_group:
iommu_group_put(group);
return 0;
out_remove_dev:
iommu_group_remove_device(dev);
iommu_group_put(group);
return ret;
}
@@ -1937,7 +1956,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
{
size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
if (!q->base) {
dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
qsz);
@@ -1957,23 +1976,6 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
return 0;
}
static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q)
{
size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;
dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
}
static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
{
arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
if (smmu->features & ARM_SMMU_FEAT_PRI)
arm_smmu_free_one_queue(smmu, &smmu->priq.q);
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
int ret;
@@ -1983,49 +1985,20 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
if (ret)
goto out;
return ret;
/* evtq */
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
if (ret)
goto out_free_cmdq;
return ret;
/* priq */
if (!(smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
if (ret)
goto out_free_evtq;
return 0;
out_free_evtq:
arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
out_free_cmdq:
arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
out:
return ret;
}
static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
{
int i;
size_t size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
for (i = 0; i < cfg->num_l1_ents; ++i) {
struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];
if (!desc->l2ptr)
continue;
dma_free_coherent(smmu->dev, size, desc->l2ptr,
desc->l2ptr_dma);
}
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -2054,7 +2027,6 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
void *strtab;
u64 reg;
u32 size, l1size;
int ret;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/*
@@ -2077,8 +2049,8 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
size, smmu->sid_bits);
l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL);
strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL | __GFP_ZERO);
if (!strtab) {
dev_err(smmu->dev,
"failed to allocate l1 stream table (%u bytes)\n",
@@ -2095,13 +2067,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
<< STRTAB_BASE_CFG_SPLIT_SHIFT;
cfg->strtab_base_cfg = reg;
ret = arm_smmu_init_l1_strtab(smmu);
if (ret)
dma_free_coherent(smmu->dev,
l1size,
strtab,
cfg->strtab_dma);
return ret;
return arm_smmu_init_l1_strtab(smmu);
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
@@ -2112,8 +2078,8 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
GFP_KERNEL);
strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
GFP_KERNEL | __GFP_ZERO);
if (!strtab) {
dev_err(smmu->dev,
"failed to allocate linear stream table (%u bytes)\n",
@@ -2157,21 +2123,6 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
return 0;
}
static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
{
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
u32 size = cfg->num_l1_ents;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
arm_smmu_free_l2_strtab(smmu);
size *= STRTAB_L1_DESC_DWORDS << 3;
} else {
size *= STRTAB_STE_DWORDS << 3;
}
dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
}
static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
@@ -2180,21 +2131,7 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
if (ret)
return ret;
ret = arm_smmu_init_strtab(smmu);
if (ret)
goto out_free_queues;
return 0;
out_free_queues:
arm_smmu_free_queues(smmu);
return ret;
}
static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
{
arm_smmu_free_strtab(smmu);
arm_smmu_free_queues(smmu);
return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
@@ -2532,8 +2469,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
coherent ? "true" : "false");
if (reg & IDR0_STALL_MODEL)
switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
case IDR0_STALL_MODEL_STALL:
/* Fallthrough */
case IDR0_STALL_MODEL_FORCE:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
if (reg & IDR0_S1P)
smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
@@ -2699,15 +2640,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smmu);
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
goto out_free_structures;
return 0;
out_free_structures:
arm_smmu_free_structures(smmu);
return ret;
return arm_smmu_device_reset(smmu);
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2715,7 +2648,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
arm_smmu_device_disable(smmu);
arm_smmu_free_structures(smmu);
return 0;
}
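The allocation changes threaded through the hunks above all follow one pattern: dma_zalloc_coherent() plus a hand-rolled free path becomes device-managed dmam_alloc_coherent() (with __GFP_ZERO supplying the zeroing), which is what lets arm_smmu_free_one_queue(), arm_smmu_free_l2_strtab(), arm_smmu_free_strtab() and the error-unwind labels be deleted outright. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical probe-time helper: the buffer is registered against
	 * the struct device, so error paths and .remove need no explicit
	 * dma_free_coherent() call.
	 */
	static int example_alloc_table(struct device *dev, size_t size,
				       void **table, dma_addr_t *table_dma)
	{
		/* __GFP_ZERO replaces what dma_zalloc_coherent() did */
		*table = dmam_alloc_coherent(dev, size, table_dma,
					     GFP_KERNEL | __GFP_ZERO);
		if (!*table)
			return -ENOMEM;

		return 0; /* freed automatically when the device detaches */
	}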

drivers/iommu/arm-smmu.c

@@ -582,7 +582,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool leaf, void *cookie)
size_t granule, bool leaf, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -597,12 +597,18 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
iova &= ~12UL;
iova |= ARM_SMMU_CB_ASID(cfg);
do {
writel_relaxed(iova, reg);
iova += granule;
} while (size -= granule);
#ifdef CONFIG_64BIT
} else {
iova >>= 12;
iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
do {
writeq_relaxed(iova, reg);
iova += granule >> 12;
} while (size -= granule);
#endif
}
#ifdef CONFIG_64BIT
@@ -610,7 +616,11 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
ARM_SMMU_CB_S2_TLBIIPAS2;
writeq_relaxed(iova >> 12, reg);
iova >>= 12;
do {
writeq_relaxed(iova, reg);
iova += granule >> 12;
} while (size -= granule);
#endif
} else {
reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
@@ -945,9 +955,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
free_irq(irq, domain);
}
if (smmu_domain->pgtbl_ops)
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
@@ -1357,6 +1365,7 @@ static int arm_smmu_add_device(struct device *dev)
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
return 0;
}
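Both SMMU drivers gain the same loop shape for the new granule argument: instead of a single invalidation covering an arbitrarily large range, one operation is issued per granule, so a 2MB range at a 4KB granule emits 512 invalidations. A standalone sketch of the loop with a hypothetical emit callback; as in the hunks above, size is assumed to be a non-zero multiple of granule:

	#include <stddef.h>

	/* One invalidation per granule, the do/while both drivers now use. */
	static void inv_range(unsigned long iova, size_t size, size_t granule,
			      void (*emit_tlbi)(unsigned long iova))
	{
		do {
			emit_tlbi(iova);
			iova += granule;
		} while (size -= granule);
	}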

drivers/iommu/io-pgtable-arm.c

@@ -38,9 +38,6 @@
#define io_pgtable_to_data(x) \
container_of((x), struct arm_lpae_io_pgtable, iop)
#define io_pgtable_ops_to_pgtable(x) \
container_of((x), struct io_pgtable, ops)
#define io_pgtable_ops_to_data(x) \
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
@@ -58,8 +55,10 @@
((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
* (d)->bits_per_level) + (d)->pg_shift)
#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
#define ARM_LPAE_PAGES_PER_PGD(d) \
DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
/*
* Calculate the index at level l used to map virtual address a using the
@@ -169,7 +168,7 @@
/* IOPTE accessors */
#define iopte_deref(pte,d) \
(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
& ~((1ULL << (d)->pg_shift) - 1)))
& ~(ARM_LPAE_GRANULE(d) - 1ULL)))
#define iopte_type(pte,l) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -326,7 +325,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = *ptep;
if (!pte) {
cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
GFP_ATOMIC, cfg);
if (!cptep)
return -ENOMEM;
@@ -405,16 +404,17 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
/* Only leaf entries at the last level */
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
return;
if (lvl == ARM_LPAE_START_LVL(data))
table_size = data->pgd_size;
else
table_size = 1UL << data->pg_shift;
table_size = ARM_LPAE_GRANULE(data);
start = ptep;
/* Only leaf entries at the last level */
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
end = ptep;
else
end = (void *)ptep + table_size;
while (ptep != end) {
@@ -473,7 +473,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
__arm_lpae_set_pte(ptep, table, cfg);
iova &= ~(blk_size - 1);
cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
return size;
}
@@ -486,11 +486,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
void *cookie = data->iop.cookie;
size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
/* Something went horribly wrong and we ran out of page table */
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
pte = *ptep;
/* Something went horribly wrong and we ran out of page table */
if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
if (WARN_ON(!pte))
return 0;
/* If the size matches this level, we're in the right place */
@@ -499,12 +501,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
tlb->tlb_add_flush(iova, size, false, cookie);
tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
false, cookie);
tlb->tlb_sync(cookie);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
tlb->tlb_add_flush(iova, size, true, cookie);
tlb->tlb_add_flush(iova, size, size, true, cookie);
}
return size;
@@ -570,7 +573,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
return 0;
found_translation:
iova &= ((1 << data->pg_shift) - 1);
iova &= (ARM_LPAE_GRANULE(data) - 1);
return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
@@ -668,7 +671,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
switch (1 << data->pg_shift) {
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
reg |= ARM_LPAE_TCR_TG0_4K;
break;
@@ -769,7 +772,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
sl = ARM_LPAE_START_LVL(data);
switch (1 << data->pg_shift) {
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
reg |= ARM_LPAE_TCR_TG0_4K;
sl++; /* SL0 format is different for 4K granule size */
@@ -889,8 +892,8 @@ static void dummy_tlb_flush_all(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
void *cookie)
static void dummy_tlb_add_flush(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
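The net effect in this file is that every tlb_add_flush() call now carries an explicit granule: a leaf unmap passes granule == size (one block invalidation), while freeing a partially-walked table passes the page-table granule so the dead walk is flushed page by page. A hypothetical caller mirroring those two call shapes, assuming the iommu_gather_ops declaration from the header below and SZ_* from <linux/sizes.h>:

	#include <linux/sizes.h>

	static void example_flush_2m(const struct iommu_gather_ops *tlb,
				     unsigned long iova, void *cookie)
	{
		/* Leaf unmap: granule == size, a single invalidation. */
		tlb->tlb_add_flush(iova, SZ_2M, SZ_2M, true, cookie);

		/* Freed partial walk: per-page granule, 512 ops at 4KB. */
		tlb->tlb_add_flush(iova, SZ_2M, SZ_4K, false, cookie);
		tlb->tlb_sync(cookie);
	}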

drivers/iommu/io-pgtable.h

@@ -26,8 +26,8 @@ enum io_pgtable_fmt {
*/
struct iommu_gather_ops {
void (*tlb_flush_all)(void *cookie);
void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
void *cookie);
void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
bool leaf, void *cookie);
void (*tlb_sync)(void *cookie);
};
@@ -131,6 +131,8 @@ struct io_pgtable {
struct io_pgtable_ops ops;
};
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
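On the implementation side, a driver's tlb_add_flush() callback now receives the granule and is expected to step through the range itself. A minimal sketch against the new prototype; everything except the signature is hypothetical:

	struct example_smmu {
		int dummy; /* hypothetical driver state */
	};

	static void example_queue_tlbi(struct example_smmu *smmu,
				       unsigned long iova, bool leaf)
	{
		/* hypothetical: queue one invalidation command */
	}

	static void example_tlb_add_flush(unsigned long iova, size_t size,
					  size_t granule, bool leaf,
					  void *cookie)
	{
		struct example_smmu *smmu = cookie;

		/* One operation per granule across [iova, iova + size). */
		for (; size; size -= granule, iova += granule)
			example_queue_tlbi(smmu, iova, leaf);
	}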

drivers/iommu/ipmmu-vmsa.c

@@ -277,8 +277,8 @@ static void ipmmu_tlb_flush_all(void *cookie)
ipmmu_tlb_invalidate(domain);
}
static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
void *cookie)
static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
/* The hardware doesn't support selective TLB flush. */
}