From 3f3b8d0c9c1838271543df9e655032117a663f88 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 18 Sep 2019 17:17:48 +0100 Subject: [PATCH 01/62] iommu/arm-smmu: Remove .tlb_inv_range indirection Fill in 'native' iommu_flush_ops callbacks for all the arm_smmu_flush_ops variants, and clear up the remains of the previous .tlb_inv_range abstraction. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 110 ++++++++++++++++++++++----------------- drivers/iommu/arm-smmu.h | 2 - 2 files changed, 63 insertions(+), 49 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b18aac4c105e..78292e8e31a5 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -312,7 +312,7 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) } static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) + size_t granule, void *cookie, bool leaf) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -342,7 +342,7 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, } static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) + size_t granule, void *cookie, bool leaf) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -362,14 +362,63 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, } while (size -= granule); } +static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true); +} + +static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true); +} + +static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_context_s2(cookie); +} /* * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears * almost negligible, but the benefit of getting the first one in as far ahead * of the sync as possible is significant, hence we don't just make this a - * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think. + * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might + * think. 
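+ * (Concretely: every page unmapped on this path still fires its own TLBIVMID + * write, which is redundant for correctness but lets the hardware start + * invalidating early, so the eventual sync has less left to wait for.)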
*/ -static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -380,66 +429,33 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); } -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - - ops->tlb_inv_range(iova, size, granule, false, cookie); - ops->tlb_sync(cookie); -} - -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - - ops->tlb_inv_range(iova, size, granule, true, cookie); - ops->tlb_sync(cookie); -} - -static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, - void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - - ops->tlb_inv_range(iova, granule, granule, true, cookie); -} - static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { .tlb = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, + .tlb_add_page = arm_smmu_tlb_add_page_s1, }, - .tlb_inv_range = arm_smmu_tlb_inv_range_s1, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, + .tlb_add_page = arm_smmu_tlb_add_page_s2, }, - .tlb_inv_range = arm_smmu_tlb_inv_range_s2, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1, + .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, + .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }, - .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, }; diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index b19b6cae9b5e..6edd35ca983c 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -306,8 +306,6 @@ enum arm_smmu_domain_stage { struct arm_smmu_flush_ops { struct iommu_flush_ops tlb; - void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule, - bool leaf, void *cookie); void (*tlb_sync)(void *cookie); }; From 3370cb6bf64f6896a30eb7ad97721b9598c8fb10 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 18 Sep 2019 17:17:49 +0100 Subject: [PATCH 02/62] iommu/arm-smmu: Remove "leaf" indirection Now that the "leaf" flag is no longer part of an external interface, there's no need to 
use it to infer a register offset at runtime when we can just as easily encode the offset directly in its place. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 78292e8e31a5..4edbe58a303a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -312,18 +312,16 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) } static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, - size_t granule, void *cookie, bool leaf) + size_t granule, void *cookie, int reg) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - int reg, idx = cfg->cbndx; + int idx = cfg->cbndx; if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; - if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { iova = (iova >> 12) << 12; iova |= cfg->asid; @@ -342,16 +340,15 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, } static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, - size_t granule, void *cookie, bool leaf) + size_t granule, void *cookie, int reg) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - int reg, idx = smmu_domain->cfg.cbndx; + int idx = smmu_domain->cfg.cbndx; if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; do { if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) @@ -365,14 +362,16 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false); + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, + ARM_SMMU_CB_S1_TLBIVA); arm_smmu_tlb_sync_context(cookie); } static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true); + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, + ARM_SMMU_CB_S1_TLBIVAL); arm_smmu_tlb_sync_context(cookie); } @@ -380,20 +379,23 @@ static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true); + arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, + ARM_SMMU_CB_S1_TLBIVAL); } static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false); + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, + ARM_SMMU_CB_S2_TLBIIPAS2); arm_smmu_tlb_sync_context(cookie); } static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true); + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, + ARM_SMMU_CB_S2_TLBIIPAS2L); arm_smmu_tlb_sync_context(cookie); } @@ -401,7 +403,8 @@ static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { - arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true); + arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, + 
ARM_SMMU_CB_S2_TLBIIPAS2L); } static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size, From ae2b60f34ab21780bc30d01ae976cc7340446bde Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 18 Sep 2019 17:17:50 +0100 Subject: [PATCH 03/62] iommu/arm-smmu: Move .tlb_sync method to implementation With the .tlb_sync interface no longer exposed directly to io-pgtable, strip away the remains of that abstraction layer. Retain the callback in spirit, though, by transforming it into an implementation override for the low-level sync routine itself, for which we will have at least one user. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 33 +++++++++++++++------------------ drivers/iommu/arm-smmu.h | 3 ++- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 4edbe58a303a..25876eb9266d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -244,6 +244,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, unsigned int spin_cnt, delay; u32 reg; + if (smmu->impl && unlikely(smmu->impl->tlb_sync)) + return smmu->impl->tlb_sync(smmu, page, sync, status); + arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { @@ -268,9 +271,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } -static void arm_smmu_tlb_sync_context(void *cookie) +static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; unsigned long flags; @@ -280,13 +282,6 @@ static void arm_smmu_tlb_sync_context(void *cookie) spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); } -static void arm_smmu_tlb_sync_vmid(void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - - arm_smmu_tlb_sync_global(smmu_domain->smmu); -} - static void arm_smmu_tlb_inv_context_s1(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; @@ -297,7 +292,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie) wmb(); arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); - arm_smmu_tlb_sync_context(cookie); + arm_smmu_tlb_sync_context(smmu_domain); } static void arm_smmu_tlb_inv_context_s2(void *cookie) @@ -439,7 +434,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, .tlb_add_page = arm_smmu_tlb_add_page_s1, }, - .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { @@ -449,7 +443,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, .tlb_add_page = arm_smmu_tlb_add_page_s2, }, - .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { @@ -459,7 +452,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }, - .tlb_sync = arm_smmu_tlb_sync_vmid, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -1229,11 +1221,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if 
(smmu_domain->flush_ops) { - arm_smmu_rpm_get(smmu); - smmu_domain->flush_ops->tlb_sync(smmu_domain); - arm_smmu_rpm_put(smmu); - } + if (!smmu) + return; + + arm_smmu_rpm_get(smmu); + if (smmu->version == ARM_SMMU_V2 || + smmu_domain->stage == ARM_SMMU_DOMAIN_S1) + arm_smmu_tlb_sync_context(smmu_domain); + else + arm_smmu_tlb_sync_global(smmu); + arm_smmu_rpm_put(smmu); } static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 6edd35ca983c..5032102f05b7 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -306,7 +306,6 @@ enum arm_smmu_domain_stage { struct arm_smmu_flush_ops { struct iommu_flush_ops tlb; - void (*tlb_sync)(void *cookie); }; struct arm_smmu_domain { @@ -333,6 +332,8 @@ struct arm_smmu_impl { int (*cfg_probe)(struct arm_smmu_device *smmu); int (*reset)(struct arm_smmu_device *smmu); int (*init_context)(struct arm_smmu_domain *smmu_domain); + void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync, + int status); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) From 696bcfb709862077e8fd0e484cca952db7f2001a Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 18 Sep 2019 17:17:51 +0100 Subject: [PATCH 04/62] iommu/arm-smmu: Remove arm_smmu_flush_ops Now it's just an empty wrapper. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 40 +++++++++++++++++----------------------- drivers/iommu/arm-smmu.h | 6 +----- 2 files changed, 18 insertions(+), 28 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 25876eb9266d..d2d357c87b61 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -427,31 +427,25 @@ static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); } -static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { - .tlb = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, - .tlb_add_page = arm_smmu_tlb_add_page_s1, - }, +static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s1, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, + .tlb_add_page = arm_smmu_tlb_add_page_s1, }; -static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { - .tlb = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, - .tlb_add_page = arm_smmu_tlb_add_page_s2, - }, +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, + .tlb_add_page = arm_smmu_tlb_add_page_s2, }; -static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { - .tlb = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1, - .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, - .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, - }, +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1, + .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, + .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ 
-781,7 +775,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, - .tlb = &smmu_domain->flush_ops->tlb, + .tlb = smmu_domain->flush_ops, .iommu_dev = smmu->dev, }; @@ -1210,7 +1204,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain); + smmu_domain->flush_ops->tlb_flush_all(smmu_domain); arm_smmu_rpm_put(smmu); } } diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 5032102f05b7..ba0f05952dd9 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -304,14 +304,10 @@ enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_BYPASS, }; -struct arm_smmu_flush_ops { - struct iommu_flush_ops tlb; -}; - struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct arm_smmu_flush_ops *flush_ops; + const struct iommu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; bool non_strict; From 931a0ba638e09a707e9a905cb6bea1fb1c6d4183 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 17 Sep 2019 15:45:34 +0100 Subject: [PATCH 05/62] iommu/arm-smmu: Report USF more clearly Although CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT is a welcome tool for smoking out inadequate firmware, the failure mode is non-obvious and can be confusing for end users. Add some special-case reporting of Unidentified Stream Faults to help clarify this particular symptom. Since we're adding yet another print to the mix, also break out an explicit ratelimit state to make sure everything stays together (and reduce the static storage footprint a little). Reviewed-by: Douglas Anderson Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 21 ++++++++++++++++----- drivers/iommu/arm-smmu.h | 2 ++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index d2d357c87b61..bd6683c210da 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -477,6 +478,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) { u32 gfsr, gfsynr0, gfsynr1, gfsynr2; struct arm_smmu_device *smmu = dev; + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0); @@ -486,11 +489,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) if (!gfsr) return IRQ_NONE; - dev_err_ratelimited(smmu->dev, - "Unexpected global fault, this could be serious\n"); - dev_err_ratelimited(smmu->dev, - "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", - gfsr, gfsynr0, gfsynr1, gfsynr2); + if (__ratelimit(&rs)) { + if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) && + (gfsr & sGFSR_USF)) + dev_err(smmu->dev, + "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n", + (u16)gfsynr1); + else + dev_err(smmu->dev, + "Unexpected global fault, this could be serious\n"); + dev_err(smmu->dev, + "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", + gfsr, gfsynr0, gfsynr1, gfsynr2); + } arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr); return IRQ_HANDLED; diff --git a/drivers/iommu/arm-smmu.h 
b/drivers/iommu/arm-smmu.h index ba0f05952dd9..409716410b0d 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -79,6 +79,8 @@ #define ID7_MINOR GENMASK(3, 0) #define ARM_SMMU_GR0_sGFSR 0x48 +#define sGFSR_USF BIT(1) + #define ARM_SMMU_GR0_sGFSYNR0 0x50 #define ARM_SMMU_GR0_sGFSYNR1 0x54 #define ARM_SMMU_GR0_sGFSYNR2 0x58 From 9062c1d0bedacf68d9c92cbd62c62a6fe6f6cebc Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 9 Sep 2019 22:19:19 +0200 Subject: [PATCH 06/62] iommu/io-pgtable: Move some initialization data to .init.rodata The memory used by '__init' functions can be freed once the initialization phase has been performed. Mark some 'static const' arrays defined and used within some '__init' functions as '__initconst', so that the corresponding data can also be discarded. Without '__initconst', the data are put in the .rodata section. With the qualifier, they are put in the .init.rodata section. With gcc 8.3.0, the following changes have been measured: Without '__initconst': section size .rodata 00000720 .init.rodata 00000018 With '__initconst': section size .rodata 00000660 .init.rodata 00000058 Signed-off-by: Christophe JAILLET Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 4c91359057c5..a743a2601334 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1113,7 +1113,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) { - static const enum io_pgtable_fmt fmts[] = { + static const enum io_pgtable_fmt fmts[] __initconst = { ARM_64_LPAE_S1, ARM_64_LPAE_S2, }; @@ -1212,13 +1212,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) static int __init arm_lpae_do_selftests(void) { - static const unsigned long pgsize[] = { + static const unsigned long pgsize[] __initconst = { SZ_4K | SZ_2M | SZ_1G, SZ_16K | SZ_32M, SZ_64K | SZ_512M, }; - static const unsigned int ias[] = { + static const unsigned int ias[] __initconst = { 32, 36, 40, 42, 44, 48, }; From bdde4718aba368c33705ccff8f3e29586d41b69b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 15 Sep 2019 21:34:01 +0200 Subject: [PATCH 07/62] iommu/arm-smmu: Axe a useless test in 'arm_smmu_master_alloc_smes()' 'iommu_group_get_for_dev()' never returns NULL, so this test can be removed. Reviewed-by: Robin Murphy Signed-off-by: Christophe JAILLET Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index bd6683c210da..080af0326816 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1054,8 +1054,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev) } group = iommu_group_get_for_dev(dev); - if (!group) - group = ERR_PTR(-ENOMEM); if (IS_ERR(group)) { ret = PTR_ERR(group); goto out_err; From 37ec8eb851c1876580a963f283fe7496592b9f72 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Sun, 8 Sep 2019 09:56:37 -0700 Subject: [PATCH 08/62] iommu/amd: Remove unnecessary locking from AMD iommu driver With or without locking it doesn't make sense for two writers to be writing to the same IOVA range at the same time. Even with locking we still have a race condition: the outcome depends on which writer takes the lock last, so we still can't be sure what the result will be.
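For example, if CPU A maps an IOVA to page P1 while CPU B concurrently maps the same IOVA to page P2, the surviving PTE is simply whichever write lands last; a lock only changes which write that is, and one of the two mappings is still silently lost.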
With locking the result will be more sane: it will be correct for the last writer, but still useless because we can't be sure which writer will get the lock last. It's a fundamentally broken design to have two writers writing to the same IOVA range at the same time. So we can remove the locking and work on the assumption that no two writers will be writing to the same IOVA range at the same time. The only exception is when we have to allocate a middle page in the page tables, since the middle page can cover more than just the IOVA range a writer has been allocated. However, this isn't an issue in the AMD driver because it can atomically allocate middle pages using "cmpxchg64()". Signed-off-by: Tom Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 10 +--------- drivers/iommu/amd_iommu_types.h | 1 - 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2369b8af81f3..97ca4ed4ce0c 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2934,7 +2934,6 @@ static void protection_domain_free(struct protection_domain *domain) static int protection_domain_init(struct protection_domain *domain) { spin_lock_init(&domain->lock); - mutex_init(&domain->api_lock); domain->id = domain_id_alloc(); if (!domain->id) return -ENOMEM; @@ -3121,9 +3120,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, if (iommu_prot & IOMMU_WRITE) prot |= IOMMU_PROT_IW; - mutex_lock(&domain->api_lock); ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL); - mutex_unlock(&domain->api_lock); domain_flush_np_cache(domain, iova, page_size); @@ -3135,16 +3132,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); - size_t unmap_size; if (domain->mode == PAGE_MODE_NONE) return 0; - mutex_lock(&domain->api_lock); - unmap_size = iommu_unmap_page(domain, iova, page_size); - mutex_unlock(&domain->api_lock); - - return unmap_size; + return iommu_unmap_page(domain, iova, page_size); } static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index c9c1612d52e0..becbd3bd9180 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -468,7 +468,6 @@ struct protection_domain { struct iommu_domain domain; /* generic domain handle used by iommu core code */ spinlock_t lock; /* mostly used to lock the page table*/ - struct mutex api_lock; /* protect page tables in the iommu-api path */ u16 id; /* the domain id written to the device table */ int mode; /* paging mode (0-6 levels) */ u64 *pt_root; /* page table root pointer */ From 781ca2de89bae1b1d2c96df9ef33e9a324415995 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Sun, 8 Sep 2019 09:56:38 -0700 Subject: [PATCH 09/62] iommu: Add gfp parameter to iommu_ops::map Add a gfp_t parameter to the iommu_ops::map function. Remove the needless locking in the AMD iommu driver. The iommu_ops::map function (or the iommu_map function which calls it) was always supposed to be sleepable (according to Joerg's comment in this thread: https://lore.kernel.org/patchwork/patch/977520/) and so should probably have had a "might_sleep()" since it was written. However, currently the dma-iommu api can call iommu_map in an atomic context, which it shouldn't do.
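To make the resulting interface concrete, the new entry points added to iommu.c boil down to a sleepable/atomic pair around a common __iommu_map() helper (a sketch distilled from the hunks below, not a separate change):

	int iommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
	{
		/* Sleepable path: page-table memory may be allocated with GFP_KERNEL. */
		might_sleep();
		return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
	}

	int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
	{
		/* Atomic-context path (used by dma-iommu): falls back to GFP_ATOMIC. */
		return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
	}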
Calling iommu_map in atomic context doesn't cause any problems today, because any iommu driver which uses the dma-iommu api uses GFP_ATOMIC in its iommu_ops::map function. But doing this wastes the memory allocator's atomic pools. Signed-off-by: Tom Murphy Reviewed-by: Robin Murphy Reviewed-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 3 ++- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/dma-iommu.c | 6 ++--- drivers/iommu/exynos-iommu.c | 2 +- drivers/iommu/intel-iommu.c | 2 +- drivers/iommu/iommu.c | 43 +++++++++++++++++++++++++++++----- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/mtk_iommu_v1.c | 2 +- drivers/iommu/omap-iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 2 +- drivers/iommu/rockchip-iommu.c | 2 +- drivers/iommu/s390-iommu.c | 2 +- drivers/iommu/tegra-gart.c | 2 +- drivers/iommu/tegra-smmu.c | 2 +- drivers/iommu/virtio-iommu.c | 2 +- include/linux/iommu.h | 21 ++++++++++++++++- 19 files changed, 77 insertions(+), 26 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 97ca4ed4ce0c..1444757f0ff9 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3106,7 +3106,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, - phys_addr_t paddr, size_t page_size, int iommu_prot) + phys_addr_t paddr, size_t page_size, int iommu_prot, + gfp_t gfp) { struct protection_domain *domain = to_pdomain(dom); int prot = 0; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8da93e730d6f..f1bdaaa8c5de 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2448,7 +2448,7 @@ out_unlock: } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b18aac4c105e..e85bc0e8d564 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1159,7 +1159,7 @@ rpm_put: } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f321279baf9e..cc3bf5cf0a90 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -476,7 +476,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, if (!iova) return DMA_MAPPING_ERROR; - if (iommu_map(domain, iova, phys - iova_off, size, prot)) { + if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { iommu_dma_free_iova(cookie, iova, size); return DMA_MAPPING_ERROR; } @@ -611,7 +611,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) + if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) < size) goto out_free_sg; @@ -871,7 +871,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * We'll leave any physical concatenation to the IOMMU driver's * implementation - it knows better than we do.
*/ - if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) + if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; return __finalise_sg(dev, sg, nents, iova); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 9c94e16fb127..186ff5cc975c 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, */ static int exynos_iommu_map(struct iommu_domain *iommu_domain, unsigned long l_iova, phys_addr_t paddr, size_t size, - int prot) + int prot, gfp_t gfp) { struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); sysmmu_pte_t *entry; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3f974919d3bd..615495aa613e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5432,7 +5432,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, - size_t size, int iommu_prot) + size_t size, int iommu_prot, gfp_t gfp) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); u64 max_addr; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d658c7c6a2ab..f8853dbf1c4e 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1854,8 +1854,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, return pgsize; } -int iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) +int __iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { const struct iommu_ops *ops = domain->ops; unsigned long orig_iova = iova; @@ -1892,8 +1892,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", iova, &paddr, pgsize); + ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); - ret = ops->map(domain, iova, paddr, pgsize, prot); if (ret) break; @@ -1913,8 +1913,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, return ret; } + +int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + might_sleep(); + return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(iommu_map); +int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iommu_map_atomic); + static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) @@ -1991,8 +2005,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_unmap_fast); -size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, int prot) +size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot, + gfp_t gfp) { size_t len = 0, mapped = 0; phys_addr_t start; @@ -2003,7 +2018,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, phys_addr_t s_phys = sg_phys(sg); if (len && s_phys != start + len) { - ret = iommu_map(domain, iova + mapped, start, len, prot); + ret = __iommu_map(domain, iova + mapped, start, + len, prot, gfp); + if (ret) goto out_err; @@ -2031,8 +2048,22 @@ out_err: return 
0; } + +size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot) +{ + might_sleep(); + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(iommu_map_sg); +size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot) +{ + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iommu_map_sg_atomic); + int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) { diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9da8309f7170..35f3edaba0e2 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -724,7 +724,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, } static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index be99d408cf35..93f14bca26ee 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -504,7 +504,7 @@ fail: } static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t len, int prot) + phys_addr_t pa, size_t len, int prot, gfp_t gfp) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 67a483c1a935..556beaa110b3 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -412,7 +412,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, } static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index b5efd6dac953..e93b94ecac45 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, } static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 09c6e1c680db..be551cc34be4 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) } static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, - phys_addr_t pa, size_t bytes, int prot) + phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct device *dev = omap_domain->dev; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c31e7bc4ccbe..6ad5f934f00f 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de } static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, - 
phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { int ret; unsigned long flags; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 26290f310f90..a4f6425fd12d 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -757,7 +757,7 @@ unwind: } static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 3b0b18e23187..1137f3ddcb85 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -265,7 +265,7 @@ undo_cpu_trans: } static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct s390_domain *s390_domain = to_s390_domain(domain); int flags = ZPCI_PTE_VALID, rc = 0; diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 3924f7c05544..3fb7ba72507d 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, } static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t bytes, int prot) + phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) { struct gart_device *gart = gart_handle; int ret; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 7293fc3f796d..99f85fb5a704 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -650,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, } static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct tegra_smmu_as *as = to_smmu_as(domain); dma_addr_t pte_dma; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 3ea9d7682999..2ebcf4bae0c6 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -713,7 +713,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) } static int viommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { int ret; u32 flags; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 29bac5345563..6ca3fb2873d7 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -256,7 +256,7 @@ struct iommu_ops { int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); @@ -421,6 +421,8 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); +extern int 
iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_unmap_fast(struct iommu_domain *domain, @@ -428,6 +430,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,unsigned int nents, int prot); +extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -662,6 +667,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; } +static inline int iommu_map_atomic(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + return -ENODEV; +} + static inline size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { @@ -682,6 +694,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, return 0; } +static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) +{ + return 0; +} + static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { } From 795bbbb9b6f80306be3d45c79527324036a68509 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Sun, 8 Sep 2019 09:56:39 -0700 Subject: [PATCH 10/62] iommu/dma-iommu: Handle deferred devices Handle devices which defer their attach to the iommu in the dma-iommu api Signed-off-by: Tom Murphy Reviewed-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index cc3bf5cf0a90..b58b04ba1e02 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -22,6 +22,7 @@ #include #include #include +#include struct iommu_dma_msi_page { struct list_head list; @@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, return iova_reserve_iommu_regions(dev, domain); } +static int iommu_dma_deferred_attach(struct device *dev, + struct iommu_domain *domain) +{ + const struct iommu_ops *ops = domain->ops; + + if (!is_kdump_kernel()) + return 0; + + if (unlikely(ops->is_attach_deferred && + ops->is_attach_deferred(domain, dev))) + return iommu_attach_device(domain, dev); + + return 0; +} + /** * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * page flags. 
@@ -470,6 +486,9 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t iova_off = iova_offset(iovad, phys); dma_addr_t iova; + if (unlikely(iommu_dma_deferred_attach(dev, domain))) + return DMA_MAPPING_ERROR; + size = iova_align(iovad, size + iova_off); iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); @@ -579,6 +598,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, *dma_handle = DMA_MAPPING_ERROR; + if (unlikely(iommu_dma_deferred_attach(dev, domain))) + return NULL; + min_size = alloc_sizes & -alloc_sizes; if (min_size < PAGE_SIZE) { min_size = PAGE_SIZE; @@ -711,7 +733,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dma_handle; - dma_handle =__iommu_dma_map(dev, phys, size, prot); + dma_handle = __iommu_dma_map(dev, phys, size, prot); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) arch_sync_dma_for_device(dev, phys, size, dir); @@ -821,6 +843,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, unsigned long mask = dma_get_seg_boundary(dev); int i; + if (unlikely(iommu_dma_deferred_attach(dev, domain))) + return 0; + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir); From 6e2350207f40e24884da262976f7fd4fba387e8a Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Sun, 8 Sep 2019 09:56:40 -0700 Subject: [PATCH 11/62] iommu/dma-iommu: Use the dev->coherent_dma_mask Use the dev->coherent_dma_mask when allocating in the dma-iommu ops api. Signed-off-by: Tom Murphy Reviewed-by: Robin Murphy Reviewed-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index b58b04ba1e02..ecc08aef9b58 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -478,7 +478,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot) + size_t size, int prot, dma_addr_t dma_mask) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -491,7 +491,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size = iova_align(iovad, size + iova_off); - iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); + iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); if (!iova) return DMA_MAPPING_ERROR; @@ -733,7 +733,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dma_handle; - dma_handle = __iommu_dma_map(dev, phys, size, prot); + dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev)); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) arch_sync_dma_for_device(dev, phys, size, dir); @@ -936,7 +936,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __iommu_dma_map(dev, phys, size, - dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); + dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, + dma_get_mask(dev)); } static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, @@ -1042,7 +1043,8 @@ static void *iommu_dma_alloc(struct 
device *dev, size_t size, if (!cpu_addr) return NULL; - *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot); + *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, + dev->coherent_dma_mask); if (*handle == DMA_MAPPING_ERROR) { __iommu_dma_free(dev, size, cpu_addr); return NULL; From be62dbf554c5b50718a54a359372c148cd9975c7 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Sun, 8 Sep 2019 09:56:41 -0700 Subject: [PATCH 12/62] iommu/amd: Convert AMD iommu driver to the dma-iommu api Convert the AMD iommu driver to the dma-iommu api. Remove the iova handling and reserve region code from the AMD iommu driver. Signed-off-by: Tom Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 1 + drivers/iommu/amd_iommu.c | 692 ++++---------------------------------- 2 files changed, 68 insertions(+), 625 deletions(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e3842eabcfdd..384c1a183cb5 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -138,6 +138,7 @@ config AMD_IOMMU select PCI_PASID select IOMMU_API select IOMMU_IOVA + select IOMMU_DMA depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1444757f0ff9..1881fe8bdfed 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; -static const struct dma_map_ops amd_iommu_dma_ops; - /* * general struct to manage commands send to an IOMMU */ @@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); -static void iova_domain_flush_tlb(struct iova_domain *iovad); - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - /* generic protection domain information */ - struct protection_domain domain; - - /* IOVA RB-Tree */ - struct iova_domain iovad; -}; - -static struct iova_domain reserved_iova_ranges; -static struct lock_class_key reserved_rbtree_key; /**************************************************************************** * @@ -187,12 +171,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom) return container_of(dom, struct protection_domain, domain); } -static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain) -{ - BUG_ON(domain->flags != PD_DMA_OPS_MASK); - return container_of(domain, struct dma_ops_domain, domain); -} - static struct iommu_dev_data *alloc_dev_data(u16 devid) { struct iommu_dev_data *dev_data; @@ -1301,12 +1279,6 @@ static void domain_flush_pages(struct protection_domain *domain, __domain_flush_pages(domain, address, size, 0); } -/* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); -} - /* Flush the whole IO/TLB for a given protection domain - including PDE */ static void domain_flush_tlb_pde(struct protection_domain *domain) { @@ -1751,43 +1723,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, return unmapped; } -/**************************************************************************** - * - * 
The next functions belong to the address allocator for the dma_ops - * interface functions. - * - ****************************************************************************/ - - -static unsigned long dma_ops_alloc_iova(struct device *dev, - struct dma_ops_domain *dma_dom, - unsigned int pages, u64 dma_mask) -{ - unsigned long pfn = 0; - - pages = __roundup_pow_of_two(pages); - - if (dma_mask > DMA_BIT_MASK(32)) - pfn = alloc_iova_fast(&dma_dom->iovad, pages, - IOVA_PFN(DMA_BIT_MASK(32)), false); - - if (!pfn) - pfn = alloc_iova_fast(&dma_dom->iovad, pages, - IOVA_PFN(dma_mask), true); - - return (pfn << PAGE_SHIFT); -} - -static void dma_ops_free_iova(struct dma_ops_domain *dma_dom, - unsigned long address, - unsigned int pages) -{ - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - free_iova_fast(&dma_dom->iovad, address, pages); -} - /**************************************************************************** * * The next functions belong to the domain allocation. A domain is @@ -1864,42 +1799,23 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } -static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom) -{ - unsigned long flags; - - spin_lock_irqsave(&dom->domain.lock, flags); - domain_flush_tlb(&dom->domain); - domain_flush_complete(&dom->domain); - spin_unlock_irqrestore(&dom->domain.lock, flags); -} - -static void iova_domain_flush_tlb(struct iova_domain *iovad) -{ - struct dma_ops_domain *dom; - - dom = container_of(iovad, struct dma_ops_domain, iovad); - - dma_ops_domain_flush_tlb(dom); -} - /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table */ -static void dma_ops_domain_free(struct dma_ops_domain *dom) +static void dma_ops_domain_free(struct protection_domain *domain) { - if (!dom) + if (!domain) return; - put_iova_domain(&dom->iovad); + iommu_put_dma_cookie(&domain->domain); - free_pagetable(&dom->domain); + free_pagetable(domain); - if (dom->domain.id) - domain_id_free(dom->domain.id); + if (domain->id) + domain_id_free(domain->id); - kfree(dom); + kfree(domain); } /* @@ -1907,35 +1823,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) * It also initializes the page table and the address allocator data * structures required for the dma_ops interface */ -static struct dma_ops_domain *dma_ops_domain_alloc(void) +static struct protection_domain *dma_ops_domain_alloc(void) { - struct dma_ops_domain *dma_dom; + struct protection_domain *domain; - dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); - if (!dma_dom) + domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL); + if (!domain) return NULL; - if (protection_domain_init(&dma_dom->domain)) - goto free_dma_dom; + if (protection_domain_init(domain)) + goto free_domain; - dma_dom->domain.mode = PAGE_MODE_3_LEVEL; - dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); - dma_dom->domain.flags = PD_DMA_OPS_MASK; - if (!dma_dom->domain.pt_root) - goto free_dma_dom; + domain->mode = PAGE_MODE_3_LEVEL; + domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + domain->flags = PD_DMA_OPS_MASK; + if (!domain->pt_root) + goto free_domain; - init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN); + if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM) + goto free_domain; - if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL)) - goto free_dma_dom; + return domain; - /* Initialize reserved ranges */ - 
copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); - - return dma_dom; - -free_dma_dom: - dma_ops_domain_free(dma_dom); +free_domain: + dma_ops_domain_free(domain); return NULL; } @@ -2303,8 +2214,8 @@ static int amd_iommu_add_device(struct device *dev) domain = iommu_get_domain_for_dev(dev); if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; - else - dev->dma_ops = &amd_iommu_dma_ops; + else if (domain->type == IOMMU_DOMAIN_DMA) + iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0); out: iommu_completion_wait(iommu); @@ -2338,43 +2249,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) return acpihid_device_group(dev); } +static int amd_iommu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + return -ENODEV; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = !amd_iommu_unmap_flush; + return 0; + default: + return -ENODEV; + } + break; + default: + return -EINVAL; + } +} + /***************************************************************************** * * The next functions belong to the dma_ops mapping/unmapping code. * *****************************************************************************/ -/* - * In the dma_ops path we only have the struct device. This function - * finds the corresponding IOMMU, the protection domain and the - * requestor id for a given device. - * If the device is not yet associated with a domain this is also done - * in this function. - */ -static struct protection_domain *get_domain(struct device *dev) -{ - struct protection_domain *domain; - struct iommu_domain *io_domain; - - if (!check_device(dev)) - return ERR_PTR(-EINVAL); - - domain = get_dev_data(dev)->domain; - if (domain == NULL && get_dev_data(dev)->defer_attach) { - get_dev_data(dev)->defer_attach = false; - io_domain = iommu_get_domain_for_dev(dev); - domain = to_pdomain(io_domain); - attach_device(dev, domain); - } - if (domain == NULL) - return ERR_PTR(-EBUSY); - - if (!dma_ops_domain(domain)) - return ERR_PTR(-EBUSY); - - return domain; -} - static void update_device_table(struct protection_domain *domain) { struct iommu_dev_data *dev_data; @@ -2400,458 +2300,6 @@ static void update_domain(struct protection_domain *domain) domain_flush_tlb_pde(domain); } -static int dir2prot(enum dma_data_direction direction) -{ - if (direction == DMA_TO_DEVICE) - return IOMMU_PROT_IR; - else if (direction == DMA_FROM_DEVICE) - return IOMMU_PROT_IW; - else if (direction == DMA_BIDIRECTIONAL) - return IOMMU_PROT_IW | IOMMU_PROT_IR; - else - return 0; -} - -/* - * This function contains common code for mapping of a physically - * contiguous memory region into DMA address space. It is used by all - * mapping functions provided with this IOMMU driver. - * Must be called with the domain lock held. 
- */ -static dma_addr_t __map_single(struct device *dev, - struct dma_ops_domain *dma_dom, - phys_addr_t paddr, - size_t size, - enum dma_data_direction direction, - u64 dma_mask) -{ - dma_addr_t offset = paddr & ~PAGE_MASK; - dma_addr_t address, start, ret; - unsigned long flags; - unsigned int pages; - int prot = 0; - int i; - - pages = iommu_num_pages(paddr, size, PAGE_SIZE); - paddr &= PAGE_MASK; - - address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); - if (!address) - goto out; - - prot = dir2prot(direction); - - start = address; - for (i = 0; i < pages; ++i) { - ret = iommu_map_page(&dma_dom->domain, start, paddr, - PAGE_SIZE, prot, GFP_ATOMIC); - if (ret) - goto out_unmap; - - paddr += PAGE_SIZE; - start += PAGE_SIZE; - } - address += offset; - - domain_flush_np_cache(&dma_dom->domain, address, size); - -out: - return address; - -out_unmap: - - for (--i; i >= 0; --i) { - start -= PAGE_SIZE; - iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); - } - - spin_lock_irqsave(&dma_dom->domain.lock, flags); - domain_flush_tlb(&dma_dom->domain); - domain_flush_complete(&dma_dom->domain); - spin_unlock_irqrestore(&dma_dom->domain.lock, flags); - - dma_ops_free_iova(dma_dom, address, pages); - - return DMA_MAPPING_ERROR; -} - -/* - * Does the reverse of the __map_single function. Must be called with - * the domain lock held too - */ -static void __unmap_single(struct dma_ops_domain *dma_dom, - dma_addr_t dma_addr, - size_t size, - int dir) -{ - dma_addr_t i, start; - unsigned int pages; - - pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); - dma_addr &= PAGE_MASK; - start = dma_addr; - - for (i = 0; i < pages; ++i) { - iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); - start += PAGE_SIZE; - } - - if (amd_iommu_unmap_flush) { - unsigned long flags; - - spin_lock_irqsave(&dma_dom->domain.lock, flags); - domain_flush_tlb(&dma_dom->domain); - domain_flush_complete(&dma_dom->domain); - spin_unlock_irqrestore(&dma_dom->domain.lock, flags); - dma_ops_free_iova(dma_dom, dma_addr, pages); - } else { - pages = __roundup_pow_of_two(pages); - queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); - } -} - -/* - * The exported map_single function for dma_ops. - */ -static dma_addr_t map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t paddr = page_to_phys(page) + offset; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - u64 dma_mask; - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return (dma_addr_t)paddr; - else if (IS_ERR(domain)) - return DMA_MAPPING_ERROR; - - dma_mask = *dev->dma_mask; - dma_dom = to_dma_ops_domain(domain); - - return __map_single(dev, dma_dom, paddr, size, dir, dma_mask); -} - -/* - * The exported unmap_single function for dma_ops. - */ -static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - dma_dom = to_dma_ops_domain(domain); - - __unmap_single(dma_dom, dma_addr, size, dir); -} - -static int sg_num_pages(struct device *dev, - struct scatterlist *sglist, - int nelems) -{ - unsigned long mask, boundary_size; - struct scatterlist *s; - int i, npages = 0; - - mask = dma_get_seg_boundary(dev); - boundary_size = mask + 1 ? 
ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : - 1UL << (BITS_PER_LONG - PAGE_SHIFT); - - for_each_sg(sglist, s, nelems, i) { - int p, n; - - s->dma_address = npages << PAGE_SHIFT; - p = npages % boundary_size; - n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - if (p + n > boundary_size) - npages += boundary_size - p; - npages += n; - } - - return npages; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). - */ -static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction, - unsigned long attrs) -{ - int mapped_pages = 0, npages = 0, prot = 0, i; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct scatterlist *s; - unsigned long address; - u64 dma_mask; - int ret; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return 0; - - dma_dom = to_dma_ops_domain(domain); - dma_mask = *dev->dma_mask; - - npages = sg_num_pages(dev, sglist, nelems); - - address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); - if (!address) - goto out_err; - - prot = dir2prot(direction); - - /* Map all sg entries */ - for_each_sg(sglist, s, nelems, i) { - int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - - for (j = 0; j < pages; ++j) { - unsigned long bus_addr, phys_addr; - - bus_addr = address + s->dma_address + (j << PAGE_SHIFT); - phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, - PAGE_SIZE, prot, - GFP_ATOMIC | __GFP_NOWARN); - if (ret) - goto out_unmap; - - mapped_pages += 1; - } - } - - /* Everything is mapped - write the right values into s->dma_address */ - for_each_sg(sglist, s, nelems, i) { - /* - * Add in the remaining piece of the scatter-gather offset that - * was masked out when we were determining the physical address - * via (sg_phys(s) & PAGE_MASK) earlier. - */ - s->dma_address += address + (s->offset & ~PAGE_MASK); - s->dma_length = s->length; - } - - if (s) - domain_flush_np_cache(domain, s->dma_address, s->dma_length); - - return nelems; - -out_unmap: - dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n", - npages, ret); - - for_each_sg(sglist, s, nelems, i) { - int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - - for (j = 0; j < pages; ++j) { - unsigned long bus_addr; - - bus_addr = address + s->dma_address + (j << PAGE_SHIFT); - iommu_unmap_page(domain, bus_addr, PAGE_SIZE); - - if (--mapped_pages == 0) - goto out_free_iova; - } - } - -out_free_iova: - free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); - -out_err: - return 0; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). - */ -static void unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - unsigned long startaddr; - int npages; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - startaddr = sg_dma_address(sglist) & PAGE_MASK; - dma_dom = to_dma_ops_domain(domain); - npages = sg_num_pages(dev, sglist, nelems); - - __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir); -} - -/* - * The exported alloc_coherent function for dma_ops. 
- */ -static void *alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag, - unsigned long attrs) -{ - u64 dma_mask = dev->coherent_dma_mask; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct page *page; - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) { - page = alloc_pages(flag, get_order(size)); - *dma_addr = page_to_phys(page); - return page_address(page); - } else if (IS_ERR(domain)) - return NULL; - - dma_dom = to_dma_ops_domain(domain); - size = PAGE_ALIGN(size); - dma_mask = dev->coherent_dma_mask; - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - flag |= __GFP_ZERO; - - page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); - if (!page) { - if (!gfpflags_allow_blocking(flag)) - return NULL; - - page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, - get_order(size), flag & __GFP_NOWARN); - if (!page) - return NULL; - } - - if (!dma_mask) - dma_mask = *dev->dma_mask; - - *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), - size, DMA_BIDIRECTIONAL, dma_mask); - - if (*dma_addr == DMA_MAPPING_ERROR) - goto out_free; - - return page_address(page); - -out_free: - - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, get_order(size)); - - return NULL; -} - -/* - * The exported free_coherent function for dma_ops. - */ -static void free_coherent(struct device *dev, size_t size, - void *virt_addr, dma_addr_t dma_addr, - unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct page *page; - - page = virt_to_page(virt_addr); - size = PAGE_ALIGN(size); - - domain = get_domain(dev); - if (IS_ERR(domain)) - goto free_mem; - - dma_dom = to_dma_ops_domain(domain); - - __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL); - -free_mem: - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, get_order(size)); -} - -/* - * This function is called by the DMA layer to find out if we can handle a - * particular device. It is part of the dma_ops. 
- */ -static int amd_iommu_dma_supported(struct device *dev, u64 mask) -{ - if (!dma_direct_supported(dev, mask)) - return 0; - return check_device(dev); -} - -static const struct dma_map_ops amd_iommu_dma_ops = { - .alloc = alloc_coherent, - .free = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, - .mmap = dma_common_mmap, - .get_sgtable = dma_common_get_sgtable, -}; - -static int init_reserved_iova_ranges(void) -{ - struct pci_dev *pdev = NULL; - struct iova *val; - - init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN); - - lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock, - &reserved_rbtree_key); - - /* MSI memory range */ - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END)); - if (!val) { - pr_err("Reserving MSI range failed\n"); - return -ENOMEM; - } - - /* HT memory range */ - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END)); - if (!val) { - pr_err("Reserving HT range failed\n"); - return -ENOMEM; - } - - /* - * Memory used for PCI resources - * FIXME: Check whether we can reserve the PCI-hole completly - */ - for_each_pci_dev(pdev) { - int i; - - for (i = 0; i < PCI_NUM_RESOURCES; ++i) { - struct resource *r = &pdev->resource[i]; - - if (!(r->flags & IORESOURCE_MEM)) - continue; - - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(r->start), - IOVA_PFN(r->end)); - if (!val) { - pci_err(pdev, "Reserve pci-resource range %pR failed\n", r); - return -ENOMEM; - } - } - } - - return 0; -} - int __init amd_iommu_init_api(void) { int ret, err = 0; @@ -2860,10 +2308,6 @@ int __init amd_iommu_init_api(void) if (ret) return ret; - ret = init_reserved_iova_ranges(); - if (ret) - return ret; - err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) return err; @@ -2964,7 +2408,6 @@ out_err: static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *pdomain; - struct dma_ops_domain *dma_domain; switch (type) { case IOMMU_DOMAIN_UNMANAGED: @@ -2985,12 +2428,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) break; case IOMMU_DOMAIN_DMA: - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) { + pdomain = dma_ops_domain_alloc(); + if (!pdomain) { pr_err("Failed to allocate\n"); return NULL; } - pdomain = &dma_domain->domain; break; case IOMMU_DOMAIN_IDENTITY: pdomain = protection_domain_alloc(); @@ -3009,7 +2451,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; - struct dma_ops_domain *dma_dom; domain = to_pdomain(dom); @@ -3024,8 +2465,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) switch (dom->type) { case IOMMU_DOMAIN_DMA: /* Now release the domain */ - dma_dom = to_dma_ops_domain(domain); - dma_ops_domain_free(dma_dom); + dma_ops_domain_free(domain); break; default: if (domain->mode != PAGE_MODE_NONE) @@ -3081,6 +2521,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, return -EINVAL; dev_data = dev->archdata.iommu; + dev_data->defer_attach = false; iommu = amd_iommu_rlookup_table[dev_data->devid]; if (!iommu) @@ -3238,19 +2679,6 @@ static void amd_iommu_put_resv_regions(struct device *dev, kfree(entry); } -static void amd_iommu_apply_resv_region(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region) -{ - struct dma_ops_domain 
*dma_dom = to_dma_ops_domain(to_pdomain(domain)); - unsigned long start, end; - - start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length - 1); - - WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); -} - static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { @@ -3287,9 +2715,9 @@ const struct iommu_ops amd_iommu_ops = { .add_device = amd_iommu_add_device, .remove_device = amd_iommu_remove_device, .device_group = amd_iommu_device_group, + .domain_get_attr = amd_iommu_domain_get_attr, .get_resv_regions = amd_iommu_get_resv_regions, .put_resv_regions = amd_iommu_put_resv_regions, - .apply_resv_region = amd_iommu_apply_resv_region, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, .flush_iotlb_all = amd_iommu_flush_iotlb_all, @@ -3601,9 +3029,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr); struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) { struct protection_domain *pdomain; + struct iommu_domain *io_domain; + struct device *dev = &pdev->dev; - pdomain = get_domain(&pdev->dev); - if (IS_ERR(pdomain)) + if (!check_device(dev)) + return NULL; + + pdomain = get_dev_data(dev)->domain; + if (pdomain == NULL && get_dev_data(dev)->defer_attach) { + get_dev_data(dev)->defer_attach = false; + io_domain = iommu_get_domain_for_dev(dev); + pdomain = to_pdomain(io_domain); + attach_device(dev, pdomain); + } + if (pdomain == NULL) + return NULL; + + if (!dma_ops_domain(pdomain)) return NULL; /* Only return IOMMUv2 domains */ From da6b05dce2a9c69880c3b9838938d5714703e80d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 16 Sep 2019 22:29:36 +0200 Subject: [PATCH 13/62] iommu/qcom: Simplify a test in 'qcom_iommu_add_device()' 'iommu_group_get_for_dev()' never returns NULL, so this test can be simplified a bit. This way, the test is consistent with all other calls to 'iommu_group_get_for_dev()'. Signed-off-by: Christophe JAILLET Reviewed-by: Dan Carpenter Signed-off-by: Joerg Roedel --- drivers/iommu/qcom_iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c31e7bc4ccbe..3d99ec868892 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev) } group = iommu_group_get_for_dev(dev); - if (IS_ERR_OR_NULL(group)) - return PTR_ERR_OR_ZERO(group); + if (IS_ERR(group)) + return PTR_ERR(group); iommu_group_put(group); iommu_device_link(&qcom_iommu->iommu, dev); From 1ee0186b9a128a872887e16e2d1520ea37a95dc4 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 21 Sep 2019 15:06:44 +0800 Subject: [PATCH 14/62] iommu/vt-d: Refactor find_domain() helper Current find_domain() helper checks and does the deferred domain attachment and return the domain in use. This isn't always the use case for the callers. Some callers only want to retrieve the current domain in use. This refactors find_domain() into two helpers: 1) find_domain() only returns the domain in use; 2) deferred_attach_domain() does the deferred domain attachment if required and return the domain in use. 
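
For illustration only (these call sites are hypothetical sketches, simplified
from the hunks below), the intended split on the caller side looks roughly
like:

	struct dmar_domain *domain;

	/* Query-only path: report the domain in use, never attach */
	domain = find_domain(dev);
	if (!domain)
		return NULL;

	/* DMA API path: complete any deferred attachment first */
	domain = deferred_attach_domain(dev);
	if (!domain)
		return DMA_MAPPING_ERROR;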
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3f974919d3bd..e70cc1c5055f 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain) spin_unlock_irqrestore(&device_domain_lock, flags); } -/* - * find_domain - * Note: we use struct device->archdata.iommu stores the info - */ static struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; + if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO || + dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)) + return NULL; + + /* No lock here, assumes no domain exit in normal case */ + info = dev->archdata.iommu; + if (likely(info)) + return info->domain; + + return NULL; +} + +static struct dmar_domain *deferred_attach_domain(struct device *dev) +{ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { struct iommu_domain *domain; @@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev) intel_iommu_attach_device(domain, dev); } - /* No lock here, assumes no domain exit in normal case */ - info = dev->archdata.iommu; - - if (likely(info)) - return info->domain; - return NULL; + return find_domain(dev); } static inline struct device_domain_info * @@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, BUG_ON(dir == DMA_NONE); - domain = find_domain(dev); + domain = deferred_attach_domain(dev); if (!domain) return DMA_MAPPING_ERROR; @@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele if (!iommu_need_mapping(dev)) return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); - domain = find_domain(dev); + domain = deferred_attach_domain(dev); if (!domain) return 0; @@ -3819,7 +3824,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, int prot = 0; int ret; - domain = find_domain(dev); + domain = deferred_attach_domain(dev); if (WARN_ON(dir == DMA_NONE || !domain)) return DMA_MAPPING_ERROR; From d0635ebf85aa2cbc17a5b83eaa65f20fa71bf388 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 24 Sep 2019 08:40:54 +0100 Subject: [PATCH 15/62] dt-bindings: iommu: ipmmu-vmsa: Add r8a774b1 support Document RZ/G2N (R8A774B1) SoC bindings. Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Acked-by: Rob Herring Signed-off-by: Joerg Roedel --- Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt index b6bfbec3a849..020d6f226efb 100644 --- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt +++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt @@ -15,6 +15,7 @@ Required Properties: - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU. - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU. - "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU. + - "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU. - "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU. - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU. - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU. 
From 757f26a3a9ec2c072fdd1b14ec93daf1e4cfed8c Mon Sep 17 00:00:00 2001 From: Biju Das Date: Fri, 27 Sep 2019 11:53:21 +0100 Subject: [PATCH 16/62] iommu/ipmmu-vmsa: Hook up r8a774b1 DT matching code Support RZ/G2N (R8A774B1) IPMMU. Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9da8309f7170..67bdf8e5c8ef 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -783,6 +783,7 @@ static int ipmmu_init_platform_device(struct device *dev, static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a774a1", }, + { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, { .soc_id = "r8a7795", }, { .soc_id = "r8a7796", }, @@ -794,6 +795,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = { }; static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = { + { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, { .soc_id = "r8a7795", .revision = "ES3.*" }, { .soc_id = "r8a77965", }, @@ -1017,6 +1019,9 @@ static const struct of_device_id ipmmu_of_ids[] = { }, { .compatible = "renesas,ipmmu-r8a774a1", .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a774b1", + .data = &ipmmu_features_rcar_gen3, }, { .compatible = "renesas,ipmmu-r8a774c0", .data = &ipmmu_features_rcar_gen3, From 4c7c171f85b261f91270d405b7c7390aa6ddfb60 Mon Sep 17 00:00:00 2001 From: Yi L Liu Date: Wed, 2 Oct 2019 12:42:40 -0700 Subject: [PATCH 17/62] iommu: Introduce cache_invalidate API In any virtualization use case, when the first translation stage is "owned" by the guest OS, the host IOMMU driver has no knowledge of caching structure updates unless the guest invalidation activities are trapped by the virtualizer and passed down to the host. Since the invalidation data can be obtained from user space and will be written into physical IOMMU, we must allow security check at various layers. Therefore, generic invalidation data format are proposed here, model specific IOMMU drivers need to convert them into their own format. 
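
As a rough sketch (not part of this patch; the pasid/iova values, domain and
dev are assumed to come from the virtualizer's calling context), a host-side
caller relaying a guest IOTLB invalidation would fill the generic structure
and call the new API like this:

	struct iommu_cache_invalidate_info inv_info = {
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_ADDR,
		.addr_info	= {
			.flags		= IOMMU_INV_ADDR_FLAGS_PASID,
			.pasid		= pasid,
			.addr		= iova,
			.granule_size	= SZ_4K,	/* 4KiB pages */
			.nb_granules	= 1,
		},
	};

	ret = iommu_cache_invalidate(domain, dev, &inv_info);

The model specific driver behind domain->ops then converts this generic
description into its own invalidation commands.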
Signed-off-by: Yi L Liu Signed-off-by: Jacob Pan Signed-off-by: Ashok Raj Signed-off-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Reviewed-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 10 ++++ include/linux/iommu.h | 14 +++++ include/uapi/linux/iommu.h | 110 +++++++++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d658c7c6a2ab..6ca9d28c08bb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1665,6 +1665,16 @@ out_unlock: } EXPORT_SYMBOL_GPL(iommu_attach_device); +int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev, + struct iommu_cache_invalidate_info *inv_info) +{ + if (unlikely(!domain->ops->cache_invalidate)) + return -ENODEV; + + return domain->ops->cache_invalidate(domain, dev, inv_info); +} +EXPORT_SYMBOL_GPL(iommu_cache_invalidate); + static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 29bac5345563..9b22055e6f85 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -244,6 +244,7 @@ struct iommu_iotlb_gather { * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response + * @cache_invalidate: invalidate translation caches * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { @@ -306,6 +307,8 @@ struct iommu_ops { int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); + int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, + struct iommu_cache_invalidate_info *inv_info); unsigned long pgsize_bitmap; }; @@ -417,6 +420,9 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); +extern int iommu_cache_invalidate(struct iommu_domain *domain, + struct device *dev, + struct iommu_cache_invalidate_info *inv_info); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, @@ -1005,6 +1011,14 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle) return IOMMU_PASID_INVALID; } +static inline int +iommu_cache_invalidate(struct iommu_domain *domain, + struct device *dev, + struct iommu_cache_invalidate_info *inv_info) +{ + return -ENODEV; +} + #endif /* CONFIG_IOMMU_API */ #ifdef CONFIG_IOMMU_DEBUGFS diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index fc00c5d4741b..f3e96214df8e 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -152,4 +152,114 @@ struct iommu_page_response { __u32 code; }; +/* defines the granularity of the invalidation */ +enum iommu_inv_granularity { + IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */ + IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */ + IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */ + IOMMU_INV_GRANU_NR, /* number of invalidation granularities */ +}; + +/** + * struct iommu_inv_addr_info - Address Selective Invalidation Structure + * + * @flags: indicates the granularity of the address-selective invalidation + * - If the PASID bit is set, the @pasid field is populated and the invalidation + * relates to cache entries tagged with 
this PASID and matching the address + * range. + * - If ARCHID bit is set, @archid is populated and the invalidation relates + * to cache entries tagged with this architecture specific ID and matching + * the address range. + * - Both PASID and ARCHID can be set as they may tag different caches. + * - If neither PASID or ARCHID is set, global addr invalidation applies. + * - The LEAF flag indicates whether only the leaf PTE caching needs to be + * invalidated and other paging structure caches can be preserved. + * @pasid: process address space ID + * @archid: architecture-specific ID + * @addr: first stage/level input address + * @granule_size: page/block size of the mapping in bytes + * @nb_granules: number of contiguous granules to be invalidated + */ +struct iommu_inv_addr_info { +#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0) +#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1) +#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2) + __u32 flags; + __u32 archid; + __u64 pasid; + __u64 addr; + __u64 granule_size; + __u64 nb_granules; +}; + +/** + * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure + * + * @flags: indicates the granularity of the PASID-selective invalidation + * - If the PASID bit is set, the @pasid field is populated and the invalidation + * relates to cache entries tagged with this PASID and matching the address + * range. + * - If the ARCHID bit is set, the @archid is populated and the invalidation + * relates to cache entries tagged with this architecture specific ID and + * matching the address range. + * - Both PASID and ARCHID can be set as they may tag different caches. + * - At least one of PASID or ARCHID must be set. + * @pasid: process address space ID + * @archid: architecture-specific ID + */ +struct iommu_inv_pasid_info { +#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0) +#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1) + __u32 flags; + __u32 archid; + __u64 pasid; +}; + +/** + * struct iommu_cache_invalidate_info - First level/stage invalidation + * information + * @version: API version of this structure + * @cache: bitfield that allows to select which caches to invalidate + * @granularity: defines the lowest granularity used for the invalidation: + * domain > PASID > addr + * @padding: reserved for future use (should be zero) + * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID + * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR + * + * Not all the combinations of cache/granularity are valid: + * + * +--------------+---------------+---------------+---------------+ + * | type / | DEV_IOTLB | IOTLB | PASID | + * | granularity | | | cache | + * +==============+===============+===============+===============+ + * | DOMAIN | N/A | Y | Y | + * +--------------+---------------+---------------+---------------+ + * | PASID | Y | Y | Y | + * +--------------+---------------+---------------+---------------+ + * | ADDR | Y | Y | N/A | + * +--------------+---------------+---------------+---------------+ + * + * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than + * @version and @cache. + * + * If multiple cache types are invalidated simultaneously, they all + * must support the used granularity. 
+ */ +struct iommu_cache_invalidate_info { +#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1 + __u32 version; +/* IOMMU paging structure cache */ +#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */ +#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */ +#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */ +#define IOMMU_CACHE_INV_TYPE_NR (3) + __u8 cache; + __u8 granularity; + __u8 padding[2]; + union { + struct iommu_inv_pasid_info pasid_info; + struct iommu_inv_addr_info addr_info; + }; +}; + #endif /* _UAPI_IOMMU_H */ From fa83433c92e340822a056a610a4fa2063a3db304 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Wed, 2 Oct 2019 12:42:41 -0700 Subject: [PATCH 18/62] iommu: Add I/O ASID allocator Some devices might support multiple DMA address spaces, in particular those that have the PCI PASID feature. PASID (Process Address Space ID) allows to share process address spaces with devices (SVA), partition a device into VM-assignable entities (VFIO mdev) or simply provide multiple DMA address space to kernel drivers. Add a global PASID allocator usable by different drivers at the same time. Name it I/O ASID to avoid confusion with ASIDs allocated by arch code, which are usually a separate ID space. The IOASID space is global. Each device can have its own PASID space, but by convention the IOMMU ended up having a global PASID space, so that with SVA, each mm_struct is associated to a single PASID. The allocator is primarily used by IOMMU subsystem but in rare occasions drivers would like to allocate PASIDs for devices that aren't managed by an IOMMU, using the same ID space as IOMMU. Signed-off-by: Jean-Philippe Brucker Signed-off-by: Jacob Pan Reviewed-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 4 ++ drivers/iommu/Makefile | 1 + drivers/iommu/ioasid.c | 151 +++++++++++++++++++++++++++++++++++++++++ include/linux/ioasid.h | 48 +++++++++++++ 4 files changed, 204 insertions(+) create mode 100644 drivers/iommu/ioasid.c create mode 100644 include/linux/ioasid.h diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e3842eabcfdd..fd50ddffffbf 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -3,6 +3,10 @@ config IOMMU_IOVA tristate +# The IOASID library may also be used by non-IOMMU_API users +config IOASID + tristate + # IOMMU_API always gets selected by whoever wants it. config IOMMU_API bool diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 4f405f926e73..35d17094fe3b 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o +obj-$(CONFIG_IOASID) += ioasid.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c new file mode 100644 index 000000000000..aadf500c281c --- /dev/null +++ b/drivers/iommu/ioasid.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * I/O Address Space ID allocator. There is one global IOASID space, split into + * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and + * free IOASIDs with ioasid_alloc and ioasid_free. 
+ */ +#include +#include +#include +#include +#include + +struct ioasid_data { + ioasid_t id; + struct ioasid_set *set; + void *private; + struct rcu_head rcu; +}; + +static DEFINE_XARRAY_ALLOC(ioasid_xa); + +/** + * ioasid_set_data - Set private data for an allocated ioasid + * @ioasid: the ID to set data + * @data: the private data + * + * For IOASID that is already allocated, private data can be set + * via this API. Future lookup can be done via ioasid_find. + */ +int ioasid_set_data(ioasid_t ioasid, void *data) +{ + struct ioasid_data *ioasid_data; + int ret = 0; + + xa_lock(&ioasid_xa); + ioasid_data = xa_load(&ioasid_xa, ioasid); + if (ioasid_data) + rcu_assign_pointer(ioasid_data->private, data); + else + ret = -ENOENT; + xa_unlock(&ioasid_xa); + + /* + * Wait for readers to stop accessing the old private data, so the + * caller can free it. + */ + if (!ret) + synchronize_rcu(); + + return ret; +} +EXPORT_SYMBOL_GPL(ioasid_set_data); + +/** + * ioasid_alloc - Allocate an IOASID + * @set: the IOASID set + * @min: the minimum ID (inclusive) + * @max: the maximum ID (inclusive) + * @private: data private to the caller + * + * Allocate an ID between @min and @max. The @private pointer is stored + * internally and can be retrieved with ioasid_find(). + * + * Return: the allocated ID on success, or %INVALID_IOASID on failure. + */ +ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, + void *private) +{ + struct ioasid_data *data; + ioasid_t id; + + data = kzalloc(sizeof(*data), GFP_ATOMIC); + if (!data) + return INVALID_IOASID; + + data->set = set; + data->private = private; + + if (xa_alloc(&ioasid_xa, &id, data, XA_LIMIT(min, max), GFP_KERNEL)) { + pr_err("Failed to alloc ioasid from %d to %d\n", min, max); + goto exit_free; + } + data->id = id; + + return id; +exit_free: + kfree(data); + return INVALID_IOASID; +} +EXPORT_SYMBOL_GPL(ioasid_alloc); + +/** + * ioasid_free - Free an IOASID + * @ioasid: the ID to remove + */ +void ioasid_free(ioasid_t ioasid) +{ + struct ioasid_data *ioasid_data; + + ioasid_data = xa_erase(&ioasid_xa, ioasid); + + kfree_rcu(ioasid_data, rcu); +} +EXPORT_SYMBOL_GPL(ioasid_free); + +/** + * ioasid_find - Find IOASID data + * @set: the IOASID set + * @ioasid: the IOASID to find + * @getter: function to call on the found object + * + * The optional getter function allows to take a reference to the found object + * under the rcu lock. The function can also check if the object is still valid: + * if @getter returns false, then the object is invalid and NULL is returned. + * + * If the IOASID exists, return the private pointer passed to ioasid_alloc. + * Private data can be NULL if not set. Return an error if the IOASID is not + * found, or if @set is not NULL and the IOASID does not belong to the set. 
+ */ +void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, + bool (*getter)(void *)) +{ + void *priv; + struct ioasid_data *ioasid_data; + + rcu_read_lock(); + ioasid_data = xa_load(&ioasid_xa, ioasid); + if (!ioasid_data) { + priv = ERR_PTR(-ENOENT); + goto unlock; + } + if (set && ioasid_data->set != set) { + /* data found but does not belong to the set */ + priv = ERR_PTR(-EACCES); + goto unlock; + } + /* Now IOASID and its set is verified, we can return the private data */ + priv = rcu_dereference(ioasid_data->private); + if (getter && !getter(priv)) + priv = NULL; +unlock: + rcu_read_unlock(); + + return priv; +} +EXPORT_SYMBOL_GPL(ioasid_find); + +MODULE_AUTHOR("Jean-Philippe Brucker "); +MODULE_AUTHOR("Jacob Pan "); +MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h new file mode 100644 index 000000000000..17337a13f06e --- /dev/null +++ b/include/linux/ioasid.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_IOASID_H +#define __LINUX_IOASID_H + +#include +#include + +#define INVALID_IOASID ((ioasid_t)-1) +typedef unsigned int ioasid_t; + +struct ioasid_set { + int dummy; +}; + +#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 } + +#if IS_ENABLED(CONFIG_IOASID) +ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, + void *private); +void ioasid_free(ioasid_t ioasid); +void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, + bool (*getter)(void *)); +int ioasid_set_data(ioasid_t ioasid, void *data); + +#else /* !CONFIG_IOASID */ +static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, + ioasid_t max, void *private) +{ + return INVALID_IOASID; +} + +static inline void ioasid_free(ioasid_t ioasid) +{ +} + +static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, + bool (*getter)(void *)) +{ + return NULL; +} + +static inline int ioasid_set_data(ioasid_t ioasid, void *data) +{ + return -ENOTSUPP; +} + +#endif /* CONFIG_IOASID */ +#endif /* __LINUX_IOASID_H */ From e5c0bd7f2206cd288029edb6afbfde93c73b4048 Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Wed, 2 Oct 2019 12:42:42 -0700 Subject: [PATCH 19/62] iommu/ioasid: Add custom allocators IOASID allocation may rely on platform specific methods. One use case is that when running in the guest, in order to obtain system wide global IOASIDs, emulated allocation interface is needed to communicate with the host. Here we call these platform specific allocators custom allocators. Custom IOASID allocators can be registered at runtime and take precedence over the default XArray allocator. They have these attributes: - provides platform specific alloc()/free() functions with private data. - allocation results lookup are not provided by the allocator, lookup request must be done by the IOASID framework by its own XArray. - allocators can be unregistered at runtime, either fallback to the next custom allocator or to the default allocator. - custom allocators can share the same set of alloc()/free() helpers, in this case they also share the same IOASID space, thus the same XArray. - switching between allocators requires all outstanding IOASIDs to be freed unless the two allocators share the same alloc()/free() helpers. 
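
A minimal sketch of such a custom allocator follows from the attributes
above (the my_vcmd_* helpers and the my_iommu cookie are placeholders for,
e.g., a paravirtualized allocation interface, not real functions):

	static ioasid_t my_vcmd_alloc(ioasid_t min, ioasid_t max, void *data)
	{
		/* Platform specific method, e.g. a virtual command/hypercall */
		return my_vcmd_issue_alloc(data, min, max);
	}

	static void my_vcmd_free(ioasid_t ioasid, void *data)
	{
		my_vcmd_issue_free(data, ioasid);
	}

	static struct ioasid_allocator_ops my_vcmd_ops = {
		.alloc	= my_vcmd_alloc,
		.free	= my_vcmd_free,
		.pdata	= &my_iommu,	/* private data passed back to helpers */
	};

	/* Takes precedence over the default XArray allocator once registered */
	ret = ioasid_register_allocator(&my_vcmd_ops);
	...
	ioasid_unregister_allocator(&my_vcmd_ops);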
Signed-off-by: Jean-Philippe Brucker Signed-off-by: Jacob Pan Link: https://lkml.org/lkml/2019/4/26/462 Reviewed-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Signed-off-by: Joerg Roedel --- drivers/iommu/ioasid.c | 289 +++++++++++++++++++++++++++++++++++++++-- include/linux/ioasid.h | 28 ++++ 2 files changed, 308 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index aadf500c281c..0f8dd377aada 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -17,7 +17,245 @@ struct ioasid_data { struct rcu_head rcu; }; -static DEFINE_XARRAY_ALLOC(ioasid_xa); +/* + * struct ioasid_allocator_data - Internal data structure to hold information + * about an allocator. There are two types of allocators: + * + * - Default allocator always has its own XArray to track the IOASIDs allocated. + * - Custom allocators may share allocation helpers with different private data. + * Custom allocators that share the same helper functions also share the same + * XArray. + * Rules: + * 1. Default allocator is always available, not dynamically registered. This is + * to prevent race conditions with early boot code that want to register + * custom allocators or allocate IOASIDs. + * 2. Custom allocators take precedence over the default allocator. + * 3. When all custom allocators sharing the same helper functions are + * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be + * freed. Otherwise, outstanding IOASIDs will be lost and orphaned. + * 4. When switching between custom allocators sharing the same helper + * functions, outstanding IOASIDs are preserved. + * 5. When switching between custom allocator and default allocator, all IOASIDs + * must be freed to ensure unadulterated space for the new allocator. 
+ * + * @ops: allocator helper functions and its data + * @list: registered custom allocators + * @slist: allocators share the same ops but different data + * @flags: attributes of the allocator + * @xa: xarray holds the IOASID space + * @rcu: used for kfree_rcu when unregistering allocator + */ +struct ioasid_allocator_data { + struct ioasid_allocator_ops *ops; + struct list_head list; + struct list_head slist; +#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */ + unsigned long flags; + struct xarray xa; + struct rcu_head rcu; +}; + +static DEFINE_SPINLOCK(ioasid_allocator_lock); +static LIST_HEAD(allocators_list); + +static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque); +static void default_free(ioasid_t ioasid, void *opaque); + +static struct ioasid_allocator_ops default_ops = { + .alloc = default_alloc, + .free = default_free, +}; + +static struct ioasid_allocator_data default_allocator = { + .ops = &default_ops, + .flags = 0, + .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC), +}; + +static struct ioasid_allocator_data *active_allocator = &default_allocator; + +static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque) +{ + ioasid_t id; + + if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) { + pr_err("Failed to alloc ioasid from %d to %d\n", min, max); + return INVALID_IOASID; + } + + return id; +} + +static void default_free(ioasid_t ioasid, void *opaque) +{ + struct ioasid_data *ioasid_data; + + ioasid_data = xa_erase(&default_allocator.xa, ioasid); + kfree_rcu(ioasid_data, rcu); +} + +/* Allocate and initialize a new custom allocator with its helper functions */ +static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *ia_data; + + ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC); + if (!ia_data) + return NULL; + + xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC); + INIT_LIST_HEAD(&ia_data->slist); + ia_data->flags |= IOASID_ALLOCATOR_CUSTOM; + ia_data->ops = ops; + + /* For tracking custom allocators that share the same ops */ + list_add_tail(&ops->list, &ia_data->slist); + + return ia_data; +} + +static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b) +{ + return (a->free == b->free) && (a->alloc == b->alloc); +} + +/** + * ioasid_register_allocator - register a custom allocator + * @ops: the custom allocator ops to be registered + * + * Custom allocators take precedence over the default xarray based allocator. + * Private data associated with the IOASID allocated by the custom allocators + * are managed by IOASID framework similar to data stored in xa by default + * allocator. + * + * There can be multiple allocators registered but only one is active. In case + * of runtime removal of a custom allocator, the next one is activated based + * on the registration ordering. + * + * Multiple allocators can share the same alloc() function, in this case the + * IOASID space is shared. + */ +int ioasid_register_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *ia_data; + struct ioasid_allocator_data *pallocator; + int ret = 0; + + spin_lock(&ioasid_allocator_lock); + + ia_data = ioasid_alloc_allocator(ops); + if (!ia_data) { + ret = -ENOMEM; + goto out_unlock; + } + + /* + * No particular preference, we activate the first one and keep + * the later registered allocators in a list in case the first one gets + * removed due to hotplug. 
+ */ + if (list_empty(&allocators_list)) { + WARN_ON(active_allocator != &default_allocator); + /* Use this new allocator if default is not active */ + if (xa_empty(&active_allocator->xa)) { + rcu_assign_pointer(active_allocator, ia_data); + list_add_tail(&ia_data->list, &allocators_list); + goto out_unlock; + } + pr_warn("Default allocator active with outstanding IOASID\n"); + ret = -EAGAIN; + goto out_free; + } + + /* Check if the allocator is already registered */ + list_for_each_entry(pallocator, &allocators_list, list) { + if (pallocator->ops == ops) { + pr_err("IOASID allocator already registered\n"); + ret = -EEXIST; + goto out_free; + } else if (use_same_ops(pallocator->ops, ops)) { + /* + * If the new allocator shares the same ops, + * then they will share the same IOASID space. + * We should put them under the same xarray. + */ + list_add_tail(&ops->list, &pallocator->slist); + goto out_free; + } + } + list_add_tail(&ia_data->list, &allocators_list); + + spin_unlock(&ioasid_allocator_lock); + return 0; +out_free: + kfree(ia_data); +out_unlock: + spin_unlock(&ioasid_allocator_lock); + return ret; +} +EXPORT_SYMBOL_GPL(ioasid_register_allocator); + +/** + * ioasid_unregister_allocator - Remove a custom IOASID allocator ops + * @ops: the custom allocator to be removed + * + * Remove an allocator from the list, activate the next allocator in + * the order it was registered. Or revert to default allocator if all + * custom allocators are unregistered without outstanding IOASIDs. + */ +void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *pallocator; + struct ioasid_allocator_ops *sops; + + spin_lock(&ioasid_allocator_lock); + if (list_empty(&allocators_list)) { + pr_warn("No custom IOASID allocators active!\n"); + goto exit_unlock; + } + + list_for_each_entry(pallocator, &allocators_list, list) { + if (!use_same_ops(pallocator->ops, ops)) + continue; + + if (list_is_singular(&pallocator->slist)) { + /* No shared helper functions */ + list_del(&pallocator->list); + /* + * All IOASIDs should have been freed before + * the last allocator that shares the same ops + * is unregistered. 
+ */ + WARN_ON(!xa_empty(&pallocator->xa)); + if (list_empty(&allocators_list)) { + pr_info("No custom IOASID allocators, switch to default.\n"); + rcu_assign_pointer(active_allocator, &default_allocator); + } else if (pallocator == active_allocator) { + rcu_assign_pointer(active_allocator, + list_first_entry(&allocators_list, + struct ioasid_allocator_data, list)); + pr_info("IOASID allocator changed"); + } + kfree_rcu(pallocator, rcu); + break; + } + /* + * Find the matching shared ops to delete, + * but keep outstanding IOASIDs + */ + list_for_each_entry(sops, &pallocator->slist, list) { + if (sops == ops) { + list_del(&ops->list); + break; + } + } + break; + } + +exit_unlock: + spin_unlock(&ioasid_allocator_lock); +} +EXPORT_SYMBOL_GPL(ioasid_unregister_allocator); /** * ioasid_set_data - Set private data for an allocated ioasid @@ -32,13 +270,13 @@ int ioasid_set_data(ioasid_t ioasid, void *data) struct ioasid_data *ioasid_data; int ret = 0; - xa_lock(&ioasid_xa); - ioasid_data = xa_load(&ioasid_xa, ioasid); + spin_lock(&ioasid_allocator_lock); + ioasid_data = xa_load(&active_allocator->xa, ioasid); if (ioasid_data) rcu_assign_pointer(ioasid_data->private, data); else ret = -ENOENT; - xa_unlock(&ioasid_xa); + spin_unlock(&ioasid_allocator_lock); /* * Wait for readers to stop accessing the old private data, so the @@ -67,6 +305,7 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void *private) { struct ioasid_data *data; + void *adata; ioasid_t id; data = kzalloc(sizeof(*data), GFP_ATOMIC); @@ -76,14 +315,31 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, data->set = set; data->private = private; - if (xa_alloc(&ioasid_xa, &id, data, XA_LIMIT(min, max), GFP_KERNEL)) { - pr_err("Failed to alloc ioasid from %d to %d\n", min, max); + /* + * Custom allocator needs allocator data to perform platform specific + * operations. + */ + spin_lock(&ioasid_allocator_lock); + adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? 
active_allocator->ops->pdata : data; + id = active_allocator->ops->alloc(min, max, adata); + if (id == INVALID_IOASID) { + pr_err("Failed ASID allocation %lu\n", active_allocator->flags); + goto exit_free; + } + + if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) && + xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) { + /* Custom allocator needs framework to store and track allocation results */ + pr_err("Failed to alloc ioasid from %d\n", id); + active_allocator->ops->free(id, active_allocator->ops->pdata); goto exit_free; } data->id = id; + spin_unlock(&ioasid_allocator_lock); return id; exit_free: + spin_unlock(&ioasid_allocator_lock); kfree(data); return INVALID_IOASID; } @@ -97,9 +353,22 @@ void ioasid_free(ioasid_t ioasid) { struct ioasid_data *ioasid_data; - ioasid_data = xa_erase(&ioasid_xa, ioasid); + spin_lock(&ioasid_allocator_lock); + ioasid_data = xa_load(&active_allocator->xa, ioasid); + if (!ioasid_data) { + pr_err("Trying to free unknown IOASID %u\n", ioasid); + goto exit_unlock; + } - kfree_rcu(ioasid_data, rcu); + active_allocator->ops->free(ioasid, active_allocator->ops->pdata); + /* Custom allocator needs additional steps to free the xa element */ + if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) { + ioasid_data = xa_erase(&active_allocator->xa, ioasid); + kfree_rcu(ioasid_data, rcu); + } + +exit_unlock: + spin_unlock(&ioasid_allocator_lock); } EXPORT_SYMBOL_GPL(ioasid_free); @@ -122,9 +391,11 @@ void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, { void *priv; struct ioasid_data *ioasid_data; + struct ioasid_allocator_data *idata; rcu_read_lock(); - ioasid_data = xa_load(&ioasid_xa, ioasid); + idata = rcu_dereference(active_allocator); + ioasid_data = xa_load(&idata->xa, ioasid); if (!ioasid_data) { priv = ERR_PTR(-ENOENT); goto unlock; diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h index 17337a13f06e..6f000d7a0ddc 100644 --- a/include/linux/ioasid.h +++ b/include/linux/ioasid.h @@ -7,11 +7,28 @@ #define INVALID_IOASID ((ioasid_t)-1) typedef unsigned int ioasid_t; +typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data); +typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data); struct ioasid_set { int dummy; }; +/** + * struct ioasid_allocator_ops - IOASID allocator helper functions and data + * + * @alloc: helper function to allocate IOASID + * @free: helper function to free IOASID + * @list: for tracking ops that share helper functions but not data + * @pdata: data belong to the allocator, provided when calling alloc() + */ +struct ioasid_allocator_ops { + ioasid_alloc_fn_t alloc; + ioasid_free_fn_t free; + struct list_head list; + void *pdata; +}; + #define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 } #if IS_ENABLED(CONFIG_IOASID) @@ -20,6 +37,8 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void ioasid_free(ioasid_t ioasid); void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)); +int ioasid_register_allocator(struct ioasid_allocator_ops *allocator); +void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator); int ioasid_set_data(ioasid_t ioasid, void *data); #else /* !CONFIG_IOASID */ @@ -39,6 +58,15 @@ static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, return NULL; } +static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator) +{ + return -ENOTSUPP; +} + +static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator) +{ +} + 
static inline int ioasid_set_data(ioasid_t ioasid, void *data) { return -ENOTSUPP; From 808be0aae53a3675337fad9cde616e086bdc8287 Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Wed, 2 Oct 2019 12:42:43 -0700 Subject: [PATCH 20/62] iommu: Introduce guest PASID bind function Guest shared virtual address (SVA) may require host to shadow guest PASID tables. Guest PASID can also be allocated from the host via enlightened interfaces. In this case, guest needs to bind the guest mm, i.e. cr3 in guest physical address to the actual PASID table in the host IOMMU. Nesting will be turned on such that guest virtual address can go through a two level translation: - 1st level translates GVA to GPA - 2nd level translates GPA to HPA This patch introduces APIs to bind guest PASID data to the assigned device entry in the physical IOMMU. See the diagram below for usage explanation. .-------------. .---------------------------. | vIOMMU | | Guest process mm, FL only | | | '---------------------------' .----------------/ | PASID Entry |--- PASID cache flush - '-------------' | | | V | | GP '-------------' Guest ------| Shadow |----------------------- GP->HP* --------- v v | Host v .-------------. .----------------------. | pIOMMU | | Bind FL for GVA-GPA | | | '----------------------' .----------------/ | | PASID Entry | V (Nested xlate) '----------------\.---------------------. | | |Set SL to GPA-HPA | | | '---------------------' '-------------' Where: - FL = First level/stage one page tables - SL = Second level/stage two page tables - GP = Guest PASID - HP = Host PASID * Conversion needed if non-identity GP-HP mapping option is chosen. Signed-off-by: Jacob Pan Signed-off-by: Liu Yi L Reviewed-by: Jean-Philippe Brucker Reviewed-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 20 +++++++++++++ include/linux/iommu.h | 22 ++++++++++++++ include/uapi/linux/iommu.h | 59 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 6ca9d28c08bb..4486c4e6830a 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1675,6 +1675,26 @@ int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev, } EXPORT_SYMBOL_GPL(iommu_cache_invalidate); +int iommu_sva_bind_gpasid(struct iommu_domain *domain, + struct device *dev, struct iommu_gpasid_bind_data *data) +{ + if (unlikely(!domain->ops->sva_bind_gpasid)) + return -ENODEV; + + return domain->ops->sva_bind_gpasid(domain, dev, data); +} +EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid); + +int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, + ioasid_t pasid) +{ + if (unlikely(!domain->ops->sva_unbind_gpasid)) + return -ENODEV; + + return domain->ops->sva_unbind_gpasid(dev, pasid); +} +EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); + static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9b22055e6f85..f8959f759e41 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IOMMU_READ (1 << 0) @@ -246,6 +247,8 @@ struct iommu_iotlb_gather { * @page_response: handle page request response * @cache_invalidate: invalidate translation caches * @pgsize_bitmap: bitmap of all possible supported page sizes + * @sva_bind_gpasid: bind guest pasid and mm + * @sva_unbind_gpasid: unbind guest pasid and mm */ struct iommu_ops { bool (*capable)(enum 
iommu_cap); @@ -309,6 +312,10 @@ struct iommu_ops { struct iommu_page_response *msg); int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, struct iommu_cache_invalidate_info *inv_info); + int (*sva_bind_gpasid)(struct iommu_domain *domain, + struct device *dev, struct iommu_gpasid_bind_data *data); + + int (*sva_unbind_gpasid)(struct device *dev, int pasid); unsigned long pgsize_bitmap; }; @@ -423,6 +430,10 @@ extern void iommu_detach_device(struct iommu_domain *domain, extern int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev, struct iommu_cache_invalidate_info *inv_info); +extern int iommu_sva_bind_gpasid(struct iommu_domain *domain, + struct device *dev, struct iommu_gpasid_bind_data *data); +extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, @@ -1018,6 +1029,17 @@ iommu_cache_invalidate(struct iommu_domain *domain, { return -ENODEV; } +static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain, + struct device *dev, struct iommu_gpasid_bind_data *data) +{ + return -ENODEV; +} + +static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, + struct device *dev, int pasid) +{ + return -ENODEV; +} #endif /* CONFIG_IOMMU_API */ diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index f3e96214df8e..4ad3496e5c43 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -262,4 +262,63 @@ struct iommu_cache_invalidate_info { }; }; +/** + * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest + * SVA binding. + * + * @flags: VT-d PASID table entry attributes + * @pat: Page attribute table data to compute effective memory type + * @emt: Extended memory type + * + * Only guest vIOMMU selectable and effective options are passed down to + * the host IOMMU. + */ +struct iommu_gpasid_bind_data_vtd { +#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */ +#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */ +#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */ +#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ +#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ +#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ + __u64 flags; + __u32 pat; + __u32 emt; +}; + +/** + * struct iommu_gpasid_bind_data - Information about device and guest PASID binding + * @version: Version of this data structure + * @format: PASID table entry format + * @flags: Additional information on guest bind request + * @gpgd: Guest page directory base of the guest mm to bind + * @hpasid: Process address space ID used for the guest mm in host IOMMU + * @gpasid: Process address space ID used for the guest mm in guest IOMMU + * @addr_width: Guest virtual address width + * @padding: Reserved for future use (should be zero) + * @vtd: Intel VT-d specific data + * + * Guest to host PASID mapping can be an identity or non-identity, where guest + * has its own PASID space. For non-identify mapping, guest to host PASID lookup + * is needed when VM programs guest PASID into an assigned device. VMM may + * trap such PASID programming then request host IOMMU driver to convert guest + * PASID to host PASID based on this bind data. 
+ */ +struct iommu_gpasid_bind_data { +#define IOMMU_GPASID_BIND_VERSION_1 1 + __u32 version; +#define IOMMU_PASID_FORMAT_INTEL_VTD 1 + __u32 format; +#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */ + __u64 flags; + __u64 gpgd; + __u64 hpasid; + __u64 gpasid; + __u32 addr_width; + __u8 padding[12]; + /* Vendor specific data */ + union { + struct iommu_gpasid_bind_data_vtd vtd; + }; +}; + #endif /* _UAPI_IOMMU_H */ From 470eb3b31134ada545dcb41e94a0c97b42c95803 Mon Sep 17 00:00:00 2001 From: "Suthikulpanit, Suravee" Date: Mon, 14 Oct 2019 20:06:19 +0000 Subject: [PATCH 21/62] iommu/amd: Simplify decoding logic for INVALID_PPR_REQUEST event Reuse the existing macro to simplify the code and improve readability. Cc: Joerg Roedel Cc: Gary R Hook Signed-off-by: Suravee Suthikulpanit Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1881fe8bdfed..0d2479546b77 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -617,8 +617,7 @@ retry: pasid, address, flags); break; case EVENT_TYPE_INV_PPR_REQ: - pasid = ((event[0] >> 16) & 0xFFFF) - | ((event[1] << 6) & 0xF0000); + pasid = PPR_PASID(*((u64 *)__evt)); tag = event[1] & 0x03FF; dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), From fb03082a54acd66c61535edfefe96b2ff88ce7e2 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Wed, 9 Oct 2019 19:59:33 +0800 Subject: [PATCH 22/62] memory: mtk-smi: Add PM suspend and resume ops In commit 4f0a1a1ae351 ("memory: mtk-smi: Invoke pm runtime_callback to enable clocks"), we use pm_runtime callbacks to enable/disable the smi larb clocks. This can leave the larb's clocks enabled during suspend: device_prepare() calls pm_runtime_get_noresume(), which keeps the larb's runtime PM status active across suspend, so our pm_runtime suspend callback that disables the corresponding clocks is never entered. This patch adds system suspend pm_ops to force-disable the clocks, using "LATE" ops to make sure the larb's clocks are disabled after the multimedia devices.
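The resulting pm_ops wiring follows a common pattern; as a minimal sketch with generic names (not the driver's exact code), the late system-sleep hooks reuse the runtime-PM callbacks via pm_runtime_force_suspend()/pm_runtime_force_resume():

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* disable the larb clocks here */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* enable the larb clocks here */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
	/* LATE: runs after the multimedia consumers have suspended */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};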
Fixes: 4f0a1a1ae351 ("memory: mtk-smi: Invoke pm runtime_callback to enable clocks") Signed-off-by: Anan Sun Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/memory/mtk-smi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 439d7d886873..a113e811faab 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) static const struct dev_pm_ops smi_larb_pm_ops = { SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver mtk_smi_larb_driver = { @@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev) static const struct dev_pm_ops smi_common_pm_ops = { SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver mtk_smi_common_driver = { From 42bb97b80f2e3bf592e3e99d109b67309aa1b30e Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Wed, 2 Oct 2019 14:29:23 -0300 Subject: [PATCH 23/62] iommu: rockchip: Free domain on .domain_free IOMMU domain resource life is well-defined, managed by .domain_alloc and .domain_free. Therefore, domain-specific resources shouldn't be tied to the device life, but instead to its domain. Signed-off-by: Ezequiel Garcia Reviewed-by: Robin Murphy Acked-by: Heiko Stuebner Signed-off-by: Joerg Roedel --- drivers/iommu/rockchip-iommu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 26290f310f90..e845bd01a1a2 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -979,13 +979,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) if (!dma_dev) return NULL; - rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL); + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); if (!rk_domain) return NULL; if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&rk_domain->domain)) - return NULL; + goto err_free_domain; /* * rk32xx iommus use a 2 level pagetable. @@ -1020,6 +1020,8 @@ err_free_dt: err_put_cookie: if (type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); +err_free_domain: + kfree(rk_domain); return NULL; } @@ -1048,6 +1050,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (domain->type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); + kfree(rk_domain); } static int rk_iommu_add_device(struct device *dev) From 3057fb9377eb5e73386dd0d8804bf72bdd23e391 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 18 Oct 2019 11:00:33 +0200 Subject: [PATCH 24/62] iommu/amd: Pass gfp flags to iommu_map_page() in amd_iommu_map() A recent commit added a gfp parameter to amd_iommu_map() to make it callable from atomic context, but forgot to pass it down to iommu_map_page() and left GFP_KERNEL there. This caused sleep-while-atomic warnings and needs to be fixed. 
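The shape of the fix, as a hedged sketch with invented example_* names (not the amd_iommu code itself): the gfp the caller was given must reach every allocation it can trigger, otherwise an atomic caller ends up in a sleeping GFP_KERNEL allocation.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_domain;

/* Hypothetical page-table helper standing in for iommu_map_page() */
static int example_map_page(struct example_domain *dom, unsigned long iova,
			    u64 paddr, size_t size, int prot, gfp_t gfp)
{
	void *table = kmalloc(64, gfp);	/* sleeps only if gfp allows it */

	kfree(table);
	return table ? 0 : -ENOMEM;
}

static int example_map(struct example_domain *dom, unsigned long iova,
		       u64 paddr, size_t size, int prot, gfp_t gfp)
{
	/* Pass the caller's gfp down; hard-coding GFP_KERNEL here is the bug */
	return example_map_page(dom, iova, paddr, size, prot, gfp);
}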
Reported-by: Qian Cai Reported-by: Dan Carpenter Fixes: 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map") Reviewed-by: Jerry Snitselaar Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 0d2479546b77..fb54df5c2e11 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2561,7 +2561,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, if (iommu_prot & IOMMU_WRITE) prot |= IOMMU_PROT_IW; - ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL); + ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp); domain_flush_np_cache(domain, iova, page_size); From 446152d5b6537bae576c321e4cc24ff690a69403 Mon Sep 17 00:00:00 2001 From: Navneet Kumar Date: Wed, 16 Oct 2019 13:50:24 +0200 Subject: [PATCH 25/62] iommu/tegra-smmu: Use non-secure register for flushing Use PTB_ASID instead of SMMU_CONFIG to flush smmu. PTB_ASID can be accessed from non-secure mode, SMMU_CONFIG cannot be. Using SMMU_CONFIG could pose a problem when kernel doesn't have secure mode access enabled from boot. Signed-off-by: Navneet Kumar Reviewed-by: Dmitry Osipenko Tested-by: Dmitry Osipenko Signed-off-by: Thierry Reding Signed-off-by: Joerg Roedel --- drivers/iommu/tegra-smmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 7293fc3f796d..0b74e17794b1 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, static inline void smmu_flush(struct tegra_smmu *smmu) { - smmu_readl(smmu, SMMU_CONFIG); + smmu_readl(smmu, SMMU_PTB_ASID); } static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) From e31e5929547edf396ea2c0873244c734c6bceafa Mon Sep 17 00:00:00 2001 From: Navneet Kumar Date: Wed, 16 Oct 2019 13:50:25 +0200 Subject: [PATCH 26/62] iommu/tegra-smmu: Fix client enablement order Enable clients' translation only after setting up the swgroups. 
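Read as a two-step recipe, the ordering looks like the sketch below (invented helpers, not the tegra-smmu source): program the swgroup's ASID first, and only then flip the per-client enable bits, so no client ever translates through an unconfigured swgroup.

#include <linux/types.h>

struct example_smmu;

bool example_program_swgroup(struct example_smmu *smmu, unsigned int swgroup,
			     unsigned int asid);
void example_enable_clients(struct example_smmu *smmu, unsigned int swgroup);

static void example_attach(struct example_smmu *smmu, unsigned int swgroup,
			   unsigned int asid)
{
	/* 1. Set up the swgroup -> ASID mapping first... */
	if (!example_program_swgroup(smmu, swgroup, asid))
		return;	/* unknown swgroup: do not enable any clients */

	/* 2. ...and only then turn on translation for its clients. */
	example_enable_clients(smmu, swgroup);
}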
Signed-off-by: Navneet Kumar Signed-off-by: Thierry Reding Signed-off-by: Joerg Roedel --- drivers/iommu/tegra-smmu.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 0b74e17794b1..f51fe9b48bb7 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, unsigned int i; u32 value; + group = tegra_smmu_find_swgroup(smmu, swgroup); + if (group) { + value = smmu_readl(smmu, group->reg); + value &= ~SMMU_ASID_MASK; + value |= SMMU_ASID_VALUE(asid); + value |= SMMU_ASID_ENABLE; + smmu_writel(smmu, value, group->reg); + } else { + pr_warn("%s group from swgroup %u not found\n", __func__, + swgroup); + /* No point moving ahead if group was not found */ + return; + } + for (i = 0; i < smmu->soc->num_clients; i++) { const struct tegra_mc_client *client = &smmu->soc->clients[i]; @@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, value |= BIT(client->smmu.bit); smmu_writel(smmu, value, client->smmu.reg); } - - group = tegra_smmu_find_swgroup(smmu, swgroup); - if (group) { - value = smmu_readl(smmu, group->reg); - value &= ~SMMU_ASID_MASK; - value |= SMMU_ASID_VALUE(asid); - value |= SMMU_ASID_ENABLE; - smmu_writel(smmu, value, group->reg); - } } static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, From 96d3ab802e4930a29a33934373157d6dff1b2c7e Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 16 Oct 2019 13:50:26 +0200 Subject: [PATCH 27/62] iommu/tegra-smmu: Fix page tables in > 4 GiB memory Page tables that reside in physical memory beyond the 4 GiB boundary are currently not working properly. The reason is that when the physical address for page directory entries is read, it gets truncated at 32 bits and can cause crashes when passing that address to the DMA API. Fix this by first casting the PDE value to a dma_addr_t and then using the page frame number mask for the SMMU instance to mask out the invalid bits, which are typically used for mapping attributes, etc. 
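The arithmetic is easy to check in isolation; this standalone demo uses an illustrative mask and PDE layout (attribute bits on top, PFN below), not Tegra's exact register format:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pde = 0xe0100000;	/* attribute bits | PFN 0x100000 */
	uint64_t pfn_mask = 0x3fffff;	/* say, 34-bit addresses, 4 KiB pages */

	uint64_t bad  = pde << 12;	/* wraps in 32-bit arithmetic */
	uint64_t good = (uint64_t)(pde & pfn_mask) << 12;

	/* prints bad=0 good=0x100000000 */
	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}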
Signed-off-by: Thierry Reding Signed-off-by: Joerg Roedel --- drivers/iommu/tegra-smmu.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index f51fe9b48bb7..965c097aa0e1 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) return (addr & smmu->pfn_mask) == addr; } -static dma_addr_t smmu_pde_to_dma(u32 pde) +static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde) { - return pde << 12; + return (dma_addr_t)(pde & smmu->pfn_mask) << 12; } static void smmu_flush_ptc_all(struct tegra_smmu *smmu) @@ -554,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, dma_addr_t *dmap) { unsigned int pd_index = iova_pd_index(iova); + struct tegra_smmu *smmu = as->smmu; struct page *pt_page; u32 *pd; @@ -562,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, return NULL; pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pd_index]); + *dmap = smmu_pde_to_dma(smmu, pd[pd_index]); return tegra_smmu_pte_offset(pt_page, iova); } @@ -604,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, } else { u32 *pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pde]); + *dmap = smmu_pde_to_dma(smmu, pd[pde]); } return tegra_smmu_pte_offset(as->pts[pde], iova); @@ -629,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) if (--as->count[pde] == 0) { struct tegra_smmu *smmu = as->smmu; u32 *pd = page_address(as->pd); - dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); + dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]); tegra_smmu_set_pde(as, iova, 0); From a5bbbf37c6f8522a1afd46c37b5a0d1ce63232b7 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 24 Oct 2019 14:54:10 +0200 Subject: [PATCH 28/62] iommu/amd: Do not re-fetch iommu->cmd_buf_tail The compiler is not smart enough to realize that iommu->cmd_buf_tail can't be modified across memcpy: 41 8b 45 74 mov 0x74(%r13),%eax # iommu->cmd_buf_tail 44 8d 78 10 lea 0x10(%rax),%r15d # += sizeof(*cmd) 41 81 e7 ff 1f 00 00 and $0x1fff,%r15d # %= CMD_BUFFER_SIZE 49 03 45 68 add 0x68(%r13),%rax # target = iommu->cmd_buf + iommu->cmd_buf_tail 45 89 7d 74 mov %r15d,0x74(%r13) # store to iommu->cmd_buf_tail 49 8b 34 24 mov (%r12),%rsi # memcpy 49 8b 7c 24 08 mov 0x8(%r12),%rdi # memcpy 48 89 30 mov %rsi,(%rax) # memcpy 48 89 78 08 mov %rdi,0x8(%rax) # memcpy 49 8b 55 38 mov 0x38(%r13),%rdx # iommu->mmio_base 41 8b 45 74 mov 0x74(%r13),%eax # redundant load of iommu->cmd_buf_tail ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 89 82 08 20 00 00 mov %eax,0x2008(%rdx) # writel CC: Tom Lendacky CC: Joerg Roedel CC: linux-kernel@vger.kernel.org Signed-off-by: Denys Vlasenko Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index fb54df5c2e11..69458d309be0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -852,17 +852,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu, struct iommu_cmd *cmd) { u8 *target; - - target = iommu->cmd_buf + iommu->cmd_buf_tail; - - iommu->cmd_buf_tail += sizeof(*cmd); - iommu->cmd_buf_tail %= CMD_BUFFER_SIZE; + u32 tail; /* Copy command to buffer */ + tail = iommu->cmd_buf_tail; + target = iommu->cmd_buf + tail; memcpy(target, 
cmd, sizeof(*cmd)); + tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; + iommu->cmd_buf_tail = tail; + /* Tell the IOMMU about it */ - writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); } static void build_completion_wait(struct iommu_cmd *cmd, u64 address) From 3332364e4ebc0581d133a334645a20fd13b580f1 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Tue, 22 Oct 2019 16:01:20 -0600 Subject: [PATCH 29/62] iommu/amd: Support multiple PCI DMA aliases in device table Non-Transparent Bridge (NTB) devices (among others) may have many DMA aliases seeing the hardware will send requests with different device ids depending on their origin across the bridged hardware. See commit ad281ecf1c7d ("PCI: Add DMA alias quirk for Microsemi Switchtec NTB") for more information on this. The AMD IOMMU ignores all the PCI aliases except the last one so DMA transfers from these aliases will be blocked on AMD hardware with the IOMMU enabled. To fix this, ensure the DTEs are cloned for every PCI alias. This is done by copying the DTE data for each alias as well as the IVRS alias every time it is changed. Signed-off-by: Logan Gunthorpe Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 139 +++++++++++++++----------------- drivers/iommu/amd_iommu_types.h | 2 +- 2 files changed, 65 insertions(+), 76 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 69458d309be0..bb2df2b304a9 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -204,71 +204,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid) return NULL; } -static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) +static int clone_alias(struct pci_dev *pdev, u16 alias, void *data) { - *(u16 *)data = alias; + u16 devid = pci_dev_id(pdev); + + if (devid == alias) + return 0; + + amd_iommu_rlookup_table[alias] = + amd_iommu_rlookup_table[devid]; + memcpy(amd_iommu_dev_table[alias].data, + amd_iommu_dev_table[devid].data, + sizeof(amd_iommu_dev_table[alias].data)); + return 0; } -static u16 get_alias(struct device *dev) +static void clone_aliases(struct pci_dev *pdev) +{ + if (!pdev) + return; + + /* + * The IVRS alias stored in the alias table may not be + * part of the PCI DMA aliases if it's bus differs + * from the original device. + */ + clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL); + + pci_for_each_dma_alias(pdev, clone_alias, NULL); +} + +static struct pci_dev *setup_aliases(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - u16 devid, ivrs_alias, pci_alias; + u16 ivrs_alias; - /* The callers make sure that get_device_id() does not fail here */ - devid = get_device_id(dev); - - /* For ACPI HID devices, we simply return the devid as such */ + /* For ACPI HID devices, there are no aliases */ if (!dev_is_pci(dev)) - return devid; - - ivrs_alias = amd_iommu_alias_table[devid]; - - pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); - - if (ivrs_alias == pci_alias) - return ivrs_alias; + return NULL; /* - * DMA alias showdown - * - * The IVRS is fairly reliable in telling us about aliases, but it - * can't know about every screwy device. If we don't have an IVRS - * reported alias, use the PCI reported alias. In that case we may - * still need to initialize the rlookup and dev_table entries if the - * alias is to a non-existent device. + * Add the IVRS alias to the pci aliases if it is on the same + * bus. The IVRS table may know about a quirk that we don't. 
*/ - if (ivrs_alias == devid) { - if (!amd_iommu_rlookup_table[pci_alias]) { - amd_iommu_rlookup_table[pci_alias] = - amd_iommu_rlookup_table[devid]; - memcpy(amd_iommu_dev_table[pci_alias].data, - amd_iommu_dev_table[devid].data, - sizeof(amd_iommu_dev_table[pci_alias].data)); - } - - return pci_alias; - } - - pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d " - "for device [%04x:%04x], kernel reported alias " - "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), - PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device, - PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), - PCI_FUNC(pci_alias)); - - /* - * If we don't have a PCI DMA alias and the IVRS alias is on the same - * bus, then the IVRS table may know about a quirk that we don't. - */ - if (pci_alias == devid && + ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)]; + if (ivrs_alias != pci_dev_id(pdev) && PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { pci_add_dma_alias(pdev, ivrs_alias & 0xff); pci_info(pdev, "Added PCI DMA alias %02x.%d\n", PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias)); } - return ivrs_alias; + clone_aliases(pdev); + + return pdev; } static struct iommu_dev_data *find_dev_data(u16 devid) @@ -406,7 +396,7 @@ static int iommu_init_device(struct device *dev) if (!dev_data) return -ENOMEM; - dev_data->alias = get_alias(dev); + dev_data->pdev = setup_aliases(dev); /* * By default we use passthrough mode for IOMMUv2 capable device. @@ -431,20 +421,16 @@ static int iommu_init_device(struct device *dev) static void iommu_ignore_device(struct device *dev) { - u16 alias; int devid; devid = get_device_id(dev); if (devid < 0) return; - alias = get_alias(dev); - - memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); - memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); - amd_iommu_rlookup_table[devid] = NULL; - amd_iommu_rlookup_table[alias] = NULL; + memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); + + setup_aliases(dev); } static void iommu_uninit_device(struct device *dev) @@ -1213,6 +1199,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data, return iommu_queue_command(iommu, &cmd); } +static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct amd_iommu *iommu = data; + + return iommu_flush_dte(iommu, alias); +} + /* * Command send function for invalidating a device table entry */ @@ -1223,14 +1216,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data) int ret; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; - ret = iommu_flush_dte(iommu, dev_data->devid); - if (!ret && alias != dev_data->devid) - ret = iommu_flush_dte(iommu, alias); + if (dev_data->pdev) + ret = pci_for_each_dma_alias(dev_data->pdev, + device_flush_dte_alias, iommu); + else + ret = iommu_flush_dte(iommu, dev_data->devid); if (ret) return ret; + alias = amd_iommu_alias_table[dev_data->devid]; + if (alias != dev_data->devid) { + ret = iommu_flush_dte(iommu, alias); + if (ret) + return ret; + } + if (dev_data->ats.enabled) ret = device_flush_iotlb(dev_data, 0, ~0UL); @@ -1944,11 +1945,9 @@ static void do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu; - u16 alias; bool ats; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; ats = dev_data->ats.enabled; /* Update data structures */ @@ -1961,8 +1960,7 @@ static void do_attach(struct iommu_dev_data *dev_data, /* Update device table */ set_dte_entry(dev_data->devid, domain, ats, 
dev_data->iommu_v2); - if (alias != dev_data->devid) - set_dte_entry(alias, domain, ats, dev_data->iommu_v2); + clone_aliases(dev_data->pdev); device_flush_dte(dev_data); } @@ -1971,17 +1969,14 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; - u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); clear_dte_entry(dev_data->devid); - if (alias != dev_data->devid) - clear_dte_entry(alias); + clone_aliases(dev_data->pdev); /* Flush the DTE entry */ device_flush_dte(dev_data); @@ -2282,13 +2277,7 @@ static void update_device_table(struct protection_domain *domain) list_for_each_entry(dev_data, &domain->dev_list, list) { set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, dev_data->iommu_v2); - - if (dev_data->devid == dev_data->alias) - continue; - - /* There is an alias, update device table entry for it */ - set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled, - dev_data->iommu_v2); + clone_aliases(dev_data->pdev); } } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index becbd3bd9180..b611459f369a 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -638,8 +638,8 @@ struct iommu_dev_data { struct list_head list; /* For domain->dev_list */ struct llist_node dev_data_list; /* For global dev_data_list */ struct protection_domain *domain; /* Domain the device is bound to */ + struct pci_dev *pdev; u16 devid; /* PCI Device ID */ - u16 alias; /* Alias Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ bool passthrough; /* Device is identity mapped */ struct { From 3c124435e8dd516df4b2fc983f4415386fd6edae Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Tue, 22 Oct 2019 16:01:21 -0600 Subject: [PATCH 30/62] iommu/amd: Support multiple PCI DMA aliases in IRQ Remapping Non-Transparent Bridge (NTB) devices (among others) may have many DMA aliases seeing the hardware will send requests with different device ids depending on their origin across the bridged hardware. See commit ad281ecf1c7d ("PCI: Add DMA alias quirk for Microsemi Switchtec NTB") for more information on this. The AMD IOMMU IRQ remapping functionality ignores all PCI aliases for IRQs so if devices send an interrupt from one of their aliases they will be blocked on AMD hardware with the IOMMU enabled. To fix this, ensure IRQ remapping is enabled for all aliases with MSI interrupts. 
This is analogous to the functionality added to the Intel IRQ remapping code in commit 3f0c625c6ae7 ("iommu/vt-d: Allow interrupts from the entire bus for aliased devices") Signed-off-by: Logan Gunthorpe Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index bb2df2b304a9..be2894bf92fc 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3174,7 +3174,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, iommu_flush_dte(iommu, devid); } -static struct irq_remap_table *alloc_irq_table(u16 devid) +static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias, + void *data) +{ + struct irq_remap_table *table = data; + + irq_lookup_table[alias] = table; + set_dte_irq_entry(alias, table); + + iommu_flush_dte(amd_iommu_rlookup_table[alias], alias); + + return 0; +} + +static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev) { struct irq_remap_table *table = NULL; struct irq_remap_table *new_table = NULL; @@ -3220,7 +3233,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid) table = new_table; new_table = NULL; - set_remap_table_entry(iommu, devid, table); + if (pdev) + pci_for_each_dma_alias(pdev, set_remap_table_entry_alias, + table); + else + set_remap_table_entry(iommu, devid, table); + if (devid != alias) set_remap_table_entry(iommu, alias, table); @@ -3237,7 +3255,8 @@ out_unlock: return table; } -static int alloc_irq_index(u16 devid, int count, bool align) +static int alloc_irq_index(u16 devid, int count, bool align, + struct pci_dev *pdev) { struct irq_remap_table *table; int index, c, alignment = 1; @@ -3247,7 +3266,7 @@ static int alloc_irq_index(u16 devid, int count, bool align) if (!iommu) return -ENODEV; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, pdev); if (!table) return -ENODEV; @@ -3680,7 +3699,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, struct irq_remap_table *table; struct amd_iommu *iommu; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, NULL); if (table) { if (!table->min_index) { /* @@ -3697,11 +3716,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, } else { index = -ENOMEM; } - } else { + } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI || + info->type == X86_IRQ_ALLOC_TYPE_MSIX) { bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI); - index = alloc_irq_index(devid, nr_irqs, align); + index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev); + } else { + index = alloc_irq_index(devid, nr_irqs, false, NULL); } + if (index < 0) { pr_warn("Failed to allocate IRTE\n"); ret = index; From c1c8058dfb9852eb5adf968b7617a9a4771a08ce Mon Sep 17 00:00:00 2001 From: Cristiane Naves Date: Fri, 25 Oct 2019 13:13:40 -0300 Subject: [PATCH 31/62] iommu/virtio: Remove unused variable Remove the variable of return. 
Issue found by coccicheck(scripts/coccinelle/misc/returnvar.cocci) Signed-off-by: Cristiane Naves Signed-off-by: Joerg Roedel --- drivers/iommu/virtio-iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 3ea9d7682999..d8f29c8f73f5 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, */ static int __viommu_sync_req(struct viommu_dev *viommu) { - int ret = 0; unsigned int len; size_t write_len; struct viommu_request *req; @@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu) kfree(req); } - return ret; + return 0; } static int viommu_sync_req(struct viommu_dev *viommu) From ee9bdfedd3dc1b3303390663189defa4d6b9e458 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Thu, 31 Oct 2019 14:31:02 -0700 Subject: [PATCH 32/62] iommu/arm-smmu: Avoid pathological RPM behaviour for unmaps When games, browser, or anything using a lot of GPU buffers exits, there can be many hundreds or thousands of buffers to unmap and free. If the GPU is otherwise suspended, this can cause arm-smmu to resume/suspend for each buffer, resulting 5-10 seconds worth of reprogramming the context bank (arm_smmu_write_context_bank()/arm_smmu_write_s2cr()/etc). To the user it would appear that the system just locked up. A simple solution is to use pm_runtime_put_autosuspend() instead, so we don't immediately suspend the SMMU device. Reviewed-by: Robin Murphy Signed-off-by: Rob Clark Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 080af0326816..a2b1ca55b73e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -123,7 +123,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu) { if (pm_runtime_enabled(smmu->dev)) - pm_runtime_put(smmu->dev); + pm_runtime_put_autosuspend(smmu->dev); } static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -1167,6 +1167,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) /* Looks ok, so add the device to the domain */ ret = arm_smmu_domain_add_master(smmu_domain, fwspec); + /* + * Setup an autosuspend delay to avoid bouncing runpm state. + * Otherwise, if a driver for a suspended consumer device + * unmaps buffers, it will runpm resume/suspend for each one. + * + * For example, when used by a GPU device, when an application + * or game exits, it can trigger unmapping 100s or 1000s of + * buffers. With a runpm cycle for each buffer, that adds up + * to 5-10sec worth of reprogramming the context bank, while + * the system appears to be locked up to the user. + */ + pm_runtime_set_autosuspend_delay(smmu->dev, 20); + pm_runtime_use_autosuspend(smmu->dev); + rpm_put: arm_smmu_rpm_put(smmu); return ret; From ff34f3cce278a0982a7b66b1afaed6295141b1fc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 4 Nov 2019 15:58:15 +0000 Subject: [PATCH 33/62] firmware: qcom: scm: Ensure 'a0' status code is treated as signed The 'a0' member of 'struct arm_smccc_res' is declared as 'unsigned long', however the Qualcomm SCM firmware interface driver expects to receive negative error codes via this field, so ensure that it's cast to 'long' before comparing to see if it is less than 0. 
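The pitfall is easy to reproduce standalone (the -22/-EINVAL value is just an example); with an unsigned field, the < 0 test can never fire:

#include <stdio.h>

int main(void)
{
	unsigned long a0 = (unsigned long)-22;	/* firmware returned -EINVAL */

	if (a0 < 0)			/* always false: a0 is unsigned */
		puts("unsigned compare caught the error");

	if ((long)a0 < 0)		/* the fix: compare as signed */
		puts("signed compare caught the error");

	return 0;
}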
Cc: Reviewed-by: Bjorn Andersson Signed-off-by: Will Deacon --- drivers/firmware/qcom_scm-64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 91d5ad7cf58b..25e0f60c759a 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -150,7 +150,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, kfree(args_virt); } - if (res->a0 < 0) + if ((long)res->a0 < 0) return qcom_scm_remap_error(res->a0); return 0; From 1a5ea3b7a6ac6c133660b9aeda95b2087c8eec47 Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Fri, 20 Sep 2019 13:34:27 +0530 Subject: [PATCH 34/62] firmware: qcom_scm-64: Add atomic version of qcom_scm_call There are scenarios where drivers are required to make an SCM call in atomic context, such as in one of Qcom's arm-smmu-500 errata [1]. [1] ("https://source.codeaurora.org/quic/la/kernel/msm-4.9/ tree/drivers/iommu/arm-smmu.c?h=msm-4.9#n4842") Signed-off-by: Vivek Gautam Reviewed-by: Bjorn Andersson Reviewed-by: Stephen Boyd Acked-by: Andy Gross Signed-off-by: Sai Prakash Ranjan Signed-off-by: Will Deacon --- drivers/firmware/qcom_scm-64.c | 138 ++++++++++++++++++++++----------- 1 file changed, 94 insertions(+), 44 deletions(-) diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 25e0f60c759a..872155c67b2d 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock); #define FIRST_EXT_ARG_IDX 3 #define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) -/** - * qcom_scm_call() - Invoke a syscall in the secure world - * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier - * @desc: Descriptor structure containing arguments and return values - * - * Sends a command to the SCM and waits for the command to finish processing. - * This should *only* be called in pre-emptible context.
-*/ -static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res) +static void __qcom_scm_call_do(const struct qcom_scm_desc *desc, + struct arm_smccc_res *res, u32 fn_id, + u64 x5, u32 type) +{ + u64 cmd; + struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; + + cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention, + ARM_SMCCC_OWNER_SIP, fn_id); + + quirk.state.a6 = 0; + + do { + arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0], + desc->args[1], desc->args[2], x5, + quirk.state.a6, 0, res, &quirk); + + if (res->a0 == QCOM_SCM_INTERRUPTED) + cmd = res->a0; + + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +static void qcom_scm_call_do(const struct qcom_scm_desc *desc, + struct arm_smccc_res *res, u32 fn_id, + u64 x5, bool atomic) +{ + int retry_count = 0; + + if (atomic) { + __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL); + return; + } + + do { + mutex_lock(&qcom_scm_lock); + + __qcom_scm_call_do(desc, res, fn_id, x5, + ARM_SMCCC_STD_CALL); + + mutex_unlock(&qcom_scm_lock); + + if (res->a0 == QCOM_SCM_V2_EBUSY) { + if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) + break; + msleep(QCOM_SCM_EBUSY_WAIT_MS); + } + } while (res->a0 == QCOM_SCM_V2_EBUSY); +} + +static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, + const struct qcom_scm_desc *desc, + struct arm_smccc_res *res, bool atomic) { int arglen = desc->arginfo & 0xf; - int retry_count = 0, i; + int i; u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id); - u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX]; + u64 x5 = desc->args[FIRST_EXT_ARG_IDX]; dma_addr_t args_phys = 0; void *args_virt = NULL; size_t alloc_len; - struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6}; + gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; if (unlikely(arglen > N_REGISTER_ARGS)) { alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); + args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); if (!args_virt) return -ENOMEM; @@ -117,33 +157,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, x5 = args_phys; } - do { - mutex_lock(&qcom_scm_lock); - - cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, - qcom_smccc_convention, - ARM_SMCCC_OWNER_SIP, fn_id); - - quirk.state.a6 = 0; - - do { - arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0], - desc->args[1], desc->args[2], x5, - quirk.state.a6, 0, res, &quirk); - - if (res->a0 == QCOM_SCM_INTERRUPTED) - cmd = res->a0; - - } while (res->a0 == QCOM_SCM_INTERRUPTED); - - mutex_unlock(&qcom_scm_lock); - - if (res->a0 == QCOM_SCM_V2_EBUSY) { - if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) - break; - msleep(QCOM_SCM_EBUSY_WAIT_MS); - } - } while (res->a0 == QCOM_SCM_V2_EBUSY); + qcom_scm_call_do(desc, res, fn_id, x5, atomic); if (args_virt) { dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); @@ -156,6 +170,42 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, return 0; } +/** + * qcom_scm_call() - Invoke a syscall in the secure world + * @dev: device + * @svc_id: service identifier + * @cmd_id: command identifier + * @desc: Descriptor structure containing arguments and return values + * + * Sends a command to the SCM and waits for the command to finish processing. + * This should *only* be called in pre-emptible context. 
+ */ +static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, + const struct qcom_scm_desc *desc, + struct arm_smccc_res *res) +{ + might_sleep(); + return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false); +} + +/** + * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() + * @dev: device + * @svc_id: service identifier + * @cmd_id: command identifier + * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call + * + * Sends a command to the SCM and waits for the command to finish processing. + * This can be called in atomic context. + */ +static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id, + const struct qcom_scm_desc *desc, + struct arm_smccc_res *res) +{ + return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true); +} + /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus From 5eb0e0e4f90addc6b79ebf1cb4b06b56b09f09de Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Fri, 20 Sep 2019 13:34:28 +0530 Subject: [PATCH 35/62] firmware/qcom_scm: Add scm call to handle smmu errata Qcom's smmu-500 needs to toggle wait-for-safe sequence to handle TLB invalidation sync's. Few firmwares allow doing that through SCM interface. Add API to toggle wait for safe from firmware through a SCM call. Signed-off-by: Vivek Gautam Reviewed-by: Bjorn Andersson Reviewed-by: Stephen Boyd Acked-by: Andy Gross Signed-off-by: Sai Prakash Ranjan Signed-off-by: Will Deacon --- drivers/firmware/qcom_scm-32.c | 5 +++++ drivers/firmware/qcom_scm-64.c | 13 +++++++++++++ drivers/firmware/qcom_scm.c | 6 ++++++ drivers/firmware/qcom_scm.h | 5 +++++ include/linux/qcom_scm.h | 2 ++ 5 files changed, 31 insertions(+) diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 215061c581e1..bee8729525ec 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, addr, val); } + +int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable) +{ + return -ENODEV; +} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 872155c67b2d..e1cd933ea9ae 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -552,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, &desc, &res); } + +int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL; + desc.args[1] = en; + desc.arginfo = QCOM_SCM_ARGS(2); + + return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM, + QCOM_SCM_CONFIG_ERRATA1, &desc, &res); +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 4802ab170fe5..a729e05c21b8 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); +int qcom_scm_qsmmu500_wait_safe_toggle(bool en) +{ + return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en); +} +EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); + int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return 
__qcom_scm_io_readl(__scm->dev, addr, val); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 99506bd873c0..baee744dbcfe 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare); #define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3 #define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4 +#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_CONFIG_ERRATA1 0x3 +#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2 extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, size_t *size); extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, u32 spare); +extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, + bool enable); #define QCOM_MEM_PROT_ASSIGN_ID 0x16 extern int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, size_t mem_sz, diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 2d5eff506e13..ffd72b3b14ee 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -58,6 +58,7 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); #else @@ -97,6 +98,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; } static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; } +static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; } static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; } static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; } #endif From 759aaa10c76cbaaefc0670410fb2d54cf4ec10cc Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Fri, 20 Sep 2019 13:34:29 +0530 Subject: [PATCH 36/62] iommu: arm-smmu-impl: Add sdm845 implementation hook Add a reset hook for sdm845-based platforms to turn off the wait-for-safe sequence. Understanding how the wait-for-safe logic affects USB and UFS performance on MTP845 and DB845 boards: Qcom's implementation of arm,mmu-500 adds WAIT-FOR-SAFE logic to address under-performance issues in real-time clients, such as display and camera. On receiving an invalidation request, the SMMU forwards a SAFE request to these clients and waits for a SAFE ack signal from them. The SAFE signal from such clients is used to qualify the start of invalidation. This logic is controlled by chicken bits, one each for MDP (display), IFE0, and IFE1 (camera), which can be accessed only from secure software on sdm845. This configuration, however, degrades the performance of non-real-time clients, such as USB and UFS. This happens because, with the wait-for-safe logic enabled, the hardware throttles non-real-time clients while waiting for SAFE ack signals from the real-time clients.
On mtp845 and db845 devices, with the wait-for-safe logic enabled by the bootloaders, we see degraded performance of USB and UFS when the kernel enables the smmu stage-1 translations for these clients. Turning off this wait-for-safe logic from the kernel gets us back the performance of USB and UFS devices until we revisit this when we start seeing performance issues on display/camera on upstream-supported SDM845 platforms. The bootloaders on these boards implement secure monitor callbacks to handle a specific command, QCOM_SCM_SVC_SMMU_PROGRAM, with which the logic can be toggled. There are other boards, such as cheza, whose bootloaders don't enable this logic. Such boards don't implement callbacks to handle the specific SCM call, so disabling this logic on such boards will be a no-op. This change is inspired by the downstream change from Patrick Daly to address performance issues with display and camera by handling this wait-for-safe within separate io-pagetable ops to do TLB maintenance. So a big thanks to him for the change and for all the offline discussions. Without this change the UFS reads are pretty slow: $ time dd if=/dev/sda of=/dev/zero bs=1048576 count=10 conv=sync 10+0 records in 10+0 records out 10485760 bytes (10.0MB) copied, 22.394903 seconds, 457.2KB/s real 0m 22.39s user 0m 0.00s sys 0m 0.01s With this change they are back to rock! $ time dd if=/dev/sda of=/dev/zero bs=1048576 count=300 conv=sync 300+0 records in 300+0 records out 314572800 bytes (300.0MB) copied, 1.030541 seconds, 291.1MB/s real 0m 1.03s user 0m 0.00s sys 0m 0.54s Signed-off-by: Vivek Gautam Reviewed-by: Robin Murphy Reviewed-by: Stephen Boyd Reviewed-by: Bjorn Andersson Signed-off-by: Sai Prakash Ranjan Signed-off-by: Will Deacon --- drivers/iommu/Makefile | 2 +- drivers/iommu/arm-smmu-impl.c | 5 +++- drivers/iommu/arm-smmu-qcom.c | 51 +++++++++++++++++++++++++++++++++++ drivers/iommu/arm-smmu.h | 3 +++ 4 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 drivers/iommu/arm-smmu-qcom.c diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 4f405f926e73..86dadd13b2e6 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o -obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o +obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c index 5c87a38620c4..b2fe72a8f019 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm-smmu-impl.c @@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm #define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) #define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) -static int arm_mmu500_reset(struct arm_smmu_device *smmu) +int arm_mmu500_reset(struct arm_smmu_device *smmu) { u32 reg, major; int i; @@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) "calxeda,smmu-secure-config-access")) smmu->impl = &calxeda_impl; + if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500")) + return qcom_smmu_impl_init(smmu); + return smmu; } diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c new file mode 100644 index
000000000000..24c071c1d8b0 --- /dev/null +++ b/drivers/iommu/arm-smmu-qcom.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#include + +#include "arm-smmu.h" + +struct qcom_smmu { + struct arm_smmu_device smmu; +}; + +static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) +{ + int ret; + + arm_mmu500_reset(smmu); + + /* + * To address performance degradation in non-real time clients, + * such as USB and UFS, turn off wait-for-safe on sdm845 based boards, + * such as MTP and db845, whose firmwares implement secure monitor + * call handlers to turn on/off the wait-for-safe logic. + */ + ret = qcom_scm_qsmmu500_wait_safe_toggle(0); + if (ret) + dev_warn(smmu->dev, "Failed to turn off SAFE logic\n"); + + return ret; +} + +static const struct arm_smmu_impl qcom_smmu_impl = { + .reset = qcom_sdm845_smmu500_reset, +}; + +struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) +{ + struct qcom_smmu *qsmmu; + + qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL); + if (!qsmmu) + return ERR_PTR(-ENOMEM); + + qsmmu->smmu = *smmu; + + qsmmu->smmu.impl = &qcom_smmu_impl; + devm_kfree(smmu->dev, smmu); + + return &qsmmu->smmu; +} diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h index 409716410b0d..62b9f0cec49b 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm-smmu.h @@ -395,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); +struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); + +int arm_mmu500_reset(struct arm_smmu_device *smmu); #endif /* _ARM_SMMU_H */ From b5813c164ec82790be892b0f8e79cf080a503706 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:30 +0100 Subject: [PATCH 37/62] iommu/io-pgtable: Make selftest gubbins consistently __init The selftests run as an initcall, but the annotation of the various callbacks and data seems to be somewhat arbitrary. Add it consistently for everything related to the selftests. 
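The pattern being applied, as a minimal sketch with invented names: everything reachable only from the initcall (functions, scratch data, const ops tables) carries the matching annotation so the kernel can discard it after boot.

#include <linux/errno.h>
#include <linux/init.h>

struct example_ops {
	void (*run)(void);
};

static int selftest_count __initdata;

static void __init selftest_run(void)
{
	selftest_count++;
}

/* const tables referenced only at init time become __initconst */
static const struct example_ops selftest_ops __initconst = {
	.run = selftest_run,
};

static int __init example_selftests(void)
{
	selftest_ops.run();
	return selftest_count == 1 ? 0 : -EINVAL;
}
subsys_initcall(example_selftests);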
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 15 ++++++++------- drivers/iommu/io-pgtable-arm.c | 13 +++++++------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 4cb394937700..7c3bd2c3cdca 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = { #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST -static struct io_pgtable_cfg *cfg_cookie; +static struct io_pgtable_cfg *cfg_cookie __initdata; -static void dummy_tlb_flush_all(void *cookie) +static void __init dummy_tlb_flush_all(void *cookie) { WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, - void *cookie) +static void __init dummy_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, void *cookie) +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { dummy_tlb_flush(iova, granule, granule, cookie); } -static const struct iommu_flush_ops dummy_tlb_ops = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5040676f3242..c5c4f247acb4 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1097,22 +1097,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST -static struct io_pgtable_cfg *cfg_cookie; +static struct io_pgtable_cfg *cfg_cookie __initdata; -static void dummy_tlb_flush_all(void *cookie) +static void __init dummy_tlb_flush_all(void *cookie) { WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, - void *cookie) +static void __init dummy_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, void *cookie) +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { dummy_tlb_flush(iova, granule, granule, cookie); } From f7b90d2c7422a815c094961751582347935045cd Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:31 +0100 Subject: [PATCH 38/62] iommu/io-pgtable-arm: Rationalise size check It makes little sense to only validate the requested size after we think we've found a matching block size - making the check up-front is simple, and far more logical than waiting to walk off the bottom of the table to infer that we must have been passed a bogus size to start with. We're missing an equivalent check on the unmap path, so add that as well for consistency. 
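The shared up-front test can be exercised standalone; the page-size bitmap below is illustrative (4 KiB, 2 MiB and 1 GiB):

#include <stdio.h>

/* Non-zero, and composed only of bits present in the supported bitmap */
static int size_ok(unsigned long size, unsigned long pgsize_bitmap)
{
	return size && (size & pgsize_bitmap) == size;
}

int main(void)
{
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	printf("%d\n", size_ok(1UL << 12, bitmap));	/* 1: 4 KiB page   */
	printf("%d\n", size_ok(1UL << 21, bitmap));	/* 1: 2 MiB block  */
	printf("%d\n", size_ok(3UL << 12, bitmap));	/* 0: bogus 12 KiB */
	printf("%d\n", size_ok(0, bitmap));		/* 0: empty range  */
	return 0;
}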
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index c5c4f247acb4..abe794576e8f 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -392,7 +392,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); /* If we can install a leaf entry at this level, then do so */ - if (size == block_size && (size & cfg->pgsize_bitmap)) + if (size == block_size) return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); /* We can't allocate tables at the final level */ @@ -479,6 +479,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int iommu_prot) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; int ret, lvl = ARM_LPAE_START_LVL(data); arm_lpae_iopte prot; @@ -487,6 +488,9 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) return 0; + if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) + return -EINVAL; + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; @@ -652,9 +656,13 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); + if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) + return 0; + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) return 0; From 67f3e53d2a37ab27b00610eb25724103055beafc Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:32 +0100 Subject: [PATCH 39/62] iommu/io-pgtable-arm: Simplify bounds checks We're merely checking that the relevant upper bits of each address are all zero, so there are cheaper ways to achieve that. 
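Both forms reject exactly the same addresses whenever ias < 64; the shift form merely avoids materialising the 1ULL << ias constant. A standalone check with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int ias = 39;			/* e.g. a 39-bit input space */
	unsigned long long iova = 1ULL << 40;	/* clearly out of range */

	printf("compare: %d\n", iova >= (1ULL << ias));	/* 1 */
	printf("shift:   %d\n", !!(iova >> ias));	/* 1 */
	return 0;
}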
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index abe794576e8f..5da3cbdb76f7 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -491,8 +491,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return -EINVAL; - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || - paddr >= (1ULL << data->iop.cfg.oas))) + if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas)) return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); @@ -663,7 +662,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return 0; - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) + if (WARN_ON(iova >> data->iop.cfg.ias)) return 0; return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep); From 594ab90fc46c0842e4f1b60b6642fa4555a999f9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:33 +0100 Subject: [PATCH 40/62] iommu/io-pgtable-arm: Simplify start level lookup Beyond a couple of allocation-time calculations, data->levels is only ever used to derive the start level. Storing the start level directly leads to a small reduction in object code, which should help eke out a little more efficiency, and slightly more readable source to boot. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 45 +++++++++++++++------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5da3cbdb76f7..f4c2fae11256 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -31,19 +31,13 @@ #define io_pgtable_ops_to_data(x) \ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) -/* - * For consistency with the architecture, we always consider - * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 - */ -#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) - /* * Calculate the right shift amount to get to the portion describing level l * in a virtual address mapped by the pagetable in d. */ #define ARM_LPAE_LVL_SHIFT(l,d) \ - ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ - * (d)->bits_per_level) + (d)->pg_shift) + (((ARM_LPAE_MAX_LEVELS - 1 - (l)) * (d)->bits_per_level) + \ + (d)->pg_shift) #define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) @@ -55,7 +49,7 @@ * pagetable in d. */ #define ARM_LPAE_PGD_IDX(l,d) \ - ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + ((l) == (d)->start_level ? 
ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ @@ -180,7 +174,7 @@ struct arm_lpae_io_pgtable { struct io_pgtable iop; - int levels; + int start_level; size_t pgd_size; unsigned long pg_shift; unsigned long bits_per_level; @@ -481,7 +475,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; - int ret, lvl = ARM_LPAE_START_LVL(data); + int ret, lvl = data->start_level; arm_lpae_iopte prot; /* If no access, then nothing to do */ @@ -511,7 +505,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *start, *end; unsigned long table_size; - if (lvl == ARM_LPAE_START_LVL(data)) + if (lvl == data->start_level) table_size = data->pgd_size; else table_size = ARM_LPAE_GRANULE(data); @@ -540,7 +534,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) { struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); - __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); + __arm_lpae_free_pgtable(data, data->start_level, data->pgd); kfree(data); } @@ -657,7 +651,6 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; - int lvl = ARM_LPAE_START_LVL(data); if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return 0; @@ -665,7 +658,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(iova >> data->iop.cfg.ias)) return 0; - return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep); + return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep); } static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, @@ -673,7 +666,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte pte, *ptep = data->pgd; - int lvl = ARM_LPAE_START_LVL(data); + int lvl = data->start_level; do { /* Valid IOPTE pointer? */ @@ -752,6 +745,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { unsigned long va_bits, pgd_bits; struct arm_lpae_io_pgtable *data; + int levels; arm_lpae_restrict_pgsizes(cfg); @@ -777,10 +771,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); va_bits = cfg->ias - data->pg_shift; - data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + data->start_level = ARM_LPAE_MAX_LEVELS - levels; /* Calculate the actual size of our pgd (without concatenation) */ - pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); + pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); data->iop.ops = (struct io_pgtable_ops) { @@ -910,13 +905,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) * Concatenate PGDs at level 1 if possible in order to reduce * the depth of the stage-2 walk. 
*/ - if (data->levels == ARM_LPAE_MAX_LEVELS) { + if (data->start_level == 0) { unsigned long pgd_pages; pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { data->pgd_size = pgd_pages << data->pg_shift; - data->levels--; + data->start_level++; } } @@ -926,7 +921,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); - sl = ARM_LPAE_START_LVL(data); + sl = data->start_level; switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: @@ -1041,8 +1036,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) return NULL; /* Mali seems to need a full 4-level table regardless of IAS */ - if (data->levels < ARM_LPAE_MAX_LEVELS) { - data->levels = ARM_LPAE_MAX_LEVELS; + if (data->start_level > 0) { + data->start_level = 0; data->pgd_size = sizeof(arm_lpae_iopte); } /* @@ -1140,8 +1135,8 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", cfg->pgsize_bitmap, cfg->ias); pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", - data->levels, data->pgd_size, data->pg_shift, - data->bits_per_level, data->pgd); + ARM_LPAE_MAX_LEVELS - data->start_level, data->pgd_size, + data->pg_shift, data->bits_per_level, data->pgd); } #define __FAIL(ops, i) ({ \ From c79278c185c8848fefc25cf304eecec9c4623a40 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:34 +0100 Subject: [PATCH 41/62] iommu/io-pgtable-arm: Simplify PGD size handling We use data->pgd_size directly for the one-off allocation and freeing of the top-level table, but otherwise it serves for ARM_LPAE_PGD_IDX() to repeatedly re-calculate the effective number of top-level address bits it represents. Flip this around so we store the form we most commonly need, and derive the lesser-used one instead. This cuts a whole bunch of code out of the map/unmap/iova_to_phys fast-paths. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f4c2fae11256..e67112107cef 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -40,16 +40,15 @@ (d)->pg_shift) #define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) - -#define ARM_LPAE_PAGES_PER_PGD(d) \ - DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) +#define ARM_LPAE_PGD_SIZE(d) \ + (sizeof(arm_lpae_iopte) << (d)->pgd_bits) /* * Calculate the index at level l used to map virtual address a using the * pagetable in d. */ #define ARM_LPAE_PGD_IDX(l,d) \ - ((l) == (d)->start_level ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + ((l) == (d)->start_level ? 
(d)->pgd_bits - (d)->bits_per_level : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ @@ -174,8 +173,8 @@ struct arm_lpae_io_pgtable { struct io_pgtable iop; + int pgd_bits; int start_level; - size_t pgd_size; unsigned long pg_shift; unsigned long bits_per_level; @@ -506,7 +505,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, unsigned long table_size; if (lvl == data->start_level) - table_size = data->pgd_size; + table_size = ARM_LPAE_PGD_SIZE(data); else table_size = ARM_LPAE_GRANULE(data); @@ -743,7 +742,7 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) static struct arm_lpae_io_pgtable * arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { - unsigned long va_bits, pgd_bits; + unsigned long va_bits; struct arm_lpae_io_pgtable *data; int levels; @@ -775,8 +774,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) data->start_level = ARM_LPAE_MAX_LEVELS - levels; /* Calculate the actual size of our pgd (without concatenation) */ - pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); - data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); + data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); data->iop.ops = (struct io_pgtable_ops) { .map = arm_lpae_map, @@ -870,7 +868,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) cfg->arm_lpae_s1_cfg.mair[1] = 0; /* Looking good; allocate a pgd */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), + GFP_KERNEL, cfg); if (!data->pgd) goto out_free_data; @@ -908,9 +907,9 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) if (data->start_level == 0) { unsigned long pgd_pages; - pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); + pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte); if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { - data->pgd_size = pgd_pages << data->pg_shift; + data->pgd_bits += data->bits_per_level; data->start_level++; } } @@ -967,7 +966,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) cfg->arm_lpae_s2_cfg.vtcr = reg; /* Allocate pgd pages */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), + GFP_KERNEL, cfg); if (!data->pgd) goto out_free_data; @@ -1038,7 +1038,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) /* Mali seems to need a full 4-level table regardless of IAS */ if (data->start_level > 0) { data->start_level = 0; - data->pgd_size = sizeof(arm_lpae_iopte); + data->pgd_bits = 0; } /* * MEMATTR: Mali has no actual notion of a non-cacheable type, so the @@ -1055,7 +1055,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) (ARM_MALI_LPAE_MEMATTR_IMP_DEF << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, + cfg); if (!data->pgd) goto out_free_data; @@ -1135,7 +1136,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", cfg->pgsize_bitmap, cfg->ias); pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", - ARM_LPAE_MAX_LEVELS - data->start_level, data->pgd_size, + ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), data->pg_shift, 
data->bits_per_level, data->pgd); } From 5fb190b0b52552de880536d4f409c4300c25e3d4 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:35 +0100 Subject: [PATCH 42/62] iommu/io-pgtable-arm: Simplify level indexing The nature of the LPAE format means that data->pg_shift is always redundant with data->bits_per_level, since they represent the size of a page and the number of PTEs per page respectively, and the size of a PTE is constant. Thus it works out more efficient to only store the latter, and derive the former via a trivial addition where necessary. Signed-off-by: Robin Murphy [will: Reworked granule check in iopte_to_paddr()] Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index e67112107cef..fcb302704053 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -36,10 +36,11 @@ * in a virtual address mapped by the pagetable in d. */ #define ARM_LPAE_LVL_SHIFT(l,d) \ - (((ARM_LPAE_MAX_LEVELS - 1 - (l)) * (d)->bits_per_level) + \ - (d)->pg_shift) + (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \ + ilog2(sizeof(arm_lpae_iopte))) -#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) +#define ARM_LPAE_GRANULE(d) \ + (sizeof(arm_lpae_iopte) << (d)->bits_per_level) #define ARM_LPAE_PGD_SIZE(d) \ (sizeof(arm_lpae_iopte) << (d)->pgd_bits) @@ -55,9 +56,7 @@ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ -#define ARM_LPAE_BLOCK_SIZE(l,d) \ - (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \ - ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) +#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d)) /* Page table bits */ #define ARM_LPAE_PTE_TYPE_SHIFT 0 @@ -175,8 +174,7 @@ struct arm_lpae_io_pgtable { int pgd_bits; int start_level; - unsigned long pg_shift; - unsigned long bits_per_level; + int bits_per_level; void *pgd; }; @@ -206,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, { u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK; - if (data->pg_shift < 16) + if (ARM_LPAE_GRANULE(data) < SZ_64K) return paddr; /* Rotate the packed high-order bits back to the top */ @@ -742,9 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) static struct arm_lpae_io_pgtable * arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { - unsigned long va_bits; struct arm_lpae_io_pgtable *data; - int levels; + int levels, va_bits, pg_shift; arm_lpae_restrict_pgsizes(cfg); @@ -766,10 +763,10 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) if (!data) return NULL; - data->pg_shift = __ffs(cfg->pgsize_bitmap); - data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); + pg_shift = __ffs(cfg->pgsize_bitmap); + data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte)); - va_bits = cfg->ias - data->pg_shift; + va_bits = cfg->ias - pg_shift; levels = DIV_ROUND_UP(va_bits, data->bits_per_level); data->start_level = ARM_LPAE_MAX_LEVELS - levels; @@ -1135,9 +1132,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", cfg->pgsize_bitmap, cfg->ias); - pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", + pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n", ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), - 
data->pg_shift, data->bits_per_level, data->pgd); + ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd); } #define __FAIL(ops, i) ({ \ From 205577ab6f7ade6185f764ed78fb6875dca40205 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 25 Oct 2019 19:08:36 +0100 Subject: [PATCH 43/62] iommu/io-pgtable-arm: Rationalise MAIR handling Between VMSAv8-64 and the various 32-bit formats, there is either one 64-bit MAIR or a pair of 32-bit MAIR0/MAIR1 or NMRR/PMRR registers. As such, keeping two 64-bit values in io_pgtable_cfg has always been overkill. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 4 ++-- drivers/iommu/io-pgtable-arm.c | 3 +-- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/qcom_iommu.c | 4 ++-- include/linux/io-pgtable.h | 2 +- 6 files changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8da93e730d6f..3f20e548f1ec 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, cfg->cd.asid = (u16)asid; cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; return 0; out_free_asid: diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a180665ea002..424ebf38cd09 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -552,8 +552,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; } else { - cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; + cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; + cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; } } } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index fcb302704053..cd96442af44b 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -861,8 +861,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) (ARM_LPAE_MAIR_ATTR_INC_OWBRWA << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)); - cfg->arm_lpae_s1_cfg.mair[0] = reg; - cfg->arm_lpae_s1_cfg.mair[1] = 0; + cfg->arm_lpae_s1_cfg.mair = reg; /* Looking good; allocate a pgd */ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9da8309f7170..e4da6efbda49 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -438,7 +438,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) /* MAIR0 */ ipmmu_ctx_write_root(domain, IMMAIR0, - domain->cfg.arm_lpae_s1_cfg.mair[0]); + domain->cfg.arm_lpae_s1_cfg.mair); /* IMBUSCR */ if (domain->mmu->features->setup_imbuscr) diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c31e7bc4ccbe..66e9b40e9275 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, /* MAIRs (stage-1 only) */ iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, - pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); + pgtbl_cfg.arm_lpae_s1_cfg.mair); iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, - pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); + pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32); /* SCTLR */ reg = 
SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index ec7a13405f10..ee21eedafe98 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -102,7 +102,7 @@ struct io_pgtable_cfg { struct { u64 ttbr[2]; u64 tcr; - u64 mair[2]; + u64 mair; } arm_lpae_s1_cfg; struct { From dd5ddd3c7a8c7ac382a82d15757f0ca3ab2b2dbc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 24 Oct 2019 16:57:39 +0100 Subject: [PATCH 44/62] iommu/io-pgtable-arm: Rename IOMMU_QCOM_SYS_CACHE and improve doc The 'IOMMU_QCOM_SYS_CACHE' IOMMU protection flag is exposed to all users of the IOMMU API. Despite its name, the idea behind it isn't especially tied to Qualcomm implementations and could conceivably be used by other systems. Rename it to 'IOMMU_SYS_CACHE_ONLY' and update the comment to describe a bit better the idea behind it. Cc: Robin Murphy Cc: "Isaac J. Manjarres" Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 2 +- include/linux/iommu.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index cd96442af44b..bdf47f745268 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -455,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, else if (prot & IOMMU_CACHE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); - else if (prot & IOMMU_QCOM_SYS_CACHE) + else if (prot & IOMMU_SYS_CACHE_ONLY) pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 29bac5345563..a86bd21d08a9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -31,11 +31,11 @@ */ #define IOMMU_PRIV (1 << 5) /* - * Non-coherent masters on few Qualcomm SoCs can use this page protection flag - * to set correct cacheability attributes to use an outer level of cache - - * last level cache, aka system cache. + * Non-coherent masters can use this page protection flag to set cacheable + * memory attributes for only a transparent outer level of cache, also known as + * the last-level or system cache. */ -#define IOMMU_QCOM_SYS_CACHE (1 << 6) +#define IOMMU_SYS_CACHE_ONLY (1 << 6) struct iommu_ops; struct iommu_group; From 2009122f1d83dd8375572661961eab1e7e86bffe Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:02 +0800 Subject: [PATCH 45/62] iommu/mediatek: Correct the flush_iotlb_all callback Use the correct tlb_flush_all instead of the original one. 
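[Editor's sketch, not part of the patch: the two helpers being swapped have very different semantics, and only one of them invalidates anything. Names below are the driver's own.]

/*
 * Sketch only -- a "sync" merely polls for completion of a previously
 * issued range invalidation; it invalidates nothing by itself. Wiring
 * it to .flush_iotlb_all therefore left stale TLB entries in place.
 */
static void sketch_flush_iotlb_all(struct iommu_domain *domain)
{
	/* must invalidate the whole TLB, not merely wait for one */
	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}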
Fixes: 4d689b619445 ("iommu/io-pgtable-arm-v7s: Convert to IOMMU API TLB sync") Signed-off-by: Yong Wu Reviewed-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 67a483c1a935..76b9388cf689 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -447,7 +447,7 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) { - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); + mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data()); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, From da3cc91b8db403728cde03c8a95cba268d8cbf1b Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:03 +0800 Subject: [PATCH 46/62] iommu/mediatek: Add a new tlb_lock for tlb_flush The commit 4d689b619445 ("iommu/io-pgtable-arm-v7s: Convert to IOMMU API TLB sync") help move the tlb_sync of unmap from v7s into the iommu framework. It helps add a new function "mtk_iommu_iotlb_sync", But it lacked the lock, then it will cause the variable "tlb_flush_active" may be changed unexpectedly, we could see this warning log randomly: mtk-iommu 10205000.iommu: Partial TLB flush timed out, falling back to full flush The HW requires tlb_flush/tlb_sync in pairs strictly, this patch adds a new tlb_lock for tlb operations to fix this issue. Fixes: 4d689b619445 ("iommu/io-pgtable-arm-v7s: Convert to IOMMU API TLB sync") Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 23 ++++++++++++++++++++++- drivers/iommu/mtk_iommu.h | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 76b9388cf689..c2f6c78fee44 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -219,22 +219,37 @@ static void mtk_iommu_tlb_sync(void *cookie) static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); mtk_iommu_tlb_sync(cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); mtk_iommu_tlb_sync(cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { + struct mtk_iommu_data *data = cookie; + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); + spin_unlock_irqrestore(&data->tlb_lock, flags); } static const struct iommu_flush_ops mtk_iommu_flush_ops = { @@ -453,7 +468,12 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); + unsigned long flags; + + spin_lock_irqsave(&data->tlb_lock, flags); + mtk_iommu_tlb_sync(data); + spin_unlock_irqrestore(&data->tlb_lock, 
flags); } static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, @@ -733,6 +753,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + spin_lock_init(&data->tlb_lock); list_add_tail(&data->list, &m4ulist); if (!iommu_present(&platform_bus_type)) diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index fc0f16eabacd..8cae22de7663 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -58,6 +58,7 @@ struct mtk_iommu_data { struct iommu_group *m4u_group; bool enable_4GB; bool tlb_flush_active; + spinlock_t tlb_lock; /* lock for tlb range flush */ struct iommu_device iommu; const struct mtk_iommu_plat_data *plat_data; From a7a04ea34e1c483d10d3e72250ff5503b1076fe3 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:04 +0800 Subject: [PATCH 47/62] iommu/mediatek: Use gather to achieve the tlb range flush Use the iommu_gather mechanism to achieve the tlb range flush. Gather the iova range in the "tlb_add_page", then flush the merged iova range in iotlb_sync. Suggested-by: Tomasz Figa Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c2f6c78fee44..81ac95fe6f3b 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -245,11 +245,9 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, void *cookie) { struct mtk_iommu_data *data = cookie; - unsigned long flags; + struct iommu_domain *domain = &data->m4u_dom->domain; - spin_lock_irqsave(&data->tlb_lock, flags); - mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); - spin_unlock_irqrestore(&data->tlb_lock, flags); + iommu_iotlb_gather_add_page(domain, gather, iova, granule); } static const struct iommu_flush_ops mtk_iommu_flush_ops = { @@ -469,9 +467,15 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); + size_t length = gather->end - gather->start; unsigned long flags; + if (gather->start == ULONG_MAX) + return; + spin_lock_irqsave(&data->tlb_lock, flags); + mtk_iommu_tlb_add_flush_nosync(gather->start, length, gather->pgsize, + false, data); mtk_iommu_tlb_sync(data); spin_unlock_irqrestore(&data->tlb_lock, flags); } From 67caf7e2b5a4701b24ef8ccc3b49f4e24f473fc8 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:05 +0800 Subject: [PATCH 48/62] iommu/mediatek: Delete the leaf in the tlb_flush In our tlb range flush, we don't care the "leaf". Remove it to simplify the code. no functional change. "granule" also is unnecessary for us, Keep it satisfy the format of tlb_flush_walk. 
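[Editor's note: sharing one handler is safe because the walk and leaf callbacks have identical prototypes. For reference, the relevant members of struct iommu_flush_ops as they stand at this point in the series:]

/* From include/linux/io-pgtable.h: */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size,
			       size_t granule, void *cookie);
	void (*tlb_flush_leaf)(unsigned long iova, size_t size,
			       size_t granule, void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule,
			     void *cookie);
};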
Signed-off-by: Yong Wu Reviewed-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 81ac95fe6f3b..1d7254cf8b65 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -174,8 +174,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie) } static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, - void *cookie) + size_t granule, void *cookie) { struct mtk_iommu_data *data = cookie; @@ -223,19 +222,7 @@ static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, unsigned long flags; spin_lock_irqsave(&data->tlb_lock, flags); - mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); - mtk_iommu_tlb_sync(cookie); - spin_unlock_irqrestore(&data->tlb_lock, flags); -} - -static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - struct mtk_iommu_data *data = cookie; - unsigned long flags; - - spin_lock_irqsave(&data->tlb_lock, flags); - mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); + mtk_iommu_tlb_add_flush_nosync(iova, size, granule, cookie); mtk_iommu_tlb_sync(cookie); spin_unlock_irqrestore(&data->tlb_lock, flags); } @@ -253,7 +240,7 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_walk = mtk_iommu_tlb_flush_walk, - .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, + .tlb_flush_leaf = mtk_iommu_tlb_flush_walk, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, }; @@ -475,7 +462,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, spin_lock_irqsave(&data->tlb_lock, flags); mtk_iommu_tlb_add_flush_nosync(gather->start, length, gather->pgsize, - false, data); + data); mtk_iommu_tlb_sync(data); spin_unlock_irqrestore(&data->tlb_lock, flags); } From 1f4fd6248139b5406bb9cf1dde25dbec05f6c57e Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:06 +0800 Subject: [PATCH 49/62] iommu/mediatek: Move the tlb_sync into tlb_flush Right now, the tlb_add_flush_nosync and tlb_sync always appear together. we merge the two functions into one(also move the tlb_lock into the new function). No functional change. 
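[Editor's sketch of the invariant the merge establishes, with the hardware steps elided to comments: trigger and completion poll now sit inside one critical section, so two CPUs can no longer interleave their flush/sync sequences on the same M4U.]

static void sketch_tlb_flush_range_sync(unsigned long iova, size_t size,
					void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;

	spin_lock_irqsave(&data->tlb_lock, flags);
	/* 1) program iova .. iova + size - 1 and kick REG_MMU_INVALIDATE */
	/* 2) poll REG_MMU_CPE_DONE for completion (the former tlb_sync) */
	/* 3) clear the completion status */
	spin_unlock_irqrestore(&data->tlb_lock, flags);
}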
Signed-off-by: Chao Hao Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 45 +++++++++------------------------------ drivers/iommu/mtk_iommu.h | 1 - 2 files changed, 10 insertions(+), 36 deletions(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 1d7254cf8b65..0e5f41fab86e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -173,12 +173,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie) } } -static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, +static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, size_t granule, void *cookie) { struct mtk_iommu_data *data = cookie; + unsigned long flags; + int ret; + u32 tmp; for_each_m4u(data) { + spin_lock_irqsave(&data->tlb_lock, flags); writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); @@ -187,21 +191,8 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, data->base + REG_MMU_INVLD_END_A); writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); - data->tlb_flush_active = true; - } -} - -static void mtk_iommu_tlb_sync(void *cookie) -{ - struct mtk_iommu_data *data = cookie; - int ret; - u32 tmp; - - for_each_m4u(data) { - /* Avoid timing out if there's nothing to wait for */ - if (!data->tlb_flush_active) - return; + /* tlb sync */ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, tmp != 0, 10, 100000); if (ret) { @@ -211,22 +202,10 @@ static void mtk_iommu_tlb_sync(void *cookie) } /* Clear the CPE status */ writel_relaxed(0, data->base + REG_MMU_CPE_DONE); - data->tlb_flush_active = false; + spin_unlock_irqrestore(&data->tlb_lock, flags); } } -static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - struct mtk_iommu_data *data = cookie; - unsigned long flags; - - spin_lock_irqsave(&data->tlb_lock, flags); - mtk_iommu_tlb_add_flush_nosync(iova, size, granule, cookie); - mtk_iommu_tlb_sync(cookie); - spin_unlock_irqrestore(&data->tlb_lock, flags); -} - static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) @@ -239,8 +218,8 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, - .tlb_flush_walk = mtk_iommu_tlb_flush_walk, - .tlb_flush_leaf = mtk_iommu_tlb_flush_walk, + .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync, + .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, }; @@ -455,16 +434,12 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, { struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); size_t length = gather->end - gather->start; - unsigned long flags; if (gather->start == ULONG_MAX) return; - spin_lock_irqsave(&data->tlb_lock, flags); - mtk_iommu_tlb_add_flush_nosync(gather->start, length, gather->pgsize, + mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize, data); - mtk_iommu_tlb_sync(data); - spin_unlock_irqrestore(&data->tlb_lock, flags); } static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 8cae22de7663..ea949a324e33 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -57,7 +57,6 @@ struct mtk_iommu_data { struct mtk_iommu_domain *m4u_dom; struct iommu_group *m4u_group; bool enable_4GB; - bool 
tlb_flush_active; spinlock_t tlb_lock; /* lock for tlb range flush */ struct iommu_device iommu; From 60829b4d00aa2f45f419ff62fb487063153a1d71 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:07 +0800 Subject: [PATCH 50/62] iommu/mediatek: Get rid of the pgtlock Now we have tlb_lock for the HW tlb flush, then pgtable code hasn't needed the external "pgtlock" for a while. this patch remove the "pgtlock". Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 0e5f41fab86e..c2b7ed5d0f7c 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -101,8 +101,6 @@ #define MTK_M4U_TO_PORT(id) ((id) & 0x1f) struct mtk_iommu_domain { - spinlock_t pgtlock; /* lock for page table */ - struct io_pgtable_cfg cfg; struct io_pgtable_ops *iop; @@ -295,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) { struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - spin_lock_init(&dom->pgtlock); - dom->cfg = (struct io_pgtable_cfg) { .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | @@ -395,18 +391,13 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, { struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - unsigned long flags; - int ret; /* The "4GB mode" M4U physically can not use the lower remap of Dram. */ if (data->enable_4GB) paddr |= BIT_ULL(32); - spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr, size, prot); - spin_unlock_irqrestore(&dom->pgtlock, flags); - - return ret; + /* Synchronize with the tlb_lock */ + return dom->iop->map(dom->iop, iova, paddr, size, prot); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, @@ -414,14 +405,8 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - unsigned long flags; - size_t unmapsz; - spin_lock_irqsave(&dom->pgtlock, flags); - unmapsz = dom->iop->unmap(dom->iop, iova, size, gather); - spin_unlock_irqrestore(&dom->pgtlock, flags); - - return unmapsz; + return dom->iop->unmap(dom->iop, iova, size, gather); } static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) @@ -447,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, { struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - unsigned long flags; phys_addr_t pa; - spin_lock_irqsave(&dom->pgtlock, flags); pa = dom->iop->iova_to_phys(dom->iop, iova); - spin_unlock_irqrestore(&dom->pgtlock, flags); - if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) pa &= ~BIT_ULL(32); From c90ae4a63541e05f77815762ddb0f7aee917b083 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 4 Nov 2019 15:01:08 +0800 Subject: [PATCH 51/62] iommu/mediatek: Reduce the tlb flush timeout value Reduce the tlb timeout value from 100000us to 1000us. The original value would make the kernel stuck for 100 ms with interrupts disabled, which could have other side effects. The flush is expected to always take much less than 1 ms, so use that instead. 
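[Editor's note: the arguments of the poll helper being tuned here are, in order: address, scratch variable, exit condition, delay between reads in us, and total timeout in us. In atomic context the whole wait is a busy-spin with interrupts off, so the final argument directly bounds the stall. A sketch of the call as tuned, mirroring the patched site:]

/*
 * Re-read REG_MMU_CPE_DONE every 10us, give up after 1000us total;
 * returns 0 on success or -ETIMEDOUT if the condition never held.
 */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 1000);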
Signed-off-by: Yong Wu Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c2b7ed5d0f7c..8ca2e99964fe 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -192,7 +192,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, /* tlb sync */ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, - tmp, tmp != 0, 10, 100000); + tmp, tmp != 0, 10, 1000); if (ret) { dev_warn(data->dev, "Partial TLB flush timed out, falling back to full flush\n"); From 77cf983892b2e0d40dc256b784930a9ffaad4fc8 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:45 +0900 Subject: [PATCH 52/62] iommu/ipmmu-vmsa: Remove all unused register definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To support different registers memory mapping hardware easily in the future, this patch removes all unused register definitions. Signed-off-by: Yoshihiro Shimoda Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 76 -------------------------------------- 1 file changed, 76 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 67bdf8e5c8ef..2575fd3509fd 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -102,122 +102,46 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IM_CTX_SIZE 0x40 #define IMCTR 0x0000 -#define IMCTR_TRE (1 << 17) -#define IMCTR_AFE (1 << 16) -#define IMCTR_RTSEL_MASK (3 << 4) -#define IMCTR_RTSEL_SHIFT 4 -#define IMCTR_TREN (1 << 3) #define IMCTR_INTEN (1 << 2) #define IMCTR_FLUSH (1 << 1) #define IMCTR_MMUEN (1 << 0) -#define IMCAAR 0x0004 - #define IMTTBCR 0x0008 #define IMTTBCR_EAE (1 << 31) -#define IMTTBCR_PMB (1 << 30) -#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */ -#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */ -#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */ -#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */ -#define IMTTBCR_TSZ1_MASK (7 << 16) -#define IMTTBCR_TSZ1_SHIFT 16 -#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */ -#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */ #define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ -#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */ #define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */ -#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */ #define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 
only */ -#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */ -#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */ -#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */ -#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */ #define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ -#define IMTTBCR_SL0_LVL_2 (0 << 4) #define IMTTBCR_SL0_LVL_1 (1 << 4) -#define IMTTBCR_TSZ0_MASK (7 << 0) -#define IMTTBCR_TSZ0_SHIFT O #define IMBUSCR 0x000c #define IMBUSCR_DVM (1 << 2) -#define IMBUSCR_BUSSEL_SYS (0 << 0) -#define IMBUSCR_BUSSEL_CCI (1 << 0) -#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) -#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) #define IMBUSCR_BUSSEL_MASK (3 << 0) #define IMTTLBR0 0x0010 #define IMTTUBR0 0x0014 -#define IMTTLBR1 0x0018 -#define IMTTUBR1 0x001c #define IMSTR 0x0020 -#define IMSTR_ERRLVL_MASK (3 << 12) -#define IMSTR_ERRLVL_SHIFT 12 -#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) -#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) -#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) -#define IMSTR_ERRCODE_MASK (7 << 8) #define IMSTR_MHIT (1 << 4) #define IMSTR_ABORT (1 << 2) #define IMSTR_PF (1 << 1) #define IMSTR_TF (1 << 0) #define IMMAIR0 0x0028 -#define IMMAIR1 0x002c -#define IMMAIR_ATTR_MASK 0xff -#define IMMAIR_ATTR_DEVICE 0x04 -#define IMMAIR_ATTR_NC 0x44 -#define IMMAIR_ATTR_WBRWA 0xff -#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) -#define IMMAIR_ATTR_IDX_NC 0 -#define IMMAIR_ATTR_IDX_WBRWA 1 -#define IMMAIR_ATTR_IDX_DEV 2 #define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */ #define IMEUAR 0x0034 /* R-Car Gen3 only */ -#define IMPCTR 0x0200 -#define IMPSTR 0x0208 -#define IMPEAR 0x020c -#define IMPMBA(n) (0x0280 + ((n) * 4)) -#define IMPMBD(n) (0x02c0 + ((n) * 4)) - #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) #define IMUCTR0(n) (0x0300 + ((n) * 16)) #define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) -#define IMUCTR_FIXADDEN (1 << 31) -#define IMUCTR_FIXADD_MASK (0xff << 16) -#define IMUCTR_FIXADD_SHIFT 16 #define IMUCTR_TTSEL_MMU(n) ((n) << 4) -#define IMUCTR_TTSEL_PMB (8 << 4) -#define IMUCTR_TTSEL_MASK (15 << 4) #define IMUCTR_FLUSH (1 << 1) #define IMUCTR_MMUEN (1 << 0) #define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n)) #define IMUASID0(n) (0x0308 + ((n) * 16)) #define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) -#define IMUASID_ASID8_MASK (0xff << 8) -#define IMUASID_ASID8_SHIFT 8 -#define IMUASID_ASID0_MASK (0xff << 0) -#define IMUASID_ASID0_SHIFT 0 /* ----------------------------------------------------------------------------- * Root device handling From df9828aaa43256bda7e26573c44af25e78596b09 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:46 +0900 Subject: [PATCH 53/62] iommu/ipmmu-vmsa: tidyup register definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To support different registers memory mapping hardware easily in the future, this patch tidies up the register definitions as below: - Add comments to state to which SoCs or SoC families they apply - Add categories about MMU "context" and uTLB registers No change behavior. 
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 56 ++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 2575fd3509fd..fb1395bb0c87 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -101,47 +101,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IM_CTX_SIZE 0x40 -#define IMCTR 0x0000 -#define IMCTR_INTEN (1 << 2) -#define IMCTR_FLUSH (1 << 1) -#define IMCTR_MMUEN (1 << 0) +/* MMU "context" registers */ +#define IMCTR 0x0000 /* R-Car Gen2/3 */ +#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */ +#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ -#define IMTTBCR 0x0008 -#define IMTTBCR_EAE (1 << 31) +#define IMTTBCR 0x0008 /* R-Car Gen2/3 */ +#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */ #define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ #define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ #define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ #define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ -#define IMTTBCR_SL0_LVL_1 (1 << 4) +#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */ -#define IMBUSCR 0x000c -#define IMBUSCR_DVM (1 << 2) -#define IMBUSCR_BUSSEL_MASK (3 << 0) +#define IMBUSCR 0x000c /* R-Car Gen2 only */ +#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */ +#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */ -#define IMTTLBR0 0x0010 -#define IMTTUBR0 0x0014 +#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */ +#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */ -#define IMSTR 0x0020 -#define IMSTR_MHIT (1 << 4) -#define IMSTR_ABORT (1 << 2) -#define IMSTR_PF (1 << 1) -#define IMSTR_TF (1 << 0) +#define IMSTR 0x0020 /* R-Car Gen2/3 */ +#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */ +#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */ +#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */ +#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */ -#define IMMAIR0 0x0028 +#define IMMAIR0 0x0028 /* R-Car Gen2/3 */ -#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */ -#define IMEUAR 0x0034 /* R-Car Gen3 only */ +#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */ +#define IMEUAR 0x0034 /* R-Car Gen3 only */ +/* uTLB registers */ #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) -#define IMUCTR0(n) (0x0300 + ((n) * 16)) -#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) -#define IMUCTR_TTSEL_MMU(n) ((n) << 4) -#define IMUCTR_FLUSH (1 << 1) -#define IMUCTR_MMUEN (1 << 0) +#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */ +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */ +#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ #define IMUASID(n) ((n) < 32 ? 
IMUASID0(n) : IMUASID32(n)) -#define IMUASID0(n) (0x0308 + ((n) * 16)) -#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) +#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */ /* ----------------------------------------------------------------------------- * Root device handling From 16d9454f5e0447f9c19cbf350b35ed377b9f64eb Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:47 +0900 Subject: [PATCH 54/62] iommu/ipmmu-vmsa: Add helper functions for MMU "context" registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we will have changed memory mapping of the IPMMU in the future, This patch adds helper functions ipmmu_ctx_{reg,read,write}() for MMU "context" registers. No behavior change. Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index fb1395bb0c87..44c6bc2976dc 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -190,29 +190,43 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, iowrite32(data, mmu->base + offset); } +static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) +{ + return context_id * IM_CTX_SIZE + reg; +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) +{ + return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg)); +} + +static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg, u32 data) +{ + ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data); +} + static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, unsigned int reg) { - return ipmmu_read(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg); + return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); } static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, unsigned int reg, u32 data) { - ipmmu_write(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); } static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, unsigned int reg, u32 data) { if (domain->mmu != domain->mmu->root) - ipmmu_write(domain->mmu, - domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); - ipmmu_write(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); } /* ----------------------------------------------------------------------------- @@ -913,7 +927,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) /* Disable all contexts. 
*/ for (i = 0; i < mmu->num_ctx; ++i) - ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); + ipmmu_ctx_write(mmu, i, IMCTR, 0); } static const struct ipmmu_features ipmmu_features_default = { From 3dc28d9f59eaae41461542b27afe70339347ebb3 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:48 +0900 Subject: [PATCH 55/62] iommu/ipmmu-vmsa: Calculate context registers' offset instead of a macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we will have changed memory mapping of the IPMMU in the future, this patch uses ipmmu_features values instead of a macro to calculate context registers offset. No behavior change. Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 44c6bc2976dc..c5a435a78333 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -50,6 +50,8 @@ struct ipmmu_features { bool twobit_imttbcr_sl0; bool reserved_context; bool cache_snoop; + unsigned int ctx_offset_base; + unsigned int ctx_offset_stride; }; struct ipmmu_vmsa_device { @@ -99,8 +101,6 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IM_NS_ALIAS_OFFSET 0x800 -#define IM_CTX_SIZE 0x40 - /* MMU "context" registers */ #define IMCTR 0x0000 /* R-Car Gen2/3 */ #define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */ @@ -193,7 +193,8 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, unsigned int context_id, unsigned int reg) { - return context_id * IM_CTX_SIZE + reg; + return mmu->features->ctx_offset_base + + context_id * mmu->features->ctx_offset_stride + reg; } static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, @@ -939,6 +940,8 @@ static const struct ipmmu_features ipmmu_features_default = { .twobit_imttbcr_sl0 = false, .reserved_context = false, .cache_snoop = true, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { @@ -950,6 +953,8 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .twobit_imttbcr_sl0 = true, .reserved_context = true, .cache_snoop = false, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, }; static const struct of_device_id ipmmu_of_ids[] = { From 3667c9978b2911dc1ded77f5971df477885409c4 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:49 +0900 Subject: [PATCH 56/62] iommu/ipmmu-vmsa: Add helper functions for "uTLB" registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we will have changed memory mapping of the IPMMU in the future, This patch adds helper functions ipmmu_utlb_reg() and ipmmu_imu{asid,ctr}_write() for "uTLB" registers. No behavior change. 
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index c5a435a78333..ed591a2b175b 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -230,6 +230,23 @@ static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); } +static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) +{ + return reg; +} + +static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data); +} + +static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data); +} + /* ----------------------------------------------------------------------------- * TLB and microTLB Management */ @@ -275,11 +292,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, */ /* TODO: What should we set the ASID to ? */ - ipmmu_write(mmu, IMUASID(utlb), 0); + ipmmu_imuasid_write(mmu, utlb, 0); /* TODO: Do we need to flush the microTLB ? */ - ipmmu_write(mmu, IMUCTR(utlb), - IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | - IMUCTR_MMUEN); + ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | + IMUCTR_FLUSH | IMUCTR_MMUEN); mmu->utlb_ctx[utlb] = domain->context_id; } @@ -291,7 +307,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, { struct ipmmu_vmsa_device *mmu = domain->mmu; - ipmmu_write(mmu, IMUCTR(utlb), 0); + ipmmu_imuctr_write(mmu, utlb, 0); mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; } From 1289f7f15001c7ed36be6d23cb145c1d5feacdc8 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 6 Nov 2019 11:35:50 +0900 Subject: [PATCH 57/62] iommu/ipmmu-vmsa: Add utlb_offset_base MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we will have changed memory mapping of the IPMMU in the future, this patch adds a utlb_offset_base into struct ipmmu_features for IMUCTR and IMUASID registers. No behavior change. 
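[Editor's illustration of the payoff: a hypothetical feature entry -- the 0x3000 offset and the struct name are invented for this example -- showing that a relocated uTLB register bank now needs no accessor changes at all, only a new table entry.]

/* Hypothetical illustration only -- not a real SoC. */
static const struct ipmmu_features ipmmu_features_hypothetical_soc = {
	.setup_imbuscr		= false,
	.twobit_imttbcr_sl0	= true,
	.reserved_context	= true,
	.cache_snoop		= false,
	.ctx_offset_base	= 0,
	.ctx_offset_stride	= 0x40,
	.utlb_offset_base	= 0x3000,	/* invented: relocated uTLB bank */
};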
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Niklas Söderlund Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ed591a2b175b..7ef9c199ab06 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -52,6 +52,7 @@ struct ipmmu_features { bool cache_snoop; unsigned int ctx_offset_base; unsigned int ctx_offset_stride; + unsigned int utlb_offset_base; }; struct ipmmu_vmsa_device { @@ -232,7 +233,7 @@ static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) { - return reg; + return mmu->features->utlb_offset_base + reg; } static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, @@ -958,6 +959,7 @@ static const struct ipmmu_features ipmmu_features_default = { .cache_snoop = true, .ctx_offset_base = 0, .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { @@ -971,6 +973,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .cache_snoop = false, .ctx_offset_base = 0, .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, }; static const struct of_device_id ipmmu_of_ids[] = { From f7aff1a93f52047739af31072de0ad8d149641f3 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Mon, 11 Nov 2019 12:17:20 +0100 Subject: [PATCH 58/62] iommu/arm-smmu-v3: Don't display an error when IRQ lines are missing Since commit 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()"), platform_get_irq_byname() displays an error when the IRQ isn't found. Since the SMMUv3 driver uses that function to query which interrupt method is available, the message is now displayed during boot for any SMMUv3 that doesn't implement the combined interrupt, or that implements MSIs. [ 20.700337] arm-smmu-v3 arm-smmu-v3.7.auto: IRQ combined not found [ 20.706508] arm-smmu-v3 arm-smmu-v3.7.auto: IRQ eventq not found [ 20.712503] arm-smmu-v3 arm-smmu-v3.7.auto: IRQ priq not found [ 20.718325] arm-smmu-v3 arm-smmu-v3.7.auto: IRQ gerror not found Use platform_get_irq_byname_optional() to avoid displaying a spurious error. 
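[Editor's sketch of the probing pattern in general form; the "wakeup" IRQ name is invented. The _optional variant returns exactly what platform_get_irq_byname() does -- the IRQ number on success, a negative errno otherwise -- it just never logs, so a missing line can be treated as an ordinary branch.]

static void sketch_probe_optional_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname_optional(pdev, "wakeup");

	if (irq > 0)
		dev_info(&pdev->dev, "optional wakeup IRQ: %d\n", irq);
	/* irq < 0 simply means "not wired up"; deliberately silent */
}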
Fixes: 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()") Signed-off-by: Jean-Philippe Brucker Acked-by: Will Deacon Signed-off-by: Joerg Roedel --- drivers/iommu/arm-smmu-v3.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 3f20e548f1ec..9be6e0a8fe9a 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Interrupt lines */ - irq = platform_get_irq_byname(pdev, "combined"); + irq = platform_get_irq_byname_optional(pdev, "combined"); if (irq > 0) smmu->combined_irq = irq; else { - irq = platform_get_irq_byname(pdev, "eventq"); + irq = platform_get_irq_byname_optional(pdev, "eventq"); if (irq > 0) smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "priq"); + irq = platform_get_irq_byname_optional(pdev, "priq"); if (irq > 0) smmu->priq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "gerror"); + irq = platform_get_irq_byname_optional(pdev, "gerror"); if (irq > 0) smmu->gerr_irq = irq; } From 34d1b0895dbd10713c73615d8f532e78509e12d9 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Mon, 11 Nov 2019 12:17:21 +0100 Subject: [PATCH 59/62] iommu/arm-smmu: Remove duplicate error message Since commit 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()"), platform_get_irq() displays an error when the IRQ isn't found. Remove the error print from the SMMU driver. Note the slight change of behaviour: no message is printed if platform_get_irq() returns -EPROBE_DEFER, which probably doesn't concern the SMMU. Fixes: 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()") Signed-off-by: Jean-Philippe Brucker Acked-by: Will Deacon Signed-off-by: Joerg Roedel --- drivers/iommu/arm-smmu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 424ebf38cd09..f05c821dd181 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2095,10 +2095,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev) for (i = 0; i < num_irqs; ++i) { int irq = platform_get_irq(pdev, i); - if (irq < 0) { - dev_err(dev, "failed to get irq index %d\n", i); + if (irq < 0) return -ENODEV; - } smmu->irqs[i] = irq; } From f036c7fa0ab60b7ea47560c32c78e435eb1cd214 Mon Sep 17 00:00:00 2001 From: Yian Chen Date: Thu, 17 Oct 2019 04:39:19 -0700 Subject: [PATCH 60/62] iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved VT-d RMRR (Reserved Memory Region Reporting) regions are reserved for device use only and should not be part of allocable memory pool of OS. BIOS e820_table reports complete memory map to OS, including OS usable memory ranges and BIOS reserved memory ranges etc. x86 BIOS may not be trusted to include RMRR regions as reserved type of memory in its e820 memory map, hence validate every RMRR entry with the e820 memory map to make sure the RMRR regions will not be used by OS for any other purposes. 
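[Editor's sketch of the logic the patch adds on x86; note the "+ 1": the DMAR RMRR limit address is inclusive, while e820__mapped_all() takes an exclusive end.]

static int sketch_rmrr_reserved(struct acpi_dmar_reserved_memory *rmrr)
{
	u64 start = rmrr->base_address;
	u64 end = rmrr->end_address + 1;	/* inclusive -> exclusive */

	/* pass only if every byte of the range is E820_TYPE_RESERVED */
	return e820__mapped_all(start, end, E820_TYPE_RESERVED) ? 0 : -EINVAL;
}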
ia64 EFI is working fine so implement RMRR validation as a dummy function Reviewed-by: Lu Baolu Reviewed-by: Sohil Mehta Signed-off-by: Yian Chen Signed-off-by: Joerg Roedel --- arch/ia64/include/asm/iommu.h | 5 +++++ arch/x86/include/asm/iommu.h | 18 ++++++++++++++++++ drivers/iommu/intel-iommu.c | 8 +++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h index 7904f591a79b..eb0db20c9d4c 100644 --- a/arch/ia64/include/asm/iommu.h +++ b/arch/ia64/include/asm/iommu.h @@ -2,6 +2,8 @@ #ifndef _ASM_IA64_IOMMU_H #define _ASM_IA64_IOMMU_H 1 +#include + /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) @@ -9,6 +11,9 @@ extern void no_iommu_init(void); #ifdef CONFIG_INTEL_IOMMU extern int force_iommu, no_iommu; extern int iommu_detected; + +static inline int __init +arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; } #else #define no_iommu (1) #define iommu_detected (0) diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index b91623d521d9..bf1ed2ddc74b 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -2,10 +2,28 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H +#include + +#include + extern int force_iommu, no_iommu; extern int iommu_detected; /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) +static inline int __init +arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) +{ + u64 start = rmrr->base_address; + u64 end = rmrr->end_address + 1; + + if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) + return 0; + + pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", + start, end - 1); + return -EINVAL; +} + #endif /* _ASM_X86_IOMMU_H */ diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index e70cc1c5055f..f168cd8ee570 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4311,13 +4311,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; + int ret; + + rmrr = (struct acpi_dmar_reserved_memory *)header; + ret = arch_rmrr_sanity_check(rmrr); + if (ret) + return ret; rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) goto out; rmrru->hdr = header; - rmrr = (struct acpi_dmar_reserved_memory *)header; + rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; From 6c3a44ed3c553c324845744f30bcd1d3b07d61fd Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sun, 10 Nov 2019 09:27:44 -0800 Subject: [PATCH 61/62] iommu/vt-d: Turn off translations at shutdown The intel-iommu driver assumes that the iommu state is cleaned up at the start of the new kernel. But, when we try to kexec boot something other than the Linux kernel, the cleanup cannot be relied upon. Hence, cleanup before we go down for reboot. Keeping the cleanup at initialization also, in case BIOS leaves the IOMMU enabled. I considered turning off iommu only during kexec reboot, but a clean shutdown seems always a good idea. But if someone wants to make it conditional, such as VMM live update, we can do that. There doesn't seem to be such a condition at this time. Tested that before, the info message 'DMAR: Translation was enabled for but we are not in kdump mode' would be reported for each iommu. 
Signed-off-by: Deepa Dinamani
Acked-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/dmar.c        |  5 ++++-
 drivers/iommu/intel-iommu.c | 20 ++++++++++++++++++++
 include/linux/dmar.h        |  2 ++
 3 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index eecd6a421667..3acfa6a25fa2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
 	}
 
 #ifdef CONFIG_X86
-	if (!ret)
+	if (!ret) {
 		x86_init.iommu.iommu_init = intel_iommu_init;
+		x86_platform.iommu_shutdown = intel_iommu_shutdown;
+	}
+
 #endif
 
 	if (dmar_tbl) {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f168cd8ee570..10ee8fa4b1b8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4762,6 +4762,26 @@ static void intel_disable_iommus(void)
 		iommu_disable_translation(iommu);
 }
 
+void intel_iommu_shutdown(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	if (no_iommu || dmar_disabled)
+		return;
+
+	down_write(&dmar_global_lock);
+
+	/* Disable PMRs explicitly here. */
+	for_each_iommu(iommu, drhd)
+		iommu_disable_protect_mem_regions(iommu);
+
+	/* Make sure the IOMMUs are switched off */
+	intel_disable_iommus();
+
+	up_write(&dmar_global_lock);
+}
+
 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
 {
 	struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index a7cf3599d9a1..f64ca27dc210 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -129,6 +129,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
 #ifdef CONFIG_INTEL_IOMMU
 extern int iommu_detected, no_iommu;
 extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
 extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
@@ -137,6 +138,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
 extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
 #define	dmar_parse_one_rmrr	dmar_res_noop
 #define	dmar_parse_one_atsr	dmar_res_noop

From 5b47748ecf2e3b7e346d6ce136e1c57239f995b0 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Mon, 11 Nov 2019 18:55:18 +0000
Subject: [PATCH 62/62] iommu/rockchip: Don't provoke WARN for harmless IRQs

Although we don't generally expect IRQs to fire for a suspended IOMMU,
there are certain situations (particularly with debug options) where
we might legitimately end up with the pm_runtime_get_if_in_use() call
from rk_iommu_irq() returning 0. Since this doesn't represent an
actual error, follow the other parts of the driver and save the
WARN_ON() condition for a genuine negative value. Even if we do have
spurious IRQs due to a wedged VOP asserting the shared line, it's not
this driver's job to try to second-guess the IRQ core to warn about
that.
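For reference, pm_runtime_get_if_in_use() distinguishes three
outcomes, which the one-line change below encodes compactly; expanded
for clarity (a sketch of the equivalent logic, not the actual driver
code):

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (!err)			/* 0: device not in use; the IRQ is harmless */
		return ret;
	if (WARN_ON_ONCE(err < 0))	/* <0: a genuine error, e.g. runtime PM disabled */
		return ret;
	/* 1: the usage count was incremented; safe to touch the hardware */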
Reported-by: Vasily Khoruzhick
Signed-off-by: Robin Murphy
Acked-by: Marc Zyngier
Signed-off-by: Joerg Roedel
---
 drivers/iommu/rockchip-iommu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index e845bd01a1a2..9be032236a03 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -526,7 +526,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 	int i, err;
 
 	err = pm_runtime_get_if_in_use(iommu->dev);
-	if (WARN_ON_ONCE(err <= 0))
+	if (!err || WARN_ON_ONCE(err < 0))
 		return ret;
 
 	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))