IOMMU Fix for Linux v5.8-rc6

One fix:
 
 	- Fix a NULL-ptr dereference in the QCOM IOMMU driver
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAl8bJqsACgkQK/BELZcB
 GuMPOhAA3wwo+FUDabwY8LEwMdiW+WfHkD7PaUb97vWMwFkCl2LhWr8bqawa41jr
 7jUN12G760JUGlN0Jf/ZNv5RgCrjOn8GnBzMnLZOSvyWkidHpKTA8F4slqx7Xj+R
 6VS0R7eXYKlosW8tblOJYMdgsHDbPCyP1xBlBKX/4qk/vyBbU87G5bXP4J/IvQdb
 MkRaLkP8TE9IlOkLku3YcdNU2WJxhTqPtP/o4PIP8rfmmXqhON/e0cUKz9D0ckc8
 0XhwWg0/Gop4DcMCtWMezhJf3ZLowsGVb1BYx7uok1mE7l3AKT/ElYsKgGdzrGXj
 /2N/XUluWsAReJgov29LXDUlFZBINygsbeJyKk6MkAFKXf95rsD7LSejI4XTHBBA
 OkgMWknwZcPgsZ6rYyFcpuHmX7t07lE/ELYaZLlDVXctNqCeC2Dd7xdktLhe+Zr/
 vpzbJYDWrEUeklJUg6YGRI8P0Ulnd4QgDKR5AZXl84obQl4E9b+Rf6nQmu0cGok3
 f7CUlQxCEwCHw7E5b0EFG6/lmzIEoy0fAhtIkBmnzgXnwBIC6bM1eKvoLEYmuMJ/
 kk9WzX53pCVpNiMs5UaFyFaxise02/VJzMnWETmzvy4hN/wl+qf781/D50QQmOjD
 xQxR4Fe44SbnnPOgIV54+4Uc4BY+s688TnjBVmCP4hYdyoUOqb8=
 =Vun9
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fix-v5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu into master

Pull iommu fix from Joerg Roedel:
 "Fix a NULL-ptr dereference in the QCOM IOMMU driver"

* tag 'iommu-fix-v5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/qcom: Use domain rather than dev as tlb cookie
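The gist of the fix: the io-pgtable TLB-maintenance callbacks are handed an opaque cookie chosen when the page-table ops are allocated. Using the device as that cookie meant the flush paths had to reach back through per-device state that may already be gone while the domain and its page tables live on, which is where the NULL-pointer dereference came from; using the domain, which now caches the fwspec it needs, keeps every lookup inside objects whose lifetime matches the page tables. Below is a minimal stand-alone sketch of that cookie pattern, with illustrative names only (struct domain, struct fwspec, tlb_flush_all are stand-ins, not the kernel's API):

/* cookie_sketch.c -- illustrative only, not kernel code */
#include <stdio.h>

struct fwspec { unsigned num_ids; unsigned ids[4]; };

/* The long-lived object: owns everything the TLB callback needs. */
struct domain {
	struct fwspec *fwspec;
};

/* Callback invoked with the opaque cookie picked at setup time. */
static void tlb_flush_all(void *cookie)
{
	struct domain *dom = cookie;	/* domain, not device */
	unsigned i;

	for (i = 0; i < dom->fwspec->num_ids; i++)
		printf("invalidate context for id %u\n", dom->fwspec->ids[i]);
}

int main(void)
{
	static struct fwspec fw = { .num_ids = 2, .ids = { 1, 2 } };
	struct domain dom = { .fwspec = &fw };	/* cached once, at init */

	/* Safe even after the originating device is gone. */
	tlb_flush_all(&dom);
	return 0;
}

The diff below makes exactly this shift: to_ctx() now takes the qcom_iommu_domain, and qcom_iommu_init_domain() stashes the fwspec in the domain before passing the domain to alloc_io_pgtable_ops().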
Linus Torvalds 2020-07-24 13:58:05 -07:00
commit 5a0b8af071
1 changed file with 17 additions and 20 deletions

@@ -65,6 +65,7 @@ struct qcom_iommu_domain {
 	struct mutex init_mutex; /* Protects iommu pointer */
 	struct iommu_domain domain;
 	struct qcom_iommu_dev *iommu;
+	struct iommu_fwspec *fwspec;
 };
 
 static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
 	return dev_iommu_priv_get(dev);
 }
 
-static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+	struct qcom_iommu_dev *qcom_iommu = d->iommu;
 	if (!qcom_iommu)
 		return NULL;
 	return qcom_iommu->ctxs[asid - 1];
@@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
 
 static void qcom_iommu_tlb_sync(void *cookie)
 {
-	struct iommu_fwspec *fwspec;
-	struct device *dev = cookie;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		unsigned int val, ret;
 
 		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
 
 static void qcom_iommu_tlb_inv_context(void *cookie)
 {
-	struct device *dev = cookie;
-	struct iommu_fwspec *fwspec;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
 	}
 
@@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 					    size_t granule, bool leaf, void *cookie)
 {
-	struct device *dev = cookie;
-	struct iommu_fwspec *fwspec;
+	struct qcom_iommu_domain *qcom_domain = cookie;
+	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
 	unsigned i, reg;
 
 	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-	fwspec = dev_iommu_fwspec_get(dev);
-
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 		size_t s = size;
 
 		iova = (iova >> 12) << 12;
@@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	};
 
 	qcom_domain->iommu = qcom_iommu;
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
+	qcom_domain->fwspec = fwspec;
+
+	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
 	if (!pgtbl_ops) {
 		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
 		ret = -ENOMEM;
@@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	domain->geometry.force_aperture = true;
 
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 
 		if (!ctx->secure_init) {
 			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
 
 	pm_runtime_get_sync(qcom_iommu->dev);
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
 
 		/* Disable the context bank: */
 		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);