iommu/vt-d: Move intel_iommu info from struct intel_svm to struct intel_svm_dev
'struct intel_svm' is shared by all devices bound to a given process,
but records only a single pointer to a 'struct intel_iommu'. Consequently,
cache invalidations may only be applied to a single DMAR unit, and are
erroneously skipped for the other devices.

In preparation for fixing this, rework the structures so that the iommu
pointer resides in 'struct intel_svm_dev', of which 'struct intel_svm'
already keeps one per bound device in its device list.
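
As a minimal standalone sketch of the reworked layout (the type and
field names mirror the kernel structures, but the list handling and
qi_submit_sync() are stubbed out here for illustration), each entry in
the device list now carries its own DMAR unit pointer, so a flush can
reach every unit instead of just one:

	#include <stdio.h>

	struct intel_iommu { const char *name; };	/* stand-in for a DMAR unit */

	struct intel_svm_dev {
		struct intel_svm_dev *next;	/* stands in for the list_head on svm->devs */
		struct intel_iommu *iommu;	/* new: per-device DMAR unit pointer */
	};

	struct intel_svm {
		struct intel_svm_dev *devs;	/* no single 'iommu' field any more */
	};

	static void qi_submit_sync(struct intel_iommu *iommu)
	{
		printf("invalidation submitted to %s\n", iommu->name);
	}

	static void flush_all_devs(struct intel_svm *svm)
	{
		struct intel_svm_dev *sdev;

		/* previously qi_submit_sync(svm->iommu, ...) reached one unit only */
		for (sdev = svm->devs; sdev; sdev = sdev->next)
			qi_submit_sync(sdev->iommu);
	}

	int main(void)
	{
		struct intel_iommu dmar0 = { "dmar0" }, dmar1 = { "dmar1" };
		struct intel_svm_dev d1 = { NULL, &dmar1 };
		struct intel_svm_dev d0 = { &d1, &dmar0 };
		struct intel_svm svm = { &d0 };

		flush_all_devs(&svm);	/* invalidates on both dmar0 and dmar1 */
		return 0;
	}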
Fixes: 1c4f88b7f1 ("iommu/vt-d: Shared virtual address in scalable mode")
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Raj Ashok <ashok.raj@intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Reported-by: Guo Kaijie <Kaijie.Guo@intel.com>
Reported-by: Xin Zeng <xin.zeng@intel.com>
Signed-off-by: Guo Kaijie <Kaijie.Guo@intel.com>
Signed-off-by: Xin Zeng <xin.zeng@intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Tested-by: Guo Kaijie <Kaijie.Guo@intel.com>
Cc: stable@vger.kernel.org # v5.0+
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1609949037-25291-2-git-send-email-yi.l.liu@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 	}
 	desc.qw2 = 0;
 	desc.qw3 = 0;
-	qi_submit_sync(svm->iommu, &desc, 1, 0);
+	qi_submit_sync(sdev->iommu, &desc, 1, 0);
 
 	if (sdev->dev_iotlb) {
 		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 		}
 		desc.qw2 = 0;
 		desc.qw3 = 0;
-		qi_submit_sync(svm->iommu, &desc, 1, 0);
+		qi_submit_sync(sdev->iommu, &desc, 1, 0);
 	}
 }
 
@@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
 					    svm->pasid, true);
 	rcu_read_unlock();
 
@@ -364,6 +364,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
 	}
 	sdev->dev = dev;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
+	sdev->iommu = iommu;
 
 	/* Only count users if device has aux domains */
 	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -548,6 +549,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 		goto out;
 	}
 	sdev->dev = dev;
+	sdev->iommu = iommu;
 
 	ret = intel_iommu_enable_pasid(iommu, dev);
 	if (ret) {
@@ -577,7 +579,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 			kfree(sdev);
 			goto out;
 		}
-		svm->iommu = iommu;
 
 		if (pasid_max > intel_pasid_max_id)
 			pasid_max = intel_pasid_max_id;
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -758,6 +758,7 @@ struct intel_svm_dev {
 	struct list_head list;
 	struct rcu_head rcu;
 	struct device *dev;
+	struct intel_iommu *iommu;
 	struct svm_dev_ops *ops;
 	struct iommu_sva sva;
 	u32 pasid;
@@ -771,7 +772,6 @@ struct intel_svm {
 	struct mmu_notifier notifier;
 	struct mm_struct *mm;
 
-	struct intel_iommu *iommu;
 	unsigned int flags;
 	u32 pasid;
 	int gpasid;	/* In case that guest PASID is different from host PASID */