IOMMU Updates for Linux v5.7
Including:

 - ARM-SMMU support for the TLB range invalidation command in SMMUv3.2

 - ARM-SMMU introduction of command batching helpers to batch up CD and
   ATC invalidation

 - ARM-SMMU support for PCI PASID, along with necessary PCI symbol
   exports

 - Introduce a generic (actually rename an existing) IOMMU-related
   pointer in struct device and reduce the number of IOMMU-related
   pointers

 - Some fixes for the OMAP IOMMU driver to make it build on 64-bit
   architectures

 - Various smaller fixes and improvements

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAl6MmlQACgkQK/BELZcB
GuP9ug//QtyPYRYdO4ltD6mPvfB7V0qdXksJz+ZVbPOMvqUs1jr1FVYFH1HkOVu5
mFD6OJuQQJrrGukXMERyVgDhUqNr+xHrkGS+X67NrOkUrguyvUfLYSU/GmOH/kdk
w1Smp7pTcHHAMmxGyQWTSFa9jSxKes5ZYBo065Z3/SlcIcTTkbw7V87N3RPrlnCX
s/K7CtSGnKJMpL9DVuNH27eqGlfiuIrQhj/vTQVSn1nF7TjaGKXaRXj+3fcUgrIt
KAfflWiTJncMY6WLjz65iiUtUvgA2Mmgn3CKJnWjgECd70+NybLQ9OAvQO+A2H6s
8XO9DsOOe8HFq/ljev1JGSw5LgB5Ip1RtSk7Ost6mkUFzLlaeTBJFQeHbECI9dne
hksRYL4R8bwiQu+MkQe7HLa6TDb+asqjsayIO3M1oIpF+8mIz/oNOGCeP0cqSiuj
lVMnblAWatrsZrf+AlxZKddIJWiduXoTjtpV64HTTvZeL4/g3kY0ykBXpS4xLj5V
s0KvR6kjR1LYUgpe9jJ3CJTdIlU4MzSlrtq4CYFZvRa7rBLmk2cGsR1jiA3GTGpn
bcqOQNgb5X1mpAzmOZb//pbjozgvCjQpQexyU4tRzs38yk+TK5OnOe5z4M1srHPY
7dTZoUEpAcRm4K+JFQ3+yOtxRTsINYyFUL/Qt8ALbWy4hXluRGY=
=nhuS
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - ARM-SMMU support for the TLB range invalidation command in SMMUv3.2

 - ARM-SMMU introduction of command batching helpers to batch up CD and
   ATC invalidation

 - ARM-SMMU support for PCI PASID, along with necessary PCI symbol
   exports

 - Introduce a generic (actually rename an existing) IOMMU-related
   pointer in struct device and reduce the number of IOMMU-related
   pointers

 - Some fixes for the OMAP IOMMU driver to make it build on 64-bit
   architectures

 - Various smaller fixes and improvements

* tag 'iommu-updates-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (39 commits)
  iommu: Move fwspec->iommu_priv to struct dev_iommu
  iommu/virtio: Use accessor functions for iommu private data
  iommu/qcom: Use accessor functions for iommu private data
  iommu/mediatek: Use accessor functions for iommu private data
  iommu/renesas: Use accessor functions for iommu private data
  iommu/arm-smmu: Use accessor functions for iommu private data
  iommu/arm-smmu: Refactor master_cfg/fwspec usage
  iommu/arm-smmu-v3: Use accessor functions for iommu private data
  iommu: Introduce accessors for iommu private data
  iommu/arm-smmu: Fix uninitilized variable warning
  iommu: Move iommu_fwspec to struct dev_iommu
  iommu: Rename struct iommu_param to dev_iommu
  iommu/tegra-gart: Remove direct access of dev->iommu_fwspec
  drm/msm/mdp5: Remove direct access of dev->iommu_fwspec
  ACPI/IORT: Remove direct access of dev->iommu_fwspec
  iommu: Define dev_iommu_fwspec_get() for !CONFIG_IOMMU_API
  iommu/virtio: Reject IOMMU page granule larger than PAGE_SIZE
  iommu/virtio: Fix freeing of incomplete domains
  iommu/virtio: Fix sparse warning
  iommu/vt-d: Add build dependency on IOASID
  ...
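The theme running through most of the hunks below is the consolidation of per-device IOMMU state: struct iommu_param is renamed to struct dev_iommu, hangs off a single dev->iommu pointer, absorbs the iommu_fwspec pointer, and gains a priv slot that replaces every driver's use of fwspec->iommu_priv. A rough sketch of the end state, condensed from include/linux/iommu.h after this merge (field order and the CONFIG_IOMMU_API guards are elided, so treat this as illustrative rather than authoritative):

	/* Condensed sketch of the consolidated per-device IOMMU data. */
	struct dev_iommu {
		struct mutex lock;
		struct iommu_fault_param *fault_param;	/* device fault handling */
		struct iommu_fwspec	*fwspec;	/* moved out of struct device */
		void			*priv;		/* was fwspec->iommu_priv */
	};

	static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
	{
		return dev->iommu ? dev->iommu->fwspec : NULL;
	}

	static inline void *dev_iommu_priv_get(struct device *dev)
	{
		return dev->iommu->priv;
	}

	static inline void dev_iommu_priv_set(struct device *dev, void *priv)
	{
		dev->iommu->priv = priv;
	}

Once drivers go through these accessors, relocating the underlying data (as the fwspec and iommu_priv moves in this series do) no longer requires touching driver code.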
This commit is contained in:

    commit 0906d8b975
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1444,6 +1444,7 @@ M:	Will Deacon <will@kernel.org>
 R:	Robin Murphy <robin.murphy@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/iommu/arm,smmu*
 F:	drivers/iommu/arm-smmu*
 F:	drivers/iommu/io-pgtable-arm.c
 F:	drivers/iommu/io-pgtable-arm-v7s.c
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1015,6 +1015,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 		return ops;
 
 	if (dev_is_pci(dev)) {
+		struct iommu_fwspec *fwspec;
 		struct pci_bus *bus = to_pci_dev(dev)->bus;
 		struct iort_pci_alias_info info = { .dev = dev };
 
@@ -1027,8 +1028,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 		err = pci_for_each_dma_alias(to_pci_dev(dev),
 					     iort_pci_iommu_init, &info);
 
-		if (!err && iort_pci_rc_supports_ats(node))
-			dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+		fwspec = dev_iommu_fwspec_get(dev);
+		if (fwspec && iort_pci_rc_supports_ats(node))
+			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 	} else {
 		int i = 0;
 
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 
 	if (config->platform.iommu) {
 		iommu_dev = &pdev->dev;
-		if (!iommu_dev->iommu_fwspec)
+		if (!dev_iommu_fwspec_get(iommu_dev))
 			iommu_dev = iommu_dev->parent;
 
 		aspace = msm_gem_address_space_create(iommu_dev,
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -188,6 +188,7 @@ config INTEL_IOMMU
 	select NEED_DMA_MAP_STATE
 	select DMAR_TABLE
 	select SWIOTLB
+	select IOASID
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
@@ -273,7 +274,7 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
 	bool "OMAP IOMMU Support"
-	depends on ARM && MMU
+	depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	select IOMMU_API
 	---help---
@@ -291,7 +292,7 @@ config OMAP_IOMMU_DEBUG
 
 config ROCKCHIP_IOMMU
 	bool "Rockchip IOMMU Support"
-	depends on ARM || ARM64
+	depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
 	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
@@ -325,7 +326,7 @@ config TEGRA_IOMMU_SMMU
 
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
-	depends on ARCH_EXYNOS && MMU
+	depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
 	depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
@@ -361,7 +362,7 @@ config IPMMU_VMSA
 
 config SPAPR_TCE_IOMMU
 	bool "sPAPR TCE IOMMU Support"
-	depends on PPC_POWERNV || PPC_PSERIES
+	depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
 	select IOMMU_API
 	help
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -370,7 +371,7 @@ config SPAPR_TCE_IOMMU
 # ARM IOMMU support
 config ARM_SMMU
 	tristate "ARM Ltd. System MMU (SMMU) Support"
-	depends on (ARM64 || ARM) && MMU
+	depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
@@ -440,7 +441,7 @@ config S390_IOMMU
 
 config S390_CCW_IOMMU
 	bool "S390 CCW IOMMU Support"
-	depends on S390 && CCW
+	depends on S390 && CCW || COMPILE_TEST
 	select IOMMU_API
 	help
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -448,7 +449,7 @@ config S390_CCW_IOMMU
 
 config S390_AP_IOMMU
 	bool "S390 AP IOMMU Support"
-	depends on S390 && ZCRYPT
+	depends on S390 && ZCRYPT || COMPILE_TEST
 	select IOMMU_API
 	help
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -456,7 +457,7 @@ config S390_AP_IOMMU
 
 config MTK_IOMMU
 	bool "MTK IOMMU Support"
-	depends on ARM || ARM64
+	depends on ARM || ARM64 || COMPILE_TEST
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	select ARM_DMA_USE_IOMMU
 	select IOMMU_API
@@ -506,8 +507,8 @@ config HYPERV_IOMMU
 	  guests to run with x2APIC mode enabled.
 
 config VIRTIO_IOMMU
-	bool "Virtio IOMMU driver"
-	depends on VIRTIO=y
+	tristate "Virtio IOMMU driver"
+	depends on VIRTIO
 	depends on ARM64
 	select IOMMU_API
 	select INTERVAL_TREE
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -348,7 +348,7 @@
 
 #define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
 #define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)
 
 #define DTE_GCR3_INDEX_A	0
 #define DTE_GCR3_INDEX_B	1
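The one-character AMD change above is a field-width fix: DTE_GCR3_VAL_C extracts the GCR3 table root pointer bits above bit 30, and the top field needs 21 mask bits, not 20. A quick standalone check of the three masks (userspace C; the widths are derived from the shifts in the hunk, and the 52-bit total is an assumption consistent with them):

	#include <stdio.h>

	int main(void)
	{
		/* Widths implied by the shifts: field A covers pointer bits
		 * 14..12, B bits 30..15, C bits 51..31 (assuming 52 bits total).
		 */
		unsigned long long mask_a = (1ULL << (15 - 12)) - 1; /* 0x00007  */
		unsigned long long mask_b = (1ULL << (31 - 15)) - 1; /* 0x0ffff  */
		unsigned long long mask_c = (1ULL << (52 - 31)) - 1; /* 0x1fffff */

		/* The old constant 0xfffff had only 20 bits set and silently
		 * dropped pointer bit 51.
		 */
		printf("A=%#llx B=%#llx C=%#llx\n", mask_a, mask_b, mask_c);
		return 0;
	}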
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -69,6 +69,9 @@
 #define IDR1_SSIDSIZE			GENMASK(10, 6)
 #define IDR1_SIDSIZE			GENMASK(5, 0)
 
+#define ARM_SMMU_IDR3			0xc
+#define IDR3_RIL			(1 << 10)
+
 #define ARM_SMMU_IDR5			0x14
 #define IDR5_STALL_MAX			GENMASK(31, 16)
 #define IDR5_GRAN64K			(1 << 6)
@@ -346,9 +349,14 @@
 #define CMDQ_CFGI_1_LEAF		(1UL << 0)
 #define CMDQ_CFGI_1_RANGE		GENMASK_ULL(4, 0)
 
+#define CMDQ_TLBI_0_NUM			GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX		31
+#define CMDQ_TLBI_0_SCALE		GENMASK_ULL(24, 20)
 #define CMDQ_TLBI_0_VMID		GENMASK_ULL(47, 32)
 #define CMDQ_TLBI_0_ASID		GENMASK_ULL(63, 48)
 #define CMDQ_TLBI_1_LEAF		(1UL << 0)
+#define CMDQ_TLBI_1_TTL			GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG			GENMASK_ULL(11, 10)
 #define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
 #define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(51, 12)
 
@@ -473,9 +481,13 @@ struct arm_smmu_cmdq_ent {
 	#define CMDQ_OP_TLBI_S2_IPA	0x2a
 	#define CMDQ_OP_TLBI_NSNH_ALL	0x30
 		struct {
+			u8		num;
+			u8		scale;
 			u16		asid;
 			u16		vmid;
 			bool		leaf;
+			u8		ttl;
+			u8		tg;
 			u64		addr;
 		} tlbi;
 
@@ -548,6 +560,11 @@ struct arm_smmu_cmdq {
 	atomic_t			lock;
 };
 
+struct arm_smmu_cmdq_batch {
+	u64				cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+	int				num;
+};
+
 struct arm_smmu_evtq {
 	struct arm_smmu_queue		q;
 	u32				max_stalls;
@@ -627,6 +644,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP		(1 << 12)
 #define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
 #define ARM_SMMU_FEAT_VAX		(1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV		(1 << 15)
 	u32				features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -895,14 +913,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
 		break;
 	case CMDQ_OP_TLBI_NH_VA:
+		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
 		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
 		break;
 	case CMDQ_OP_TLBI_S2_IPA:
+		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
 		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
 		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
 		break;
 	case CMDQ_OP_TLBI_NH_ASID:
@@ -1482,6 +1508,24 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 	return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
 }
 
+static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+				    struct arm_smmu_cmdq_batch *cmds,
+				    struct arm_smmu_cmdq_ent *cmd)
+{
+	if (cmds->num == CMDQ_BATCH_ENTRIES) {
+		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+		cmds->num = 0;
+	}
+	arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
+	cmds->num++;
+}
+
+static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
+				      struct arm_smmu_cmdq_batch *cmds)
+{
+	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+}
+
 /* Context descriptor manipulation functions */
 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 			     int ssid, bool leaf)
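The two helpers added above establish the batching idiom used by every call site converted later in this file: keep a stack-allocated struct arm_smmu_cmdq_batch, push entries through arm_smmu_cmdq_batch_add() (which flushes the buffer without a sync whenever it fills up to CMDQ_BATCH_ENTRIES), and finish with arm_smmu_cmdq_batch_submit(), which publishes the remainder together with a single CMD_SYNC. An illustrative caller, modeled on the arm_smmu_sync_cd() conversion a few hunks below (smmu, sids and num_sids are assumed to be in scope; this is a sketch, not driver code):

	struct arm_smmu_cmdq_batch cmds = {};
	struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_CD };
	int i;

	for (i = 0; i < num_sids; i++) {
		cmd.cfgi.sid = sids[i];
		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); /* may flush, no sync */
	}
	arm_smmu_cmdq_batch_submit(smmu, &cmds);             /* final flush + CMD_SYNC */

The payoff is one CMD_SYNC per batch instead of one per command.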
@@ -1489,6 +1533,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	size_t i;
 	unsigned long flags;
 	struct arm_smmu_master *master;
+	struct arm_smmu_cmdq_batch cmds = {};
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd = {
 		.opcode	= CMDQ_OP_CFGI_CD,
@@ -1502,12 +1547,12 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
 		for (i = 0; i < master->num_sids; i++) {
 			cmd.cfgi.sid = master->sids[i];
-			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
 		}
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	arm_smmu_cmdq_issue_sync(smmu);
+	arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
 
 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
@@ -1531,6 +1576,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
 	u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
 		  CTXDESC_L1_DESC_V;
 
+	/* See comment in arm_smmu_write_ctx_desc() */
 	WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
@@ -1726,7 +1772,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
 	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
 	val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
 
-	*dst = cpu_to_le64(val);
+	/* See comment in arm_smmu_write_ctx_desc() */
+	WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -2132,17 +2179,16 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
 	cmd->atc.size	= log2_span;
 }
 
-static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
-				   struct arm_smmu_cmdq_ent *cmd)
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 {
 	int i;
+	struct arm_smmu_cmdq_ent cmd;
 
-	if (!master->ats_enabled)
-		return 0;
+	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
 
 	for (i = 0; i < master->num_sids; i++) {
-		cmd->atc.sid = master->sids[i];
-		arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+		cmd.atc.sid = master->sids[i];
+		arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
 	}
 
 	return arm_smmu_cmdq_issue_sync(master->smmu);
@@ -2151,10 +2197,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 				   int ssid, unsigned long iova, size_t size)
 {
-	int ret = 0;
+	int i;
 	unsigned long flags;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_master *master;
+	struct arm_smmu_cmdq_batch cmds = {};
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
 		return 0;
@@ -2179,11 +2226,18 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head)
-		ret |= arm_smmu_atc_inv_master(master, &cmd);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		if (!master->ats_enabled)
+			continue;
+
+		for (i = 0; i < master->num_sids; i++) {
+			cmd.atc.sid = master->sids[i];
+			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
+		}
+	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	return ret ? -ETIMEDOUT : 0;
+	return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
 }
 
 /* IO_PGTABLE API */
@@ -2218,10 +2272,10 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 				   size_t granule, bool leaf,
 				   struct arm_smmu_domain *smmu_domain)
 {
-	u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long start = iova, end = iova + size;
-	int i = 0;
+	unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+	size_t inv_range = granule;
+	struct arm_smmu_cmdq_batch cmds = {};
 	struct arm_smmu_cmdq_ent cmd = {
 		.tlbi = {
 			.leaf	= leaf,
@@ -2239,19 +2293,50 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
 	}
 
+	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+		/* Get the leaf page size */
+		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+
+		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
+		cmd.tlbi.tg = (tg - 10) / 2;
+
+		/* Determine what level the granule is at */
+		cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+
+		num_pages = size >> tg;
+	}
+
 	while (iova < end) {
-		if (i == CMDQ_BATCH_ENTRIES) {
-			arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
-			i = 0;
+		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+			/*
+			 * On each iteration of the loop, the range is 5 bits
+			 * worth of the aligned size remaining.
+			 * The range in pages is:
+			 *
+			 * range = (num_pages & (0x1f << __ffs(num_pages)))
+			 */
+			unsigned long scale, num;
+
+			/* Determine the power of 2 multiple number of pages */
+			scale = __ffs(num_pages);
+			cmd.tlbi.scale = scale;
+
+			/* Determine how many chunks of 2^scale size we have */
+			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+			cmd.tlbi.num = num - 1;
+
+			/* range is num * 2^scale * pgsize */
+			inv_range = num << (scale + tg);
+
+			/* Clear out the lower order bits for the next iteration */
+			num_pages -= num << scale;
 		}
 
 		cmd.tlbi.addr = iova;
-		arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
-		iova += granule;
-		i++;
+		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+		iova += inv_range;
 	}
-
-	arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+	arm_smmu_cmdq_batch_submit(smmu, &cmds);
 
 	/*
 	 * Unfortunately, this can't be leaf-only since we may have
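The NUM/SCALE fields give each range command a span of (num + 1) << scale granules, so the loop above peels the remaining page count apart from its lowest set bit upward. A self-contained userspace sketch of the same decomposition (it mirrors the arithmetic above, not the driver code itself; __builtin_ctzl stands in for the kernel's __ffs()):

	#include <stdio.h>

	#define CMDQ_TLBI_RANGE_NUM_MAX	31

	static void decompose(unsigned long num_pages)
	{
		while (num_pages) {
			unsigned long scale = __builtin_ctzl(num_pages);
			unsigned long num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;

			/* The command encodes num - 1; it spans num << scale pages. */
			printf("TLBI range: scale=%lu num=%lu -> %lu pages\n",
			       scale, num - 1, num << scale);
			num_pages -= num << scale;
		}
	}

	int main(void)
	{
		decompose(0x27);	/* 39 pages: a 7-page then a 32-page command */
		return 0;
	}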
@@ -2611,7 +2696,6 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
 
 static void arm_smmu_disable_ats(struct arm_smmu_master *master)
 {
-	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_domain *smmu_domain = master->domain;
 
 	if (!master->ats_enabled)
@@ -2623,11 +2707,57 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master)
 	 * ATC invalidation via the SMMU.
 	 */
 	wmb();
-	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
-	arm_smmu_atc_inv_master(master, &cmd);
+	arm_smmu_atc_inv_master(master);
 	atomic_dec(&smmu_domain->nr_ats_masters);
 }
 
+static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
+{
+	int ret;
+	int features;
+	int num_pasids;
+	struct pci_dev *pdev;
+
+	if (!dev_is_pci(master->dev))
+		return -ENODEV;
+
+	pdev = to_pci_dev(master->dev);
+
+	features = pci_pasid_features(pdev);
+	if (features < 0)
+		return features;
+
+	num_pasids = pci_max_pasids(pdev);
+	if (num_pasids <= 0)
+		return num_pasids;
+
+	ret = pci_enable_pasid(pdev, features);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to enable PASID\n");
+		return ret;
+	}
+
+	master->ssid_bits = min_t(u8, ilog2(num_pasids),
+				  master->smmu->ssid_bits);
+	return 0;
+}
+
+static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
+{
+	struct pci_dev *pdev;
+
+	if (!dev_is_pci(master->dev))
+		return;
+
+	pdev = to_pci_dev(master->dev);
+
+	if (!pdev->pasid_enabled)
+		return;
+
+	master->ssid_bits = 0;
+	pci_disable_pasid(pdev);
+}
+
 static void arm_smmu_detach_dev(struct arm_smmu_master *master)
 {
 	unsigned long flags;
@@ -2659,7 +2789,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	if (!fwspec)
 		return -ENOENT;
 
-	master = fwspec->iommu_priv;
+	master = dev_iommu_priv_get(dev);
 	smmu = master->smmu;
 
 	arm_smmu_detach_dev(master);
@@ -2795,7 +2925,7 @@ static int arm_smmu_add_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return -ENODEV;
 
-	if (WARN_ON_ONCE(fwspec->iommu_priv))
+	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
 		return -EBUSY;
 
 	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
@@ -2810,7 +2940,7 @@ static int arm_smmu_add_device(struct device *dev)
 	master->smmu = smmu;
 	master->sids = fwspec->ids;
 	master->num_sids = fwspec->num_ids;
-	fwspec->iommu_priv = master;
+	dev_iommu_priv_set(dev, master);
 
 	/* Check the SIDs are in range of the SMMU and our stream table */
 	for (i = 0; i < master->num_sids; i++) {
@@ -2831,13 +2961,23 @@ static int arm_smmu_add_device(struct device *dev)
 
 	master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
 
+	/*
+	 * Note that PASID must be enabled before, and disabled after ATS:
+	 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
+	 *
+	 *   Behavior is undefined if this bit is Set and the value of the PASID
+	 *   Enable, Execute Requested Enable, or Privileged Mode Requested bits
+	 *   are changed.
+	 */
+	arm_smmu_enable_pasid(master);
+
 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
 		master->ssid_bits = min_t(u8, master->ssid_bits,
 					  CTXDESC_LINEAR_CDMAX);
 
 	ret = iommu_device_link(&smmu->iommu, dev);
 	if (ret)
-		goto err_free_master;
+		goto err_disable_pasid;
 
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group)) {
@@ -2850,9 +2990,11 @@ static int arm_smmu_add_device(struct device *dev)
 
 err_unlink:
 	iommu_device_unlink(&smmu->iommu, dev);
+err_disable_pasid:
+	arm_smmu_disable_pasid(master);
 err_free_master:
 	kfree(master);
-	fwspec->iommu_priv = NULL;
+	dev_iommu_priv_set(dev, NULL);
 	return ret;
 }
 
@@ -2865,11 +3007,12 @@ static void arm_smmu_remove_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return;
 
-	master = fwspec->iommu_priv;
+	master = dev_iommu_priv_get(dev);
 	smmu = master->smmu;
 	arm_smmu_detach_dev(master);
 	iommu_group_remove_device(dev);
 	iommu_device_unlink(&smmu->iommu, dev);
+	arm_smmu_disable_pasid(master);
 	kfree(master);
 	iommu_fwspec_free(dev);
 }
@@ -3700,6 +3843,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	if (smmu->sid_bits <= STRTAB_SPLIT)
 		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
 
+	/* IDR3 */
+	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
+	if (FIELD_GET(IDR3_RIL, reg))
+		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+
 	/* IDR5 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
 
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -98,12 +98,10 @@ struct arm_smmu_master_cfg {
 	s16				smendx[];
 };
 #define INVALID_SMENDX			-1
-#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
-#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
-#define fwspec_smendx(fw, i) \
-	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
-#define for_each_cfg_sme(fw, i, idx) \
-	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
+#define cfg_smendx(cfg, fw, i) \
+	(i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+	for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
 
 static bool using_legacy_binding, using_generic_binding;
 
@@ -1061,7 +1059,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
 static int arm_smmu_master_alloc_smes(struct device *dev)
 {
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
 	struct arm_smmu_device *smmu = cfg->smmu;
 	struct arm_smmu_smr *smrs = smmu->smrs;
 	struct iommu_group *group;
@@ -1069,7 +1067,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 
 	mutex_lock(&smmu->stream_map_mutex);
 	/* Figure out a viable stream map entry allocation */
-	for_each_cfg_sme(fwspec, i, idx) {
+	for_each_cfg_sme(cfg, fwspec, i, idx) {
 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
 		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
@@ -1100,7 +1098,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	iommu_group_put(group);
 
 	/* It worked! Now, poke the actual hardware */
-	for_each_cfg_sme(fwspec, i, idx) {
+	for_each_cfg_sme(cfg, fwspec, i, idx) {
 		arm_smmu_write_sme(smmu, idx);
 		smmu->s2crs[idx].group = group;
 	}
@@ -1117,14 +1115,14 @@ out_err:
 	return ret;
 }
 
-static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
+				      struct iommu_fwspec *fwspec)
 {
-	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
-	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	struct arm_smmu_device *smmu = cfg->smmu;
 	int i, idx;
 
 	mutex_lock(&smmu->stream_map_mutex);
-	for_each_cfg_sme(fwspec, i, idx) {
+	for_each_cfg_sme(cfg, fwspec, i, idx) {
 		if (arm_smmu_free_sme(smmu, idx))
 			arm_smmu_write_sme(smmu, idx);
 		cfg->smendx[i] = INVALID_SMENDX;
@@ -1133,6 +1131,7 @@ static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+				      struct arm_smmu_master_cfg *cfg,
 				      struct iommu_fwspec *fwspec)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1146,7 +1145,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	else
 		type = S2CR_TYPE_TRANS;
 
-	for_each_cfg_sme(fwspec, i, idx) {
+	for_each_cfg_sme(cfg, fwspec, i, idx) {
 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
 			continue;
 
@@ -1160,10 +1159,11 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	int ret;
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_device *smmu;
+	int ret;
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1177,10 +1177,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * domains, just say no (but more politely than by dereferencing NULL).
 	 * This should be at least a WARN_ON once that's sorted.
 	 */
-	if (!fwspec->iommu_priv)
+	cfg = dev_iommu_priv_get(dev);
+	if (!cfg)
 		return -ENODEV;
 
-	smmu = fwspec_smmu(fwspec);
+	smmu = cfg->smmu;
 
 	ret = arm_smmu_rpm_get(smmu);
 	if (ret < 0)
@@ -1204,7 +1205,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	}
 
 	/* Looks ok, so add the device to the domain */
-	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+	ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
 
 	/*
 	 * Setup an autosuspend delay to avoid bouncing runpm state.
@@ -1383,7 +1384,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
 
 static int arm_smmu_add_device(struct device *dev)
 {
-	struct arm_smmu_device *smmu;
+	struct arm_smmu_device *smmu = NULL;
 	struct arm_smmu_master_cfg *cfg;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	int i, ret;
@@ -1429,7 +1430,7 @@ static int arm_smmu_add_device(struct device *dev)
 		goto out_free;
 
 	cfg->smmu = smmu;
-	fwspec->iommu_priv = cfg;
+	dev_iommu_priv_set(dev, cfg);
 	while (i--)
 		cfg->smendx[i] = INVALID_SMENDX;
 
@@ -1467,7 +1468,7 @@ static void arm_smmu_remove_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return;
 
-	cfg = fwspec->iommu_priv;
+	cfg = dev_iommu_priv_get(dev);
 	smmu = cfg->smmu;
 
 	ret = arm_smmu_rpm_get(smmu);
@@ -1475,23 +1476,25 @@ static void arm_smmu_remove_device(struct device *dev)
 		return;
 
 	iommu_device_unlink(&smmu->iommu, dev);
-	arm_smmu_master_free_smes(fwspec);
+	arm_smmu_master_free_smes(cfg, fwspec);
 
 	arm_smmu_rpm_put(smmu);
 
+	dev_iommu_priv_set(dev, NULL);
 	iommu_group_remove_device(dev);
-	kfree(fwspec->iommu_priv);
+	kfree(cfg);
 	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
+	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct arm_smmu_device *smmu = cfg->smmu;
 	struct iommu_group *group = NULL;
 	int i, idx;
 
-	for_each_cfg_sme(fwspec, i, idx) {
+	for_each_cfg_sme(cfg, fwspec, i, idx) {
 		if (group && smmu->s2crs[idx].group &&
 		    group != smmu->s2crs[idx].group)
 			return ERR_PTR(-EINVAL);
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
 	struct dmar_atsr_unit *atsru;
 	struct acpi_dmar_atsr *tmp;
 
-	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
+				dmar_rcu_check()) {
 		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
 		if (atsr->segment != tmp->segment)
 			continue;
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -531,7 +531,7 @@ struct page_req_dsc {
 	u64 priv_data[2];
 };
 
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
 
 static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
 {
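The mask change appears to be a descriptor-size fix: with priv_data[2], struct page_req_dsc above is four 64-bit quadwords, i.e. 0x20 bytes, so masking the ring head/tail offsets with a -0x10 constant could land mid-descriptor. Spelled out for one ring page (the PRQ_ORDER value here is an illustrative assumption; the driver defines its own):

	#include <stdio.h>

	#define PRQ_ORDER	0	/* illustrative; set by the driver */
	#define PRQ_RING_SIZE	(0x1000 << PRQ_ORDER)

	int main(void)
	{
		unsigned int old_mask = PRQ_RING_SIZE - 0x10; /* 0xff0: 16-byte steps */
		unsigned int new_mask = PRQ_RING_SIZE - 0x20; /* 0xfe0: 32-byte steps */

		/* Only the new mask keeps offsets aligned to whole descriptors. */
		printf("old=%#x new=%#x\n", old_mask, new_mask);
		return 0;
	}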
@@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		 * any faults on kernel addresses. */
 		if (!svm->mm)
 			goto bad_req;
-		/* If the mm is already defunct, don't handle faults. */
-		if (!mmget_not_zero(svm->mm))
-			goto bad_req;
 
 		/* If address is not canonical, return invalid response */
 		if (!is_canonical_address(address))
 			goto bad_req;
 
+		/* If the mm is already defunct, don't handle faults. */
+		if (!mmget_not_zero(svm->mm))
+			goto bad_req;
+
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
 }
 EXPORT_SYMBOL_GPL(iommu_device_unregister);
 
-static struct iommu_param *iommu_get_dev_param(struct device *dev)
+static struct dev_iommu *dev_iommu_get(struct device *dev)
 {
-	struct iommu_param *param = dev->iommu_param;
+	struct dev_iommu *param = dev->iommu;
 
 	if (param)
 		return param;
@@ -164,14 +164,14 @@ static struct iommu_param *iommu_get_dev_param(struct device *dev)
 		return NULL;
 
 	mutex_init(&param->lock);
-	dev->iommu_param = param;
+	dev->iommu = param;
 	return param;
 }
 
-static void iommu_free_dev_param(struct device *dev)
+static void dev_iommu_free(struct device *dev)
 {
-	kfree(dev->iommu_param);
-	dev->iommu_param = NULL;
+	kfree(dev->iommu);
+	dev->iommu = NULL;
 }
 
 int iommu_probe_device(struct device *dev)
@@ -183,7 +183,7 @@ int iommu_probe_device(struct device *dev)
 	if (!ops)
 		return -EINVAL;
 
-	if (!iommu_get_dev_param(dev))
+	if (!dev_iommu_get(dev))
 		return -ENOMEM;
 
 	if (!try_module_get(ops->owner)) {
@@ -200,7 +200,7 @@ int iommu_probe_device(struct device *dev)
 err_module_put:
 	module_put(ops->owner);
 err_free_dev_param:
-	iommu_free_dev_param(dev);
+	dev_iommu_free(dev);
 	return ret;
 }
 
@@ -211,9 +211,9 @@ void iommu_release_device(struct device *dev)
 	if (dev->iommu_group)
 		ops->remove_device(dev);
 
-	if (dev->iommu_param) {
+	if (dev->iommu) {
 		module_put(ops->owner);
-		iommu_free_dev_param(dev);
+		dev_iommu_free(dev);
 	}
 }
 
@@ -972,7 +972,7 @@ int iommu_register_device_fault_handler(struct device *dev,
 					iommu_dev_fault_handler_t handler,
 					void *data)
 {
-	struct iommu_param *param = dev->iommu_param;
+	struct dev_iommu *param = dev->iommu;
 	int ret = 0;
 
 	if (!param)
@@ -1015,7 +1015,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
  */
 int iommu_unregister_device_fault_handler(struct device *dev)
 {
-	struct iommu_param *param = dev->iommu_param;
+	struct dev_iommu *param = dev->iommu;
 	int ret = 0;
 
 	if (!param)
@@ -1055,7 +1055,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
  */
 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
 {
-	struct iommu_param *param = dev->iommu_param;
+	struct dev_iommu *param = dev->iommu;
 	struct iommu_fault_event *evt_pending = NULL;
 	struct iommu_fault_param *fparam;
 	int ret = 0;
@@ -1104,7 +1104,7 @@ int iommu_page_response(struct device *dev,
 	int ret = -EINVAL;
 	struct iommu_fault_event *evt;
 	struct iommu_fault_page_request *prm;
-	struct iommu_param *param = dev->iommu_param;
+	struct dev_iommu *param = dev->iommu;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
 	if (!domain || !domain->ops->page_response)
@@ -2405,7 +2405,11 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 	if (fwspec)
 		return ops == fwspec->ops ? 0 : -EINVAL;
 
-	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!dev_iommu_get(dev))
+		return -ENOMEM;
+
+	/* Preallocate for the overwhelmingly common case of 1 ID */
+	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
 	if (!fwspec)
 		return -ENOMEM;
 
@@ -2432,15 +2436,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 {
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	size_t size;
-	int i;
+	int i, new_num;
 
 	if (!fwspec)
 		return -EINVAL;
 
-	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
-	if (size > sizeof(*fwspec)) {
-		fwspec = krealloc(fwspec, size, GFP_KERNEL);
+	new_num = fwspec->num_ids + num_ids;
+	if (new_num > 1) {
+		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
+				  GFP_KERNEL);
 		if (!fwspec)
 			return -ENOMEM;
 
@@ -2450,7 +2454,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 	for (i = 0; i < num_ids; i++)
 		fwspec->ids[fwspec->num_ids + i] = ids[i];
 
-	fwspec->num_ids += num_ids;
+	fwspec->num_ids = new_num;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
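Both hunks above size a structure with a trailing flexible array, and the rewrite swaps open-coded offsetof() arithmetic for struct_size(), which computes sizeof(*fwspec) plus n times the element size and saturates instead of wrapping on overflow. Note also the behavioral tweak: the new_num > 1 test relies on iommu_fwspec_init() now preallocating room for one ID, so the common single-ID case never reallocates. A self-contained illustration of the sizing (the struct is an abbreviated stand-in, not the real iommu_fwspec layout):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct fwspec_like {		/* abbreviated stand-in for iommu_fwspec */
		const void *ops;
		unsigned int flags;
		unsigned int num_ids;
		uint32_t ids[];		/* flexible array member */
	};

	int main(void)
	{
		size_t n = 4;

		/* What the old code computed ... */
		size_t old_size = offsetof(struct fwspec_like, ids[n]);
		/* ... and what struct_size(fwspec, ids, n) computes, minus the
		 * kernel's saturating overflow check.
		 */
		size_t new_size = sizeof(struct fwspec_like) + n * sizeof(uint32_t);

		printf("offsetof: %zu, struct_size equivalent: %zu\n",
		       old_size, new_size);
		return 0;
	}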
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -89,9 +89,7 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
 
 static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
-	return fwspec ? fwspec->iommu_priv : NULL;
+	return dev_iommu_priv_get(dev);
 }
 
 #define TLB_LOOP_TIMEOUT		100	/* 100us */
@@ -727,14 +725,13 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 static int ipmmu_init_platform_device(struct device *dev,
 				      struct of_phandle_args *args)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct platform_device *ipmmu_pdev;
 
 	ipmmu_pdev = of_find_device_by_node(args->np);
 	if (!ipmmu_pdev)
 		return -ENODEV;
 
-	fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
+	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
 
 	return 0;
 }
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -358,8 +358,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 
 	if (!data)
 		return -ENODEV;
@@ -378,7 +378,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 
 	if (!data)
 		return;
@@ -450,7 +450,7 @@ static int mtk_iommu_add_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return -ENODEV; /* Not a iommu client device */
 
-	data = fwspec->iommu_priv;
+	data = dev_iommu_priv_get(dev);
 	iommu_device_link(&data->iommu, dev);
 
 	group = iommu_group_get_for_dev(dev);
@@ -469,7 +469,7 @@ static void mtk_iommu_remove_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	data = fwspec->iommu_priv;
+	data = dev_iommu_priv_get(dev);
 	iommu_device_unlink(&data->iommu, dev);
 
 	iommu_group_remove_device(dev);
@@ -496,7 +496,6 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 
 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct platform_device *m4updev;
 
 	if (args->args_count != 1) {
@@ -505,13 +504,13 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 		return -EINVAL;
 	}
 
-	if (!fwspec->iommu_priv) {
+	if (!dev_iommu_priv_get(dev)) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		fwspec->iommu_priv = platform_get_drvdata(m4updev);
+		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
 	}
 
 	return iommu_fwspec_add_ids(dev, args->args, 1);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -263,8 +263,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
 	int ret;
 
 	if (!data)
@@ -286,7 +286,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 
 	if (!data)
 		return;
@@ -387,20 +387,20 @@ static int mtk_iommu_create_mapping(struct device *dev,
 		return -EINVAL;
 	}
 
-	if (!fwspec->iommu_priv) {
+	if (!dev_iommu_priv_get(dev)) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		fwspec->iommu_priv = platform_get_drvdata(m4updev);
+		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
 	}
 
 	ret = iommu_fwspec_add_ids(dev, args->args, 1);
 	if (ret)
 		return ret;
 
-	data = fwspec->iommu_priv;
+	data = dev_iommu_priv_get(dev);
 	m4udev = data->dev;
 	mtk_mapping = m4udev->archdata.iommu;
 	if (!mtk_mapping) {
@@ -459,7 +459,7 @@ static int mtk_iommu_add_device(struct device *dev)
 	if (err)
 		return err;
 
-	data = fwspec->iommu_priv;
+	data = dev_iommu_priv_get(dev);
 	mtk_mapping = data->dev->archdata.iommu;
 	err = arm_iommu_attach_device(dev, mtk_mapping);
 	if (err) {
@@ -478,7 +478,7 @@ static void mtk_iommu_remove_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	data = fwspec->iommu_priv;
+	data = dev_iommu_priv_get(dev);
 	iommu_device_unlink(&data->iommu, dev);
 
 	iommu_group_remove_device(dev);
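
mtk_iommu_v1 additionally drives the 32-bit ARM DMA layer by hand: the dma_iommu_mapping cached in m4udev->archdata.iommu is what arm_iommu_attach_device() consumes in the add_device hunk above. A rough, ARM-specific sketch of that flow, assuming the arm_iommu_* helpers from asm/dma-iommu.h (which this series leaves untouched; my_setup_dma_mapping is an illustrative name):

#include <asm/dma-iommu.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* One shared mapping covering a 4 GiB IOVA space, as the driver does. */
static int my_setup_dma_mapping(struct device *master)
{
	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, 1ULL << 32);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Route the master's streaming DMA through the IOMMU. */
	return arm_iommu_attach_device(master, mapping);
}
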
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -167,7 +167,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
 
-	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
 		return -EINVAL;
 
 	pa = virt_to_phys(obj->iopgd);
@@ -434,7 +434,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 		bytes = iopgsz_to_bytes(cr.cam & 3);
 
 		if ((start <= da) && (da < start + bytes)) {
-			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
 				__func__, start, da, bytes);
 			iotlb_load_cr(obj, &cr);
 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
@@ -1352,11 +1352,11 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 
 	omap_pgsz = bytes_to_iopgsz(bytes);
 	if (omap_pgsz < 0) {
-		dev_err(dev, "invalid size to map: %d\n", bytes);
+		dev_err(dev, "invalid size to map: %zu\n", bytes);
 		return -EINVAL;
 	}
 
-	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
+	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
 
 	iotlb_init_entry(&e, da, pa, omap_pgsz);
 
@@ -1393,7 +1393,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 	size_t bytes = 0;
 	int i;
 
-	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
+	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
 
 	iommu = omap_domain->iommus;
 	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
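
The OMAP hunks are the 64-bit build fixes mentioned in the pull summary: casting a pointer to u32 silently drops the upper address bits on LP64, and size_t is no longer unsigned int there, so %u/%x/%d trip the format checking. A standalone userspace illustration of both pitfalls (a sketch, not kernel code):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t bytes = 16384;

	/* Pointer-to-u32 truncates on 64-bit; unsigned long is
	 * pointer-sized on Linux, which is why IS_ALIGNED() now
	 * gets an (unsigned long) cast in omap2_iommu_enable(). */
	int aligned = (((unsigned long)&bytes) & (16384 - 1)) == 0;

	/* %zu/%zx are the portable conversions for size_t, matching
	 * the dev_dbg()/dev_err() changes above. */
	printf("bytes=%zu (0x%zx), aligned=%d\n", bytes, bytes, aligned);
	return 0;
}
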
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -63,7 +63,8 @@
  *
  * va to pa translation
  */
-static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va,
+					       dma_addr_t mask)
 {
 	return (d & mask) | (va & (~mask));
 }
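
omap_iommu_translate() masks the descriptor down to the page frame and keeps the low virtual-address bits as the page offset; widening d and va lets the helper work without truncation on 64-bit builds. A worked example with illustrative values, assuming a 4 KiB mask:

/* (d & mask) | (va & ~mask), with illustrative values:
 *   d    = 0x12345000  (second-level descriptor)
 *   va   = 0xdeadbeef  (device virtual address)
 *   mask = 0xfffff000  (4 KiB page)
 * => 0x12345000 | 0xeef == 0x12345eef
 */
static inline phys_addr_t example_translate(void)
{
	return omap_iommu_translate(0x12345000, 0xdeadbeef, 0xfffff000);
}
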
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -48,7 +48,7 @@ struct qcom_iommu_dev {
 	void __iomem		*local_base;
 	u32			 sec_id;
 	u8			 num_ctxs;
-	struct qcom_iommu_ctx	*ctxs[0];   /* indexed by asid-1 */
+	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
 };
 
 struct qcom_iommu_ctx {
@@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
 
 static const struct iommu_ops qcom_iommu_ops;
 
-static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
+static struct qcom_iommu_dev * to_iommu(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
 	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
 		return NULL;
-	return fwspec->iommu_priv;
+
+	return dev_iommu_priv_get(dev);
 }
 
-static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	if (!qcom_iommu)
 		return NULL;
 	return qcom_iommu->ctxs[asid - 1];
@@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
 
 static void qcom_iommu_tlb_sync(void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct iommu_fwspec *fwspec;
+	struct device *dev = cookie;
 	unsigned i;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		unsigned int val, ret;
 
 		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie)
 
 static void qcom_iommu_tlb_inv_context(void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct device *dev = cookie;
+	struct iommu_fwspec *fwspec;
 	unsigned i;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
 	}
 
@@ -147,13 +156,16 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 					    size_t granule, bool leaf, void *cookie)
 {
-	struct iommu_fwspec *fwspec = cookie;
+	struct device *dev = cookie;
+	struct iommu_fwspec *fwspec;
 	unsigned i, reg;
 
 	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
+	fwspec = dev_iommu_fwspec_get(dev);
+
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 		size_t s = size;
 
 		iova = (iova >> 12) << 12;
@@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 
 static int qcom_iommu_init_domain(struct iommu_domain *domain,
 				  struct qcom_iommu_dev *qcom_iommu,
-				  struct iommu_fwspec *fwspec)
+				  struct device *dev)
 {
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct io_pgtable_ops *pgtbl_ops;
 	struct io_pgtable_cfg pgtbl_cfg;
 	int i, ret = 0;
@@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	};
 
 	qcom_domain->iommu = qcom_iommu;
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
+	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
 	if (!pgtbl_ops) {
 		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
 		ret = -ENOMEM;
@@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 	domain->geometry.force_aperture = true;
 
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 
 		if (!ctx->secure_init) {
 			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
 
 static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
 	int ret;
 
@@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
 
 	/* Ensure that the domain is finalized */
 	pm_runtime_get_sync(qcom_iommu->dev);
-	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
+	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
 	pm_runtime_put_sync(qcom_iommu->dev);
 	if (ret < 0)
 		return ret;
@@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
 
 static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	unsigned i;
 
 	if (WARN_ON(!qcom_domain->iommu))
@@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
 
 	pm_runtime_get_sync(qcom_iommu->dev);
 	for (i = 0; i < fwspec->num_ids; i++) {
-		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
 
 		/* Disable the context bank: */
 		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
@@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
 
 static int qcom_iommu_add_device(struct device *dev)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 	struct iommu_group *group;
 	struct device_link *link;
 
@@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev)
 
 static void qcom_iommu_remove_device(struct device *dev)
 {
-	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
 
 	if (!qcom_iommu)
 		return;
@@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev)
 
 static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct qcom_iommu_dev *qcom_iommu;
 	struct platform_device *iommu_pdev;
 	unsigned asid = args->args[0];
@@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	    WARN_ON(asid > qcom_iommu->num_ctxs))
 		return -EINVAL;
 
-	if (!fwspec->iommu_priv) {
-		fwspec->iommu_priv = qcom_iommu;
+	if (!dev_iommu_priv_get(dev)) {
+		dev_iommu_priv_set(dev, qcom_iommu);
 	} else {
 		/* make sure devices iommus dt node isn't referring to
 		 * multiple different iommu devices.  Multiple context
 		 * banks are ok, but multiple devices are not:
 		 */
-		if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
+		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
			return -EINVAL;
 	}
 
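
Beyond the accessor conversion, qcom_iommu changes the io-pgtable cookie from the fwspec to the struct device itself, which is why alloc_io_pgtable_ops() now receives dev and every TLB callback starts by recovering the fwspec. Condensed sketch of the resulting callback shape (to_ctx(), iommu_writel() and ARM_SMMU_CB_S1_TLBIASID are the driver's own symbols from the hunks above; my_tlb_inv_context is an illustrative name):

static void my_tlb_inv_context(void *cookie)
{
	struct device *dev = cookie;	/* the cookie is now the client device */
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}
}
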
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -247,7 +247,7 @@ static int gart_iommu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
 
-	if (!dev->iommu_fwspec)
+	if (!dev_iommu_fwspec_get(dev))
 		return -ENODEV;
 
 	group = iommu_group_get_for_dev(dev);
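
tegra-gart was one of the remaining users poking dev->iommu_fwspec directly; since that field disappears from struct device (see the include/linux/device.h hunk below), the accessor is the only way in. Together with the !CONFIG_IOMMU_API stub added to include/linux/iommu.h further down, the same guard compiles whether or not the IOMMU API is configured in. Sketch of the idiom (my_add_device is an illustrative name):

static int my_add_device(struct device *dev)
{
	/* NULL both when no fwspec was attached and, via the new
	 * stub, when CONFIG_IOMMU_API is disabled. */
	if (!dev_iommu_fwspec_get(dev))
		return -ENODEV;

	return 0;
}
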
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -466,7 +466,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
 	struct virtio_iommu_req_probe *probe;
 	struct virtio_iommu_probe_property *prop;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct viommu_endpoint *vdev = fwspec->iommu_priv;
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 
 	if (!fwspec->num_ids)
 		return -EINVAL;
@@ -607,24 +607,36 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
 	return &vdomain->domain;
 }
 
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 				  struct iommu_domain *domain)
 {
 	int ret;
+	unsigned long viommu_page_size;
+	struct viommu_dev *viommu = vdev->viommu;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-	vdomain->viommu		= viommu;
-	vdomain->map_flags	= viommu->map_flags;
+	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+	if (viommu_page_size > PAGE_SIZE) {
+		dev_err(vdev->dev,
+			"granule 0x%lx larger than system page size 0x%lx\n",
+			viommu_page_size, PAGE_SIZE);
+		return -EINVAL;
+	}
+
+	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+			      viommu->last_domain, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	vdomain->id		= (unsigned int)ret;
 
 	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
 	domain->geometry	= viommu->geometry;
 
-	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
-			      viommu->last_domain, GFP_KERNEL);
-	if (ret >= 0)
-		vdomain->id = (unsigned int)ret;
+	vdomain->map_flags	= viommu->map_flags;
+	vdomain->viommu		= viommu;
 
-	return ret > 0 ? 0 : ret;
+	return 0;
 }
 
 static void viommu_domain_free(struct iommu_domain *domain)
@@ -648,7 +660,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	int ret = 0;
 	struct virtio_iommu_req_attach req;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct viommu_endpoint *vdev = fwspec->iommu_priv;
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
 	mutex_lock(&vdomain->mutex);
@@ -657,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 * Properly initialize the domain now that we know which viommu
 		 * owns it.
 		 */
-		ret = viommu_domain_finalise(vdev->viommu, domain);
+		ret = viommu_domain_finalise(vdev, domain);
 	} else if (vdomain->viommu != vdev->viommu) {
 		dev_err(dev, "cannot attach to foreign vIOMMU\n");
 		ret = -EXDEV;
@@ -807,8 +819,7 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
 	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct viommu_endpoint *vdev = fwspec->iommu_priv;
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
 	list_for_each_entry(entry, &vdev->resv_regions, list) {
@@ -876,7 +887,7 @@ static int viommu_add_device(struct device *dev)
 	vdev->dev = dev;
 	vdev->viommu = viommu;
 	INIT_LIST_HEAD(&vdev->resv_regions);
-	fwspec->iommu_priv = vdev;
+	dev_iommu_priv_set(dev, vdev);
 
 	if (viommu->probe_size) {
 		/* Get additional information for this endpoint */
@@ -920,7 +931,7 @@ static void viommu_remove_device(struct device *dev)
 	if (!fwspec || fwspec->ops != &viommu_ops)
 		return;
 
-	vdev = fwspec->iommu_priv;
+	vdev = dev_iommu_priv_get(dev);
 
 	iommu_group_remove_device(dev);
 	iommu_device_unlink(&vdev->viommu->iommu, dev);
@@ -1082,7 +1093,6 @@ static int viommu_probe(struct virtio_device *vdev)
 
 #ifdef CONFIG_PCI
 	if (pci_bus_type.iommu_ops != &viommu_ops) {
-		pci_request_acs();
 		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
 		if (ret)
 			goto err_unregister;
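
The reworked viommu_domain_finalise() rejects a vIOMMU whose smallest supported granule is larger than the host page size, since the guest could otherwise request mappings the host cannot honor, and it now allocates the domain ID before touching the domain so a failed allocation no longer leaves a half-initialized domain behind. The granule is the lowest set bit of pgsize_bitmap; a standalone illustration of that bit trick (illustrative values, with 4096 as a stand-in for PAGE_SIZE):

#include <stdio.h>

int main(void)
{
	unsigned long pgsize_bitmap = 0x40201000;	/* 4K | 2M | 1G */
	unsigned long page_size = 4096;			/* stand-in for PAGE_SIZE */

	/* Lowest set bit == smallest granule; equivalent to 1UL << __ffs(). */
	unsigned long granule = pgsize_bitmap & -pgsize_bitmap;

	if (granule > page_size)
		printf("reject: granule 0x%lx > page size 0x%lx\n",
		       granule, page_size);
	else
		printf("ok: smallest granule 0x%lx\n", granule);
	return 0;
}
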
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -366,6 +366,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
 
 /**
  * pci_disable_pasid - Disable the PASID capability
@@ -390,6 +391,7 @@ void pci_disable_pasid(struct pci_dev *pdev)
 
 	pdev->pasid_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
 
 /**
  * pci_restore_pasid_state - Restore PASID capabilities
@@ -441,6 +443,7 @@ int pci_pasid_features(struct pci_dev *pdev)
 
 	return supported;
 }
+EXPORT_SYMBOL_GPL(pci_pasid_features);
 
 #define PASID_NUMBER_SHIFT	8
 #define PASID_NUMBER_MASK	(0x1f << PASID_NUMBER_SHIFT)
@@ -469,4 +472,5 @@ int pci_max_pasids(struct pci_dev *pdev)
 
 	return (1 << supported);
 }
+EXPORT_SYMBOL_GPL(pci_max_pasids);
 #endif /* CONFIG_PCI_PASID */
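
These are the PCI symbol exports called out in the pull summary: they let a modular IOMMU driver (the ARM SMMUv3 PASID support in this same series) drive the PCI PASID capability. A hedged sketch of what a consumer does with them (my_enable_pasid is an illustrative name; error handling trimmed):

#include <linux/pci.h>

static int my_enable_pasid(struct pci_dev *pdev)
{
	int features, ret;

	features = pci_pasid_features(pdev);	/* EXEC/PRIV bits, or < 0 */
	if (features < 0)
		return features;

	ret = pci_enable_pasid(pdev, features);
	if (ret)
		return ret;

	/* 1 << "Max PASID Width" from the capability. */
	dev_info(&pdev->dev, "PASID enabled, %d PASIDs\n",
		 pci_max_pasids(pdev));
	return 0;
}
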
diff --git a/include/linux/device.h b/include/linux/device.h
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -42,9 +42,8 @@ struct device_node;
 struct fwnode_handle;
 struct iommu_ops;
 struct iommu_group;
-struct iommu_fwspec;
 struct dev_pin_info;
-struct iommu_param;
+struct dev_iommu;
 
 /**
  * struct subsys_interface - interfaces to device functions
@@ -513,8 +512,7 @@ struct dev_links_info {
  *		gone away. This should be set by the allocator of the
  *		device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
- * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
- * @iommu_param: Per device generic IOMMU runtime data
+ * @iommu:	Per device generic IOMMU runtime data
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:	Set after successful invocation of bus type's .offline().
@@ -613,8 +611,7 @@ struct device {
 	void	(*release)(struct device *dev);
 	struct iommu_group	*iommu_group;
-	struct iommu_fwspec	*iommu_fwspec;
-	struct iommu_param	*iommu_param;
+	struct dev_iommu	*iommu;
 
 	bool			offline_disabled:1;
 	bool			offline:1;
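
struct device is left with a single IOMMU pointer, so everything hanging off it (fault_param, fwspec, and now priv) shares one allocation and one lifetime. The core allocates that container lazily the first time any per-device IOMMU data is set; roughly like the following sketch, paraphrased from the core helper rather than quoted from this diff:

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;	/* fwspec and priv start out NULL */
	return param;
}
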
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -365,17 +365,20 @@ struct iommu_fault_param {
 };
 
 /**
- * struct iommu_param - collection of per-device IOMMU data
+ * struct dev_iommu - Collection of per-device IOMMU data
  *
  * @fault_param: IOMMU detected device fault reporting data
+ * @fwspec:	 IOMMU fwspec data
+ * @priv:	 IOMMU Driver private data
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *	struct iommu_group	*iommu_group;
- *	struct iommu_fwspec	*iommu_fwspec;
  */
-struct iommu_param {
+struct dev_iommu {
 	struct mutex lock;
 	struct iommu_fault_param *fault_param;
+	struct iommu_fwspec *fwspec;
+	void *priv;
 };
 
 int iommu_device_register(struct iommu_device *iommu);
@@ -588,11 +591,10 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
 struct iommu_fwspec {
 	const struct iommu_ops	*ops;
 	struct fwnode_handle	*iommu_fwnode;
-	void			*iommu_priv;
 	u32			flags;
 	u32			num_pasid_bits;
 	unsigned int		num_ids;
-	u32			ids[1];
+	u32			ids[];
 };
 
 /* ATS is supported */
@@ -614,13 +616,26 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 {
-	return dev->iommu_fwspec;
+	if (dev->iommu)
+		return dev->iommu->fwspec;
+	else
+		return NULL;
 }
 
 static inline void dev_iommu_fwspec_set(struct device *dev,
 					struct iommu_fwspec *fwspec)
 {
-	dev->iommu_fwspec = fwspec;
+	dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+	return dev->iommu->priv;
+}
+
+static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+	dev->iommu->priv = priv;
 }
 
 int iommu_probe_device(struct device *dev);
@@ -1073,6 +1088,10 @@ static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
 	return -ENODEV;
 }
 
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+	return NULL;
+}
 #endif /* CONFIG_IOMMU_API */
 
 #ifdef CONFIG_IOMMU_DEBUGFS
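
ids[1] becoming ids[] is the C99 flexible-array form, which removes the off-by-one sizeof(struct) + (n - 1) * sizeof(u32) sizing arithmetic. Allocation is then naturally expressed with struct_size() from linux/overflow.h; a sketch of the idiom (alloc_fwspec is an illustrative name, not the core's actual helper):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct iommu_fwspec *alloc_fwspec(unsigned int num_ids)
{
	struct iommu_fwspec *fwspec;

	/* struct_size() == sizeof(*fwspec) + num_ids * sizeof(fwspec->ids[0]),
	 * with integer-overflow checking folded in. */
	fwspec = kzalloc(struct_size(fwspec, ids, num_ids), GFP_KERNEL);
	if (fwspec)
		fwspec->num_ids = num_ids;

	return fwspec;
}
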
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -18,24 +18,24 @@
 #define VIRTIO_IOMMU_F_MMIO			5
 
 struct virtio_iommu_range_64 {
-	__le64					start;
-	__le64					end;
+	__u64					start;
+	__u64					end;
 };
 
 struct virtio_iommu_range_32 {
-	__le32					start;
-	__le32					end;
+	__u32					start;
+	__u32					end;
 };
 
 struct virtio_iommu_config {
 	/* Supported page sizes */
-	__le64					page_size_mask;
+	__u64					page_size_mask;
 	/* Supported IOVA range */
 	struct virtio_iommu_range_64		input_range;
 	/* Max domain ID size */
 	struct virtio_iommu_range_32		domain_range;
 	/* Probe buffer size */
-	__le32					probe_size;
+	__u32					probe_size;
 };
 
 /* Request types */
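
Dropping __le from the config-space fields quiets sparse: these fields are only ever read through the virtio_cread() accessors, which already perform any byte swapping the transport requires, so annotating them little-endian made every access look like a missing conversion. Condensed sketch of the access pattern (viommu_dev is the driver structure from the hunks above; my_read_config is an illustrative name):

static void my_read_config(struct virtio_device *vdev,
			   struct viommu_dev *viommu)
{
	/* virtio_cread() handles endianness, so plain __u64/__u32
	 * field types are the accurate declaration. */
	virtio_cread(vdev, struct virtio_iommu_config,
		     page_size_mask, &viommu->pgsize_bitmap);
	virtio_cread(vdev, struct virtio_iommu_config,
		     probe_size, &viommu->probe_size);
}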