Arm SMMU updates for 5.14
- SMMUv3:
  * Support stalling faults for platform devices
  * Decrease default sizes for the event and PRI queues

- SMMUv2:
  * Support for a new '->probe_finalize' hook, needed by Nvidia
  * Even more Qualcomm compatible strings
  * Avoid Adreno TTBR1 quirk for DB820C platform

- Misc:
  * Trivial cleanups/refactoring

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmDJ1GkQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNPIRCAC5ia+H1CHxjWRDmn7A++6/mNMu3hRKJCFj
WqKkz6Af3BcYEbuZeflHEp7BgHUo9r0FdhcMyz7pPRfBDpqP+eRmTxfDtJFThkn4
uwQmL4E8CbYnZUOQC8uvj87JG/7AgtC4Yu1XNpux2i7PYDKIW+BsW1Sla62Sa4aQ
LDuowqWZwgAkLyRSfEa4xmmlD8FN9tSrPkZBM3G1t9ixyl2LFBRhrePpirSMe8X8
xolCg209fYq1G5DJlYmKwsuNU+eEAVc8HQJzucnOJlqSbHhxezWbKTiyp3H3FSoV
4B2+CQAPGZCCti86yT0F2Ha+p4LWoKvWb12rb7eFTrJGEA1bVDmi
=PNoV
-----END PGP SIGNATURE-----

Merge tag 'arm-smmu-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
commit 1c461ef9c4
@@ -92,6 +92,24 @@ Optional properties:
   tagging DMA transactions with an address space identifier. By default,
   this is 0, which means that the device only has one address space.
 
+- dma-can-stall: When present, the master can wait for a transaction to
+  complete for an indefinite amount of time. Upon translation fault, some
+  IOMMUs, instead of aborting the translation immediately, may first
+  notify the driver and keep the transaction in flight. This allows the OS
+  to inspect the fault and, for example, make physical pages resident
+  before updating the mappings and completing the transaction. Such an
+  IOMMU accepts a limited number of simultaneous stalled transactions
+  before having to either put back-pressure on the master, or abort new
+  faulting transactions.
+
+  Firmware has to opt in to stalling, because most buses and masters don't
+  support it. In particular it isn't compatible with PCI, where
+  transactions have to complete before a time limit. More generally it
+  won't work in systems and masters that haven't been designed for
+  stalling. For example the OS, in order to handle a stalled transaction,
+  may attempt to retrieve pages from secondary storage in a stalled
+  domain, leading to a deadlock.
+
 
 Notes:
 ======
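Both the DT property above and the equivalent ACPI/IORT flag (see the IORT hunk below) surface as a generic device property, so consumers test it with device_property_read_bool(). A minimal sketch, with a hypothetical helper name:

#include <linux/property.h>

/* Illustrative helper: should stall mode be enabled for this master? */
static bool example_master_can_stall(struct device *dev)
{
	/* Covers both firmware interfaces, DT and ACPI/IORT */
	return device_property_read_bool(dev, "dma-can-stall");
}

This is the same check that arm_smmu_probe_device() performs further down in this series.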
@@ -1136,7 +1136,7 @@
 		};
 
 		adreno_smmu: iommu@b40000 {
-			compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+			compatible = "qcom,msm8996-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
 			reg = <0x00b40000 0x10000>;
 
 			#global-interrupts = <1>;
@@ -968,13 +968,15 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
 static void iort_named_component_init(struct device *dev,
 				      struct acpi_iort_node *node)
 {
-	struct property_entry props[2] = {};
+	struct property_entry props[3] = {};
 	struct acpi_iort_named_component *nc;
 
 	nc = (struct acpi_iort_named_component *)node->node_data;
 	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
 				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
 						nc->node_flags));
+	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
+		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
 
 	if (device_add_properties(dev, props))
 		dev_warn(dev, "Could not add device properties\n");
@@ -435,9 +435,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 	return true;
 }
 
-static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
 {
-	return false;
+	/* We're not keeping track of SIDs in fault events */
+	if (master->num_streams != 1)
+		return false;
+
+	return master->stall_enabled;
 }
 
 bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
@@ -445,8 +449,8 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
 	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
 		return false;
 
-	/* SSID and IOPF support are mandatory for the moment */
-	return master->ssid_bits && arm_smmu_iopf_supported(master);
+	/* SSID support is mandatory for the moment */
+	return master->ssid_bits;
 }
 
 bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
@@ -459,13 +463,55 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
 	return enabled;
 }
 
+static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
+{
+	int ret;
+	struct device *dev = master->dev;
+
+	/*
+	 * Drivers for devices supporting PRI or stall should enable IOPF first.
+	 * Others have device-specific fault handlers and don't need IOPF.
+	 */
+	if (!arm_smmu_master_iopf_supported(master))
+		return 0;
+
+	if (!master->iopf_enabled)
+		return -EINVAL;
+
+	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
+	if (ret)
+		return ret;
+
+	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+	if (ret) {
+		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+		return ret;
+	}
+	return 0;
+}
+
+static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
+{
+	struct device *dev = master->dev;
+
+	if (!master->iopf_enabled)
+		return;
+
+	iommu_unregister_device_fault_handler(dev);
+	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+}
+
 int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
 {
+	int ret;
+
 	mutex_lock(&sva_lock);
-	master->sva_enabled = true;
+	ret = arm_smmu_master_sva_enable_iopf(master);
+	if (!ret)
+		master->sva_enabled = true;
 	mutex_unlock(&sva_lock);
 
-	return 0;
+	return ret;
 }
 
 int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
@@ -476,6 +522,7 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
 		mutex_unlock(&sva_lock);
 		return -EBUSY;
 	}
+	arm_smmu_master_sva_disable_iopf(master);
 	master->sva_enabled = false;
 	mutex_unlock(&sva_lock);
 
@@ -32,6 +32,7 @@
 #include <linux/amba/bus.h>
 
 #include "arm-smmu-v3.h"
+#include "../../iommu-sva-lib.h"
 
 static bool disable_bypass = true;
 module_param(disable_bypass, bool, 0444);
@@ -313,6 +314,11 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 		}
 		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
 		break;
+	case CMDQ_OP_RESUME:
+		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid);
+		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp);
+		cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag);
+		break;
 	case CMDQ_OP_CMD_SYNC:
 		if (ent->sync.msiaddr) {
 			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
@@ -352,7 +358,7 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
 
 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 {
-	static const char *cerror_str[] = {
+	static const char * const cerror_str[] = {
 		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
 		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
 		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
@@ -876,6 +882,44 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
 	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
 }
 
+static int arm_smmu_page_response(struct device *dev,
+				  struct iommu_fault_event *unused,
+				  struct iommu_page_response *resp)
+{
+	struct arm_smmu_cmdq_ent cmd = {0};
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+	int sid = master->streams[0].id;
+
+	if (master->stall_enabled) {
+		cmd.opcode		= CMDQ_OP_RESUME;
+		cmd.resume.sid		= sid;
+		cmd.resume.stag		= resp->grpid;
+		switch (resp->code) {
+		case IOMMU_PAGE_RESP_INVALID:
+		case IOMMU_PAGE_RESP_FAILURE:
+			cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+			break;
+		case IOMMU_PAGE_RESP_SUCCESS:
+			cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		return -ENODEV;
+	}
+
+	arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
+	/*
+	 * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
+	 * RESUME consumption guarantees that the stalled transaction will be
+	 * terminated... at some point in the future. PRI_RESP is fire and
+	 * forget.
+	 */
+
+	return 0;
+}
+
 /* Context descriptor manipulation functions */
 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
 {
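The handler above is reached through the generic .page_response op. A sketch of the consumer side, a fault handler completing a stalled transaction via iommu_page_response(); the handler name and the fault-resolution step are illustrative, while the structures and entry point are the 5.14-era API:

#include <linux/iommu.h>

static int example_iopf_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.grpid		= fault->prm.grpid,
		.code		= IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	/* ... resolve the fault, e.g. make the page resident ... */

	return iommu_page_response(dev, &resp);
}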
|
@ -986,7 +1030,6 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
|
|||
u64 val;
|
||||
bool cd_live;
|
||||
__le64 *cdptr;
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
|
||||
if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
|
||||
return -E2BIG;
|
||||
|
@@ -1031,8 +1074,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
 			CTXDESC_CD_0_V;
 
-		/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
-		if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+		if (smmu_domain->stall_enabled)
 			val |= CTXDESC_CD_0_S;
 	}
 
@@ -1276,7 +1318,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 			 FIELD_PREP(STRTAB_STE_1_STRW, strw));
 
 		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
-		    !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+		    !master->stall_enabled)
 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
 		val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
@@ -1353,7 +1395,6 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 	return 0;
 }
 
-__maybe_unused
 static struct arm_smmu_master *
 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
 {
@@ -1377,18 +1418,118 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
 }
 
 /* IRQ and event handlers */
+static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+{
+	int ret;
+	u32 reason;
+	u32 perm = 0;
+	struct arm_smmu_master *master;
+	bool ssid_valid = evt[0] & EVTQ_0_SSV;
+	u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
+	struct iommu_fault_event fault_evt = { };
+	struct iommu_fault *flt = &fault_evt.fault;
+
+	switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+	case EVT_ID_TRANSLATION_FAULT:
+		reason = IOMMU_FAULT_REASON_PTE_FETCH;
+		break;
+	case EVT_ID_ADDR_SIZE_FAULT:
+		reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
+		break;
+	case EVT_ID_ACCESS_FAULT:
+		reason = IOMMU_FAULT_REASON_ACCESS;
+		break;
+	case EVT_ID_PERMISSION_FAULT:
+		reason = IOMMU_FAULT_REASON_PERMISSION;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* Stage-2 is always pinned at the moment */
+	if (evt[1] & EVTQ_1_S2)
+		return -EFAULT;
+
+	if (evt[1] & EVTQ_1_RnW)
+		perm |= IOMMU_FAULT_PERM_READ;
+	else
+		perm |= IOMMU_FAULT_PERM_WRITE;
+
+	if (evt[1] & EVTQ_1_InD)
+		perm |= IOMMU_FAULT_PERM_EXEC;
+
+	if (evt[1] & EVTQ_1_PnU)
+		perm |= IOMMU_FAULT_PERM_PRIV;
+
+	if (evt[1] & EVTQ_1_STALL) {
+		flt->type = IOMMU_FAULT_PAGE_REQ;
+		flt->prm = (struct iommu_fault_page_request) {
+			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+			.perm = perm,
+			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+		};
+
+		if (ssid_valid) {
+			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+		}
+	} else {
+		flt->type = IOMMU_FAULT_DMA_UNRECOV;
+		flt->event = (struct iommu_fault_unrecoverable) {
+			.reason = reason,
+			.flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
+			.perm = perm,
+			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+		};
+
+		if (ssid_valid) {
+			flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
+			flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+		}
+	}
+
+	mutex_lock(&smmu->streams_mutex);
+	master = arm_smmu_find_master(smmu, sid);
+	if (!master) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = iommu_report_device_fault(master->dev, &fault_evt);
+	if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
+		/* Nobody cared, abort the access */
+		struct iommu_page_response resp = {
+			.pasid		= flt->prm.pasid,
+			.grpid		= flt->prm.grpid,
+			.code		= IOMMU_PAGE_RESP_FAILURE,
+		};
+		arm_smmu_page_response(master->dev, &fault_evt, &resp);
+	}
+
+out_unlock:
+	mutex_unlock(&smmu->streams_mutex);
+	return ret;
+}
+
 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
 {
-	int i;
+	int i, ret;
 	struct arm_smmu_device *smmu = dev;
 	struct arm_smmu_queue *q = &smmu->evtq.q;
 	struct arm_smmu_ll_queue *llq = &q->llq;
+	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 	u64 evt[EVTQ_ENT_DWORDS];
 
 	do {
 		while (!queue_remove_raw(q, evt)) {
 			u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
 
+			ret = arm_smmu_handle_evt(smmu, evt);
+			if (!ret || !__ratelimit(&rs))
+				continue;
+
 			dev_info(smmu->dev, "event 0x%02x received:\n", id);
 			for (i = 0; i < ARRAY_SIZE(evt); ++i)
 				dev_info(smmu->dev, "\t0x%016llx\n",
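A note on delivery: iommu_report_device_fault() hands the translated event to whatever fault handler is registered on the struct device. In this series the SMMU driver registers iommu_queue_iopf for stall-capable masters; a device doing its own fault handling would instead register a handler of its own, along these lines (reusing the illustrative example_iopf_handler sketch above):

	ret = iommu_register_device_fault_handler(dev, example_iopf_handler, dev);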
@@ -1923,6 +2064,8 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 
 	cfg->s1cdmax = master->ssid_bits;
 
+	smmu_domain->stall_enabled = master->stall_enabled;
+
 	ret = arm_smmu_alloc_cd_tables(smmu_domain);
 	if (ret)
 		goto out_free_asid;
@@ -2270,6 +2413,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
 		ret = -EINVAL;
 		goto out_unlock;
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+		   smmu_domain->stall_enabled != master->stall_enabled) {
+		dev_err(dev, "cannot attach to stall-%s domain\n",
+			smmu_domain->stall_enabled ? "enabled" : "disabled");
+		ret = -EINVAL;
+		goto out_unlock;
 	}
 
 	master->domain = smmu_domain;
@@ -2508,6 +2657,11 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 		master->ssid_bits = min_t(u8, master->ssid_bits,
 					  CTXDESC_LINEAR_CDMAX);
 
+	if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+	     device_property_read_bool(dev, "dma-can-stall")) ||
+	    smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+		master->stall_enabled = true;
+
 	return &smmu->iommu;
 
 err_free_master:
@@ -2525,7 +2679,8 @@ static void arm_smmu_release_device(struct device *dev)
 		return;
 
 	master = dev_iommu_priv_get(dev);
-	WARN_ON(arm_smmu_master_sva_enabled(master));
+	if (WARN_ON(arm_smmu_master_sva_enabled(master)))
+		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
 	arm_smmu_detach_dev(master);
 	arm_smmu_disable_pasid(master);
 	arm_smmu_remove_master(master);
@@ -2595,6 +2750,8 @@ static bool arm_smmu_dev_has_feature(struct device *dev,
 		return false;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		return arm_smmu_master_iopf_supported(master);
 	case IOMMU_DEV_FEAT_SVA:
 		return arm_smmu_master_sva_supported(master);
 	default:
@@ -2611,6 +2768,8 @@ static bool arm_smmu_dev_feature_enabled(struct device *dev,
 		return false;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		return master->iopf_enabled;
 	case IOMMU_DEV_FEAT_SVA:
 		return arm_smmu_master_sva_enabled(master);
 	default:
@@ -2621,6 +2780,8 @@ static bool arm_smmu_dev_feature_enabled(struct device *dev,
 static int arm_smmu_dev_enable_feature(struct device *dev,
 				       enum iommu_dev_features feat)
 {
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
 	if (!arm_smmu_dev_has_feature(dev, feat))
 		return -ENODEV;
 
@@ -2628,8 +2789,11 @@ static int arm_smmu_dev_enable_feature(struct device *dev,
 		return -EBUSY;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		master->iopf_enabled = true;
+		return 0;
 	case IOMMU_DEV_FEAT_SVA:
-		return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
+		return arm_smmu_master_enable_sva(master);
 	default:
 		return -EINVAL;
 	}
@@ -2638,12 +2802,19 @@ static int arm_smmu_dev_enable_feature(struct device *dev,
 static int arm_smmu_dev_disable_feature(struct device *dev,
 					enum iommu_dev_features feat)
 {
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
 	if (!arm_smmu_dev_feature_enabled(dev, feat))
 		return -EINVAL;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		if (master->sva_enabled)
+			return -EBUSY;
+		master->iopf_enabled = false;
+		return 0;
 	case IOMMU_DEV_FEAT_SVA:
-		return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
+		return arm_smmu_master_disable_sva(master);
 	default:
 		return -EINVAL;
 	}
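From an endpoint driver's point of view, the new IOMMU_DEV_FEAT_IOPF cases make the required ordering explicit: IOPF must be enabled before SVA and disabled after it. An illustrative probe-time sequence (the function is a sketch, not part of this series):

#include <linux/iommu.h>

static int example_enable_sva(struct device *dev)
{
	int ret;

	/* IOPF first, mirroring the checks above */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}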
@@ -2673,6 +2844,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.sva_bind		= arm_smmu_sva_bind,
 	.sva_unbind		= arm_smmu_sva_unbind,
 	.sva_get_pasid		= arm_smmu_sva_get_pasid,
+	.page_response		= arm_smmu_page_response,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 	.owner			= THIS_MODULE,
 };
@@ -2771,6 +2943,13 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
 	if (ret)
 		return ret;
 
+	if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
+	    (smmu->features & ARM_SMMU_FEAT_STALLS)) {
+		smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
+		if (!smmu->evtq.iopf)
+			return -ENOMEM;
+	}
+
 	/* priq */
 	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
 		return 0;
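The queue allocated here pairs with the iopf_queue_add_device()/iopf_queue_remove_device() calls in arm-smmu-v3-sva.c above and with the iopf_queue_free() in arm_smmu_device_remove() below. A minimal lifecycle sketch (the helper name is illustrative; the iopf_queue_*() API is declared in iommu-sva-lib.h):

static int example_iopf_lifecycle(struct iopf_queue *iopf, struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(iopf, dev);		/* on IOPF enable */
	if (ret)
		return ret;

	/* ... faults for @dev are now handled in process context ... */

	iopf_queue_remove_device(iopf, dev);		/* on IOPF disable */
	return 0;
}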
@@ -2788,10 +2967,8 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
 	void *strtab = smmu->strtab_cfg.strtab;
 
 	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
-	if (!cfg->l1_desc) {
-		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
+	if (!cfg->l1_desc)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < cfg->num_l1_ents; ++i) {
 		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
@@ -3582,10 +3759,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	bool bypass;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
-	if (!smmu) {
-		dev_err(dev, "failed to allocate arm_smmu_device\n");
+	if (!smmu)
 		return -ENOMEM;
-	}
 	smmu->dev = dev;
 
 	if (dev->of_node) {
@@ -3669,10 +3844,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
 	if (ret) {
 		dev_err(dev, "Failed to register iommu\n");
-		return ret;
+		goto err_sysfs_remove;
 	}
 
-	return arm_smmu_set_bus_ops(&arm_smmu_ops);
+	ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
+	if (ret)
+		goto err_unregister_device;
+
+	return 0;
+
+err_unregister_device:
+	iommu_device_unregister(&smmu->iommu);
+err_sysfs_remove:
+	iommu_device_sysfs_remove(&smmu->iommu);
+	return ret;
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -3683,6 +3868,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	iommu_device_unregister(&smmu->iommu);
 	iommu_device_sysfs_remove(&smmu->iommu);
 	arm_smmu_device_disable(smmu);
+	iopf_queue_free(smmu->evtq.iopf);
 
 	return 0;
 }
@@ -184,6 +184,7 @@
 #else
 #define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + MAX_ORDER - 1)
 #endif
+#define Q_MIN_SZ_SHIFT			(PAGE_SHIFT)
 
 /*
  * Stream table.
@@ -354,6 +355,13 @@
 #define CMDQ_PRI_1_GRPID		GENMASK_ULL(8, 0)
 #define CMDQ_PRI_1_RESP			GENMASK_ULL(13, 12)
 
+#define CMDQ_RESUME_0_RESP_TERM		0UL
+#define CMDQ_RESUME_0_RESP_RETRY	1UL
+#define CMDQ_RESUME_0_RESP_ABORT	2UL
+#define CMDQ_RESUME_0_RESP		GENMASK_ULL(13, 12)
+#define CMDQ_RESUME_0_SID		GENMASK_ULL(63, 32)
+#define CMDQ_RESUME_1_STAG		GENMASK_ULL(15, 0)
+
 #define CMDQ_SYNC_0_CS			GENMASK_ULL(13, 12)
 #define CMDQ_SYNC_0_CS_NONE		0
 #define CMDQ_SYNC_0_CS_IRQ		1
@@ -366,14 +374,33 @@
 /* Event queue */
 #define EVTQ_ENT_SZ_SHIFT		5
 #define EVTQ_ENT_DWORDS			((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
-#define EVTQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+#define EVTQ_MAX_SZ_SHIFT		(Q_MIN_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
 
 #define EVTQ_0_ID			GENMASK_ULL(7, 0)
 
+#define EVT_ID_TRANSLATION_FAULT	0x10
+#define EVT_ID_ADDR_SIZE_FAULT		0x11
+#define EVT_ID_ACCESS_FAULT		0x12
+#define EVT_ID_PERMISSION_FAULT		0x13
+
+#define EVTQ_0_SSV			(1UL << 11)
+#define EVTQ_0_SSID			GENMASK_ULL(31, 12)
+#define EVTQ_0_SID			GENMASK_ULL(63, 32)
+#define EVTQ_1_STAG			GENMASK_ULL(15, 0)
+#define EVTQ_1_STALL			(1UL << 31)
+#define EVTQ_1_PnU			(1UL << 33)
+#define EVTQ_1_InD			(1UL << 34)
+#define EVTQ_1_RnW			(1UL << 35)
+#define EVTQ_1_S2			(1UL << 39)
+#define EVTQ_1_CLASS			GENMASK_ULL(41, 40)
+#define EVTQ_1_TT_READ			(1UL << 44)
+#define EVTQ_2_ADDR			GENMASK_ULL(63, 0)
+#define EVTQ_3_IPA			GENMASK_ULL(51, 12)
+
 /* PRI queue */
 #define PRIQ_ENT_SZ_SHIFT		4
 #define PRIQ_ENT_DWORDS			((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
-#define PRIQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+#define PRIQ_MAX_SZ_SHIFT		(Q_MIN_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
 
 #define PRIQ_0_SID			GENMASK_ULL(31, 0)
 #define PRIQ_0_SSID			GENMASK_ULL(51, 32)
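The effect of switching from Q_MAX_SZ_SHIFT to Q_MIN_SZ_SHIFT is easiest to see with concrete numbers. The figures below are illustrative only, assuming 4KiB pages (PAGE_SHIFT = 12) and MAX_ORDER = 11, i.e. the non-CMA fallback shown earlier:

/*
 *   Q_MAX_SZ_SHIFT = PAGE_SHIFT + MAX_ORDER - 1 = 12 + 11 - 1 = 22
 *   old EVTQ_MAX_SZ_SHIFT = 22 - EVTQ_ENT_SZ_SHIFT = 22 - 5 = 17
 *       -> 2^17 = 128Ki event entries at 32 bytes each = 4MiB per SMMU
 *
 *   Q_MIN_SZ_SHIFT = PAGE_SHIFT = 12
 *   new EVTQ_MAX_SZ_SHIFT = 12 - 5 = 7
 *       -> 2^7 = 128 event entries, i.e. a single 4KiB page
 *
 * The PRI queue shrinks the same way: 16-byte entries, 256 per page.
 */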
@@ -462,6 +489,13 @@ struct arm_smmu_cmdq_ent {
 			enum pri_resp		resp;
 		} pri;
 
+		#define CMDQ_OP_RESUME		0x44
+		struct {
+			u32			sid;
+			u16			stag;
+			u8			resp;
+		} resume;
+
 		#define CMDQ_OP_CMD_SYNC	0x46
 		struct {
 			u64			msiaddr;
@@ -520,6 +554,7 @@ struct arm_smmu_cmdq_batch {
 
 struct arm_smmu_evtq {
 	struct arm_smmu_queue		q;
+	struct iopf_queue		*iopf;
 	u32				max_stalls;
 };
@@ -657,7 +692,9 @@ struct arm_smmu_master {
 	struct arm_smmu_stream		*streams;
 	unsigned int			num_streams;
 	bool				ats_enabled;
+	bool				stall_enabled;
 	bool				sva_enabled;
+	bool				iopf_enabled;
 	struct list_head		bonds;
 	unsigned int			ssid_bits;
 };
@@ -675,6 +712,7 @@ struct arm_smmu_domain {
 	struct mutex			init_mutex; /* Protects smmu pointer */
 
 	struct io_pgtable_ops		*pgtbl_ops;
+	bool				stall_enabled;
 	atomic_t			nr_ats_masters;
 
 	enum arm_smmu_domain_stage	stage;
@@ -716,6 +754,7 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
 bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
 int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
 int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
 struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
 				    void *drvdata);
 void arm_smmu_sva_unbind(struct iommu_sva *handle);
@@ -747,6 +786,11 @@ static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
 	return -ENODEV;
 }
 
+static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
+{
+	return false;
+}
+
 static inline struct iommu_sva *
 arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
 {
@@ -3,6 +3,7 @@
  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/acpi.h>
 #include <linux/adreno-smmu-priv.h>
 #include <linux/of_device.h>
 #include <linux/qcom_scm.h>
@@ -130,6 +131,16 @@ static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_doma
 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
 }
 
+static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
+{
+	const struct device_node *np = smmu->dev->of_node;
+
+	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
+		return false;
+
+	return true;
+}
+
 static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
 {
@@ -144,7 +155,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
 	 * that is the case when the TTBR1 quirk is enabled
 	 */
-	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
+	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
 	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
 		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
 
|
@ -166,6 +178,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
|
|||
{ .compatible = "qcom,mdss" },
|
||||
{ .compatible = "qcom,sc7180-mdss" },
|
||||
{ .compatible = "qcom,sc7180-mss-pil" },
|
||||
{ .compatible = "qcom,sc7280-mdss" },
|
||||
{ .compatible = "qcom,sc8180x-mdss" },
|
||||
{ .compatible = "qcom,sdm845-mdss" },
|
||||
{ .compatible = "qcom,sdm845-mss-pil" },
|
||||
|
@@ -330,24 +343,48 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
 static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
 	{ .compatible = "qcom,msm8998-smmu-v2" },
 	{ .compatible = "qcom,sc7180-smmu-500" },
+	{ .compatible = "qcom,sc7280-smmu-500" },
 	{ .compatible = "qcom,sc8180x-smmu-500" },
 	{ .compatible = "qcom,sdm630-smmu-v2" },
 	{ .compatible = "qcom,sdm845-smmu-500" },
+	{ .compatible = "qcom,sm6125-smmu-500" },
 	{ .compatible = "qcom,sm8150-smmu-500" },
 	{ .compatible = "qcom,sm8250-smmu-500" },
 	{ .compatible = "qcom,sm8350-smmu-500" },
 	{ }
 };
 
+#ifdef CONFIG_ACPI
+static struct acpi_platform_list qcom_acpi_platlist[] = {
+	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+	{ }
+};
+#endif
+
 struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 {
 	const struct device_node *np = smmu->dev->of_node;
 
-	if (of_match_node(qcom_smmu_impl_of_match, np))
-		return qcom_smmu_create(smmu, &qcom_smmu_impl);
+#ifdef CONFIG_ACPI
+	if (np == NULL) {
+		/* Match platform for ACPI boot */
+		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
+			return qcom_smmu_create(smmu, &qcom_smmu_impl);
+	}
+#endif
 
+	/*
+	 * Do not change this order of implementation, i.e., first adreno
+	 * smmu impl and then apss smmu since we can have both implementing
+	 * arm,mmu-500 in which case we will miss setting adreno smmu specific
+	 * features if the order is changed.
+	 */
 	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
 		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
 
+	if (of_match_node(qcom_smmu_impl_of_match, np))
+		return qcom_smmu_create(smmu, &qcom_smmu_impl);
+
 	return smmu;
 }
@@ -74,7 +74,7 @@ static bool using_legacy_binding, using_generic_binding;
 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
 {
 	if (pm_runtime_enabled(smmu->dev))
-		return pm_runtime_get_sync(smmu->dev);
+		return pm_runtime_resume_and_get(smmu->dev);
 
 	return 0;
 }
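The difference matters in the failure case: pm_runtime_get_sync() leaves the usage counter raised even when the resume fails, whereas pm_runtime_resume_and_get() drops it again, so callers no longer leak a reference on error. Roughly, the new helper is equivalent to this sketch:

#include <linux/pm_runtime.h>

static int example_resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the get on failure */
		return ret;
	}

	return 0;
}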
@@ -1271,6 +1271,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	u64 phys;
 	unsigned long va, flags;
 	int ret, idx = cfg->cbndx;
+	phys_addr_t addr = 0;
 
 	ret = arm_smmu_rpm_get(smmu);
 	if (ret < 0)
@@ -1290,6 +1291,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 		dev_err(dev,
 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
 			&iova);
+		arm_smmu_rpm_put(smmu);
 		return ops->iova_to_phys(ops, iova);
 	}
 
@@ -1298,12 +1300,14 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	if (phys & ARM_SMMU_CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);
-		return 0;
+		goto out;
 	}
 
+	addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+out:
 	arm_smmu_rpm_put(smmu);
 
-	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+	return addr;
 }
 
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
@@ -1450,6 +1454,18 @@ static void arm_smmu_release_device(struct device *dev)
 	iommu_fwspec_free(dev);
 }
 
+static void arm_smmu_probe_finalize(struct device *dev)
+{
+	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_device *smmu;
+
+	cfg = dev_iommu_priv_get(dev);
+	smmu = cfg->smmu;
+
+	if (smmu->impl && smmu->impl->probe_finalize)
+		smmu->impl->probe_finalize(smmu, dev);
+}
+
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
|
|||
.iova_to_phys = arm_smmu_iova_to_phys,
|
||||
.probe_device = arm_smmu_probe_device,
|
||||
.release_device = arm_smmu_release_device,
|
||||
.probe_finalize = arm_smmu_probe_finalize,
|
||||
.device_group = arm_smmu_device_group,
|
||||
.enable_nesting = arm_smmu_enable_nesting,
|
||||
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
|
||||
|
@@ -2164,7 +2181,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
 	if (err) {
 		dev_err(dev, "Failed to register iommu\n");
-		return err;
+		goto err_sysfs_remove;
 	}
 
 	platform_set_drvdata(pdev, smmu);
@@ -2187,10 +2204,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	 * any device which might need it, so we want the bus ops in place
 	 * ready to handle default domain setup as soon as any SMMU exists.
 	 */
-	if (!using_legacy_binding)
-		return arm_smmu_bus_init(&arm_smmu_ops);
+	if (!using_legacy_binding) {
+		err = arm_smmu_bus_init(&arm_smmu_ops);
+		if (err)
+			goto err_unregister_device;
+	}
 
 	return 0;
+
+err_unregister_device:
+	iommu_device_unregister(&smmu->iommu);
+err_sysfs_remove:
+	iommu_device_sysfs_remove(&smmu->iommu);
+	return err;
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -439,6 +439,7 @@ struct arm_smmu_impl {
 			     struct device *dev, int start);
 	void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
 	void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
+	void (*probe_finalize)(struct arm_smmu_device *smmu, struct device *dev);
 };
 
 #define INVALID_SMENDX			-1
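A hypothetical user of the new hook, just to show its shape; the callback is invoked from arm_smmu_probe_finalize() above once the core driver has finished probing @dev (the in-tree consumer this cycle is the Nvidia implementation; this sketch is not it):

static void example_probe_finalize(struct arm_smmu_device *smmu,
				   struct device *dev)
{
	/* e.g. set up implementation-specific per-client state */
	dev_dbg(smmu->dev, "finalizing probe of %s\n", dev_name(dev));
}

static const struct arm_smmu_impl example_impl = {
	.probe_finalize = example_probe_finalize,
};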
@@ -850,10 +850,12 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
 	ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
 	if (ret) {
 		dev_err(dev, "Failed to register iommu\n");
-		return ret;
+		goto err_sysfs_remove;
 	}
 
-	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
+	ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
+	if (ret)
+		goto err_unregister_device;
 
 	if (qcom_iommu->local_base) {
 		pm_runtime_get_sync(dev);
@@ -862,6 +864,13 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
 	}
 
 	return 0;
+
+err_unregister_device:
+	iommu_device_unregister(&qcom_iommu->iommu);
+
+err_sysfs_remove:
+	iommu_device_sysfs_remove(&qcom_iommu->iommu);
+	return ret;
 }
 
 static int qcom_iommu_device_remove(struct platform_device *pdev)