drivers: acpi: Handle IOMMU lookup failure with deferred probing or error
This is the ACPI equivalent of the DT handling of an IOMMU master's probe: the master's probe is deferred when the corresponding IOMMU has not been probed yet. The lack of a registered IOMMU can be caused by the lack of a driver for the IOMMU, or by the IOMMU device probe not having been performed yet, having been deferred, or having failed.

The first case occurs when the firmware describes the bus master and IOMMU topology correctly but no device driver exists for the IOMMU yet, or the driver has not been compiled in. Return NULL; the caller will then configure the device without an IOMMU.

The second and third cases are handled by deferring the probe of the bus master device, which will eventually be reprobed after the IOMMU.

The last case is currently handled by deferring the probe of the bus master device as well. A mechanism to either configure the bus master device without an IOMMU or to fail its probe, depending on whether the IOMMU is optional or mandatory, would be a good enhancement.

Tested-by: Hanjun Guo <hanjun.guo@linaro.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
[Lorenzo: added fixes for dma_coherent_mask overflow and for acpi_dma_configure being called multiple times for the same device]
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sricharan R <sricharan@codeaurora.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Commit 5a1bb638d5 (parent 7b07cbefb6)
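Condensed, the lookup-failure policy this patch introduces in iort_iommu_xlate() comes down to the following (a sketch distilled from the hunks below, not a verbatim excerpt of the file):

        ops = iommu_ops_from_fwnode(iort_fwnode);
        if (!ops)
                /*
                 * No ops registered for this IOMMU yet: if a driver for this
                 * IOMMU type is built into the kernel it simply has not probed
                 * yet, so ask the caller to defer; otherwise let the caller
                 * configure the device without an IOMMU.
                 */
                return iort_iommu_driver_enabled(node->type) ?
                       ERR_PTR(-EPROBE_DEFER) : NULL;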
drivers/acpi/arm64/iort.c:

@@ -543,6 +543,14 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
 	const struct iommu_ops *ops = NULL;
 	int ret = -ENODEV;
 	struct fwnode_handle *iort_fwnode;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	/*
+	 * If we already translated the fwspec there
+	 * is nothing left to do, return the iommu_ops.
+	 */
+	if (fwspec && fwspec->ops)
+		return fwspec->ops;
 
 	if (node) {
 		iort_fwnode = iort_get_fwnode(node);
@@ -550,8 +558,17 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
 			return NULL;
 
 		ops = iommu_ops_from_fwnode(iort_fwnode);
+		/*
+		 * If the ops look-up fails, this means that either
+		 * the SMMU drivers have not been probed yet or that
+		 * the SMMU drivers are not built in the kernel;
+		 * Depending on whether the SMMU drivers are built-in
+		 * in the kernel or not, defer the IOMMU configuration
+		 * or just abort it.
+		 */
 		if (!ops)
-			return NULL;
+			return iort_iommu_driver_enabled(node->type) ?
+			       ERR_PTR(-EPROBE_DEFER) : NULL;
 
 		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
 	}
@@ -625,12 +642,26 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 
 		while (parent) {
 			ops = iort_iommu_xlate(dev, parent, streamid);
+			if (IS_ERR_OR_NULL(ops))
+				return ops;
+
 			parent = iort_node_get_id(node, &streamid,
 						  IORT_IOMMU_TYPE, i++);
 		}
 	}
 
+	/*
+	 * If we have reason to believe the IOMMU driver missed the initial
+	 * add_device callback for dev, replay it to get things in order.
+	 */
+	if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
+	    dev->bus && !dev->iommu_group) {
+		int err = ops->add_device(dev);
+
+		if (err)
+			ops = ERR_PTR(err);
+	}
+
 	return ops;
 }
 
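For context, the return value of iort_iommu_configure() now encodes three distinct outcomes that every caller has to tell apart; a minimal sketch of the expected caller pattern (the real caller is the acpi_dma_configure() hunk further down; the helper name here is hypothetical):

        /* Hypothetical caller illustrating the three outcomes of iort_iommu_configure(). */
        static int example_setup_iommu(struct device *dev)
        {
                const struct iommu_ops *ops = iort_iommu_configure(dev);

                if (IS_ERR(ops))        /* typically -EPROBE_DEFER: the IOMMU has not probed yet */
                        return PTR_ERR(ops);

                if (!ops)               /* no IOMMU, or its driver is not built in */
                        return 0;       /* configure plain, non-IOMMU DMA ops */

                return 0;               /* IOMMU present and probed: set up IOMMU-backed DMA ops */
        }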
drivers/acpi/scan.c:

@@ -1373,20 +1373,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
  * @dev: The pointer to the device
  * @attr: device dma attributes
  */
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 {
 	const struct iommu_ops *iommu;
+	u64 size;
 
 	iort_set_dma_mask(dev);
 
 	iommu = iort_iommu_configure(dev);
+	if (IS_ERR(iommu))
+		return PTR_ERR(iommu);
 
+	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
 	/*
 	 * Assume dma valid range starts at 0 and covers the whole
 	 * coherent_dma_mask.
 	 */
-	arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
-			   attr == DEV_DMA_COHERENT);
+	arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_dma_configure);
 
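The size computation above is the dma_coherent_mask overflow fix mentioned in the changelog: when coherent_dma_mask is DMA_BIT_MASK(64), coherent_dma_mask + 1 wraps around to 0, so max() keeps the mask itself as the size in that case. A tiny illustration with hypothetical mask values:

        /* 32-bit mask: 0xffffffff + 1 = 0x100000000, max() picks the +1 value      */
        /* 64-bit mask: 0xffffffffffffffff + 1 wraps to 0, max() keeps the mask      */
        u64 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);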
drivers/base/dma-mapping.c:

@@ -368,7 +368,7 @@ int dma_configure(struct device *dev)
 	} else if (has_acpi_companion(dma_dev)) {
 		attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
 		if (attr != DEV_DMA_NOT_SUPPORTED)
-			acpi_dma_configure(dev, attr);
+			ret = acpi_dma_configure(dev, attr);
 	}
 
 	if (bridge)
include/acpi/acpi_bus.h:

@@ -575,7 +575,7 @@ struct acpi_pci_root {
 
 bool acpi_dma_supported(struct acpi_device *adev);
 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
 void acpi_dma_deconfigure(struct device *dev);
 
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
include/linux/acpi.h:

@@ -762,8 +762,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
 	return DEV_DMA_NOT_SUPPORTED;
 }
 
-static inline void acpi_dma_configure(struct device *dev,
-				       enum dev_dma_attr attr) { }
+static inline int acpi_dma_configure(struct device *dev,
+				     enum dev_dma_attr attr)
+{
+	return 0;
+}
 
 static inline void acpi_dma_deconfigure(struct device *dev) { }
 
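Taken together, the change lets a probe deferral travel back out of the ACPI DMA setup path: dma_configure() now forwards acpi_dma_configure()'s return value, so a -EPROBE_DEFER raised by the IORT lookup can reach the code that probed the bus master, which is then retried once the SMMU driver has registered its ops. A sketch of what the consuming side is expected to look like (example_probe_path is a hypothetical stand-in for the driver-core path that calls dma_configure()):

        static int example_probe_path(struct device *dev)
        {
                int ret;

                ret = dma_configure(dev);   /* may now return -EPROBE_DEFER via ACPI/IORT */
                if (ret)
                        return ret;         /* -EPROBE_DEFER re-queues the probe for later */

                /* continue with the normal probe sequence */
                return 0;
        }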