iommu/vt-d: Duplicate iommu_resv_region objects per device list
intel_iommu_get_resv_regions() aims to return the list of
reserved regions accessible by a given @device. However, several
devices can access the same reserved memory region, so when
building the list it is not safe to use a single iommu_resv_region
object, whose container is the RMRR. This iommu_resv_region must
therefore be duplicated for each device's reserved region list.
Let's remove the struct iommu_resv_region from the RMRR unit
and allocate the iommu_resv_region directly in
intel_iommu_get_resv_regions(). We now hold the dmar_global_lock
instead of the RCU read lock so that the allocation can sleep.
Fixes: 0659b8dc45 ("iommu/vt-d: Implement reserved region get/put callbacks")
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 5f64ce5411
parent ad0834deda
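The unsafety comes from struct iommu_resv_region embedding a single struct list_head: linking the same RMRR-owned node into a second per-device list rewrites its prev/next pointers, so the first device's list silently loses the entry. Below is a minimal user-space sketch of that aliasing; the list helpers are simplified stand-ins for the kernel's <linux/list.h> ones, and resv_region only mimics the relevant part of struct iommu_resv_region.

  #include <stdio.h>

  /* Simplified stand-ins for the kernel's list_head / list_add_tail(). */
  struct list_head { struct list_head *prev, *next; };

  static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

  static void list_add_tail(struct list_head *node, struct list_head *head)
  {
          node->prev = head->prev;
          node->next = head;
          head->prev->next = node;
          head->prev = node;
  }

  /* Mimics the relevant part of struct iommu_resv_region. */
  struct resv_region {
          struct list_head list;  /* one node can sit on only ONE list */
          unsigned long start;
  };

  int main(void)
  {
          struct list_head dev_a, dev_b;  /* two per-device region lists */
          struct resv_region rmrr = { .start = 0x1000 };

          INIT_LIST_HEAD(&dev_a);
          INIT_LIST_HEAD(&dev_b);

          list_add_tail(&rmrr.list, &dev_a);      /* device A gets the region */
          list_add_tail(&rmrr.list, &dev_b);      /* SAME node re-linked for B */

          /* The node's links now point into dev_b's list, so walking dev_a
           * never returns to its head: the shared object breaks the first
           * list, which is why the patch allocates one region per device. */
          printf("rmrr still linked back to dev_a? %s\n",
                 rmrr.list.next == &dev_a ? "yes" : "no");
          return 0;
  }

With per-device allocations each list owns its nodes outright, which is also what lets intel_iommu_put_resv_regions() below free every entry unconditionally.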
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -324,7 +324,6 @@ struct dmar_rmrr_unit {
 	u64	end_address;		/* reserved end address */
 	struct dmar_dev_scope *devices;	/* target devices */
 	int	devices_cnt;		/* target device count */
-	struct iommu_resv_region *resv; /* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -4050,7 +4049,6 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
-	int prot = DMA_PTE_READ|DMA_PTE_WRITE;
 	struct dmar_rmrr_unit *rmrru;
 	size_t length;
 
@@ -4064,22 +4062,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 	rmrru->end_address = rmrr->end_address;
 
 	length = rmrr->end_address - rmrr->base_address + 1;
-	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
-					      IOMMU_RESV_DIRECT);
-	if (!rmrru->resv)
-		goto free_rmrru;
 
 	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
 	if (rmrru->devices_cnt && rmrru->devices == NULL)
-		goto free_all;
+		goto free_rmrru;
 
 	list_add(&rmrru->list, &dmar_rmrr_units);
 
 	return 0;
-free_all:
-	kfree(rmrru->resv);
 free_rmrru:
 	kfree(rmrru);
 out:
@@ -4297,7 +4289,6 @@ static void intel_iommu_free_dmars(void)
 	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
 		list_del(&rmrru->list);
 		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
-		kfree(rmrru->resv);
 		kfree(rmrru);
 	}
 
@@ -5400,22 +5391,33 @@ static void intel_iommu_remove_device(struct device *dev)
 static void intel_iommu_get_resv_regions(struct device *device,
 					 struct list_head *head)
 {
+	int prot = DMA_PTE_READ | DMA_PTE_WRITE;
 	struct iommu_resv_region *reg;
 	struct dmar_rmrr_unit *rmrr;
 	struct device *i_dev;
 	int i;
 
-	rcu_read_lock();
+	down_read(&dmar_global_lock);
 	for_each_rmrr_units(rmrr) {
 		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
 					  i, i_dev) {
+			struct iommu_resv_region *resv;
+			size_t length;
+
 			if (i_dev != device)
 				continue;
 
-			list_add_tail(&rmrr->resv->list, head);
+			length = rmrr->end_address - rmrr->base_address + 1;
+			resv = iommu_alloc_resv_region(rmrr->base_address,
+						       length, prot,
+						       IOMMU_RESV_DIRECT);
+			if (!resv)
+				break;
+
+			list_add_tail(&resv->list, head);
 		}
 	}
-	rcu_read_unlock();
+	up_read(&dmar_global_lock);
 
 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
 	if (dev_is_pci(device)) {
@@ -5443,10 +5445,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 {
 	struct iommu_resv_region *entry, *next;
 
-	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_MSI)
-			kfree(entry);
-	}
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
 }
 
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)