iommu/amd: Remove global amd_iommu_[dev_table/alias_table/last_bdf]

Replace them with the per PCI segment device table and alias table.
Also remove the now-unused dev_table_size, alias_table_size, and
amd_iommu_last_bdf variables.

Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220706113825.25582-28-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Suravee Suthikulpanit 2022-07-06 17:08:17 +05:30 committed by Joerg Roedel
parent 56fb79514c
commit 401360ec98
3 changed files with 27 additions and 95 deletions
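
For orientation, the per PCI segment structure that replaces these globals
carries the tables and a segment-local last_bdf. Below is a minimal sketch of
the fields this patch relies on; the field names are taken from the diff, but
the real struct amd_iommu_pci_seg in amd_iommu_types.h has more members:

        struct amd_iommu_pci_seg {
                u16 id;                 /* PCI segment number */
                u16 last_bdf;           /* largest bus/dev/fn in this segment */

                u32 dev_table_size;     /* tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf) */
                u32 alias_table_size;   /* tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf) */
                u32 rlookup_table_size; /* tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf) */

                struct dev_table_entry *dev_table;      /* per segment device table */
                u16 *alias_table;                       /* requestor id aliases */
                struct amd_iommu **rlookup_table;       /* devid -> IOMMU lookup */
        };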

drivers/iommu/amd/amd_iommu_types.h

@@ -834,24 +834,9 @@ struct unity_map_entry {
  * Data structures for device handling
  */
 
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
 /* size of the dma_ops aperture as power of 2 */
 extern unsigned amd_iommu_aperture_order;
 
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;

drivers/iommu/amd/init.c

@@ -160,9 +160,6 @@ static bool amd_iommu_disabled __initdata;
 static bool amd_iommu_force_enable __initdata;
 static int amd_iommu_target_ivhd_type;
 
-u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
-                                           to handle */
-
 LIST_HEAD(amd_iommu_pci_seg_list);      /* list of all PCI segments */
 LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                            system */
@@ -185,30 +182,12 @@ bool amdr_ivrs_remap_support __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
-/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
 /*
  * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
  * to know which ones are already in use.
  */
 unsigned long *amd_iommu_pd_alloc_bitmap;
 
-static u32 dev_table_size;      /* size of the device table */
-static u32 alias_table_size;    /* size of the alias table */
-
 enum iommu_init_state {
         IOMMU_START_STATE,
         IOMMU_IVRS_DETECTED,
@@ -263,16 +242,10 @@ static void init_translation_status(struct amd_iommu *iommu)
                 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
 }
 
-static inline void update_last_devid(u16 devid)
-{
-        if (devid > amd_iommu_last_bdf)
-                amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
 {
         unsigned shift = PAGE_SHIFT +
-                         get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+                         get_order((last_bdf + 1) * entry_size);
 
         return 1UL << shift;
 }
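
A worked example of the reworked helper, assuming 4 KiB pages and the entry
size constants defined in amd_iommu_types.h:

        /*
         * DEV_TABLE_ENTRY_SIZE = 32, last_bdf = 0xffff:
         *   get_order((0xffff + 1) * 32) = get_order(2 MiB) = 9
         *   tbl_size() = 1UL << (PAGE_SHIFT + 9) = 2 MiB
         * ALIAS_TABLE_ENTRY_SIZE = 2, last_bdf = 0xffff:
         *   get_order(128 KiB) = 5, so tbl_size() = 128 KiB
         *
         * A segment with a smaller last_bdf now gets a proportionally
         * smaller allocation instead of one sized by a global maximum.
         */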
@@ -404,10 +377,11 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
 {
         u64 entry;
+        u32 dev_table_size = iommu->pci_seg->dev_table_size;
+        void *dev_table = (void *)get_dev_table(iommu);
 
         BUG_ON(iommu->mmio_base == NULL);
 
-        entry = iommu_virt_to_phys(amd_iommu_dev_table);
+        entry = iommu_virt_to_phys(dev_table);
         entry |= (dev_table_size >> 12) - 1;
         memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                         &entry, sizeof(entry));
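
The size encoding is unchanged by this patch: as the code above shows, the
Device Table Base Address register takes the table size in 4 KiB units minus
one. For example (hypothetical base address, 2 MiB per segment table):

        /* dev_table at physical 0x12340000, dev_table_size = 2 MiB:
         * entry = 0x12340000 | ((0x200000 >> 12) - 1) = 0x12340000 | 0x1ff
         */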
@@ -557,14 +531,12 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
                 switch (dev->type) {
                 case IVHD_DEV_ALL:
                         /* Use maximum BDF value for DEV_ALL */
-                        update_last_devid(0xffff);
-                        break;
+                        return 0xffff;
                 case IVHD_DEV_SELECT:
                 case IVHD_DEV_RANGE_END:
                 case IVHD_DEV_ALIAS:
                 case IVHD_DEV_EXT_SELECT:
                         /* all the above subfield types refer to device ids */
-                        update_last_devid(dev->devid);
+                        if (dev->devid > last_devid)
+                                last_devid = dev->devid;
                         break;
@@ -706,7 +678,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
         /*
          * let all alias entries point to itself
          */
-        for (i = 0; i <= amd_iommu_last_bdf; ++i)
+        for (i = 0; i <= pci_seg->last_bdf; ++i)
                 pci_seg->alias_table[i] = i;
 
         return 0;
@@ -1072,7 +1044,7 @@ static bool __copy_device_table(struct amd_iommu *iommu)
                 return false;
         }
 
-        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+        for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
                 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
                 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
                 dte_v  = old_devtb[devid].data[0] & DTE_FLAG_V;
@@ -1149,12 +1121,6 @@ void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
                 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
 }
 
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
-{
-        iommu->pci_seg->rlookup_table[devid] = iommu;
-}
-
 /*
  * This function takes the device specific flags read from the ACPI
  * table and sets up the device table entry with that information
@@ -1179,7 +1145,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
         amd_iommu_apply_erratum_63(iommu, devid);
 
-        set_iommu_for_device(iommu, devid);
+        amd_iommu_set_rlookup_table(iommu, devid);
 }
 
 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
@@ -1339,7 +1305,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                         DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
 
-                        for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+                        for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
                                 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
                         break;
                 case IVHD_DEV_SELECT:
@@ -1584,9 +1550,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
         pci_seg->last_bdf = last_bdf;
         DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
 
-        pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
-        pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
-        pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
+        pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+        pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+        pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
 
         pci_seg->id = id;
         init_llist_head(&pci_seg->dev_data_list);
@@ -2469,7 +2435,7 @@ static int __init init_unity_map_range(struct ivmd_header *m,
         case ACPI_IVMD_TYPE_ALL:
                 s = "IVMD_TYPE_ALL\t\t";
                 e->devid_start = 0;
-                e->devid_end = amd_iommu_last_bdf;
+                e->devid_end = pci_seg->last_bdf;
                 break;
         case ACPI_IVMD_TYPE_RANGE:
                 s = "IVMD_TYPE_RANGE\t\t";
@@ -2536,7 +2502,7 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
         if (dev_table == NULL)
                 return;
 
-        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+        for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
                 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
                 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
         }
@@ -2550,7 +2516,7 @@ static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
         if (dev_table == NULL)
                 return;
 
-        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+        for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
                 dev_table[devid].data[0] = 0ULL;
                 dev_table[devid].data[1] = 0ULL;
         }
@@ -2565,7 +2531,7 @@ static void init_device_table(void)
                 return;
 
         for_each_pci_segment(pci_seg) {
-                for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+                for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
                         __set_dev_entry_bit(pci_seg->dev_table,
                                             devid, DEV_ENTRY_IRQ_TBL_EN);
         }
@@ -2808,14 +2774,6 @@ static void __init free_iommu_resources(void)
         kmem_cache_destroy(amd_iommu_irq_cache);
         amd_iommu_irq_cache = NULL;
 
-        free_pages((unsigned long)amd_iommu_alias_table,
-                   get_order(alias_table_size));
-        amd_iommu_alias_table = NULL;
-
-        free_pages((unsigned long)amd_iommu_dev_table,
-                   get_order(dev_table_size));
-        amd_iommu_dev_table = NULL;
-
         free_iommu_all();
         free_pci_segments();
 }
@@ -2944,25 +2902,8 @@ static int __init early_amd_iommu_init(void)
         amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
         DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
 
-        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
-        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
-
         /* Device table - directly used by all IOMMUs */
         ret = -ENOMEM;
-        amd_iommu_dev_table = (void *)__get_free_pages(
-                                      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-                                      get_order(dev_table_size));
-        if (amd_iommu_dev_table == NULL)
-                goto out;
-
-        /*
-         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
-         * IOMMU see for that device
-         */
-        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
-                        get_order(alias_table_size));
-        if (amd_iommu_alias_table == NULL)
-                goto out;
-
         amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                             GFP_KERNEL | __GFP_ZERO,

drivers/iommu/amd/iommu.c

@@ -230,6 +230,7 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
 {
         struct amd_iommu *iommu;
+        struct dev_table_entry *dev_table;
         u16 devid = pci_dev_id(pdev);
 
         if (devid == alias)
@@ -240,9 +241,10 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
                 return 0;
 
         amd_iommu_set_rlookup_table(iommu, alias);
-        memcpy(amd_iommu_dev_table[alias].data,
-               amd_iommu_dev_table[devid].data,
-               sizeof(amd_iommu_dev_table[alias].data));
+        dev_table = get_dev_table(iommu);
+        memcpy(dev_table[alias].data,
+               dev_table[devid].data,
+               sizeof(dev_table[alias].data));
 
         return 0;
 }
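
get_dev_table() was added earlier in this series; as a reminder, it resolves
the table through the IOMMU's PCI segment back-pointer, roughly:

        static struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
        {
                struct dev_table_entry *dev_table;
                struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

                BUG_ON(pci_seg == NULL);
                dev_table = pci_seg->dev_table; /* per segment, no global */
                BUG_ON(dev_table == NULL);

                return dev_table;
        }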
@@ -356,6 +358,8 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
  */
 static bool check_device(struct device *dev)
 {
+        struct amd_iommu_pci_seg *pci_seg;
+        struct amd_iommu *iommu;
         int devid;
 
         if (!dev)
@@ -365,11 +369,13 @@ static bool check_device(struct device *dev)
         if (devid < 0)
                 return false;
 
-        /* Out of our scope? */
-        if (devid > amd_iommu_last_bdf)
+        iommu = rlookup_amd_iommu(dev);
+        if (!iommu)
                 return false;
 
-        if (rlookup_amd_iommu(dev) == NULL)
+        /* Out of our scope? */
+        pci_seg = iommu->pci_seg;
+        if ((devid & 0xffff) > pci_seg->last_bdf)
                 return false;
 
         return true;
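
The 0xffff mask in the new bound check reflects that, elsewhere in this
series, a device id is carried as a segment/BDF pair packed into one integer;
the mask recovers the 16-bit BDF before comparing it against the
segment-local last_bdf. Schematically (the packing is assumed from the rest
of the series, not shown in this hunk):

        /*
         * sbdf = ((u32)seg << 16) | PCI_DEVID(bus, devfn);
         * bdf  = sbdf & 0xffff;   compared against pci_seg->last_bdf
         */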