iommu/amd: Introduce per PCI segment old_dev_tbl_cpy
This removes the global old_dev_tbl_cpy and introduces a per PCI segment copy instead. It also updates copy_device_table() to copy the device table for every PCI segment.

Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220706113825.25582-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 39a303ba4a
commit eb21ef0227
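In short: the single copy_device_table() is split into a per-IOMMU helper, __copy_device_table(), plus a small dispatcher that walks every PCI segment and copies that segment's device table exactly once, via the first IOMMU found in it. Below is a condensed sketch of the new dispatcher, restated from the hunks that follow (it relies on the driver's for_each_pci_segment()/for_each_iommu() iterators and the amd_iommu_pre_enabled flag, all of which appear in this patch):

static bool copy_device_table(void)
{
    struct amd_iommu *iommu;
    struct amd_iommu_pci_seg *pci_seg;

    /* Only a kdump kernel finds translation already enabled. */
    if (!amd_iommu_pre_enabled)
        return false;

    /*
     * All IOMMUs within a PCI segment share one device table,
     * so copy it once per segment and skip that segment's
     * remaining IOMMUs.
     */
    for_each_pci_segment(pci_seg) {
        for_each_iommu(iommu) {
            if (pci_seg->id != iommu->pci_seg->id)
                continue;
            if (!__copy_device_table(iommu))
                return false;
            break;
        }
    }

    return true;
}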
drivers/iommu/amd/amd_iommu_types.h

@@ -574,6 +574,12 @@ struct amd_iommu_pci_seg {
 	 * device id quickly.
 	 */
 	struct irq_remap_table **irq_lookup_table;
+
+	/*
+	 * Pointer to a device table which the content of old device table
+	 * will be copied to. It's only be used in kdump kernel.
+	 */
+	struct dev_table_entry *old_dev_tbl_cpy;
 };
 
 /*
drivers/iommu/amd/init.c

@@ -193,11 +193,6 @@ bool amd_iommu_force_isolation __read_mostly;
  * page table root pointer.
  */
 struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table which the content of old device table
- * will be copied to. It's only be used in kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
 
 /*
  * The alias table is a driver specific data structure which contains the
@@ -992,39 +987,27 @@ static int get_dev_entry_bit(u16 devid, u8 bit)
 }
 
-static bool copy_device_table(void)
+static bool __copy_device_table(struct amd_iommu *iommu)
 {
-	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+	u64 int_ctl, int_tab_len, entry = 0;
+	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
 	struct dev_table_entry *old_devtb = NULL;
 	u32 lo, hi, devid, old_devtb_size;
 	phys_addr_t old_devtb_phys;
-	struct amd_iommu *iommu;
 	u16 dom_id, dte_v, irq_v;
 	gfp_t gfp_flag;
 	u64 tmp;
 
-	if (!amd_iommu_pre_enabled)
-		return false;
-
-	pr_warn("Translation is already enabled - trying to copy translation structures\n");
-	for_each_iommu(iommu) {
-		/* All IOMMUs should use the same device table with the same size */
-		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
-		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
-		entry = (((u64) hi) << 32) + lo;
-		if (last_entry && last_entry != entry) {
-			pr_err("IOMMU:%d should use the same dev table as others!\n",
-				iommu->index);
-			return false;
-		}
-		last_entry = entry;
-
-		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
-		if (old_devtb_size != dev_table_size) {
-			pr_err("The device table size of IOMMU:%d is not expected!\n",
-				iommu->index);
-			return false;
-		}
+	/* Each IOMMU use separate device table with the same size */
+	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+	entry = (((u64) hi) << 32) + lo;
+
+	old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+	if (old_devtb_size != dev_table_size) {
+		pr_err("The device table size of IOMMU:%d is not expected!\n",
+			iommu->index);
+		return false;
 	}
 
 	/*
@@ -1047,31 +1030,31 @@ static bool copy_device_table(void)
 		return false;
 
 	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-				get_order(dev_table_size));
-	if (old_dev_tbl_cpy == NULL) {
+	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+						get_order(dev_table_size));
+	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
 		return false;
 	}
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
-		old_dev_tbl_cpy[devid] = old_devtb[devid];
+		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
 		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
 		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
 
 		if (dte_v && dom_id) {
-			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
-			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
 			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
 			/* If gcr3 table existed, mask it out */
 			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
 				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
 				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
-				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+				pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
 				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
 				tmp |= DTE_FLAG_GV;
-				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+				pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
 			}
 		}
@@ -1086,7 +1069,7 @@ static bool copy_device_table(void)
 				return false;
 			}
 
-			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+			pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
 		}
 	}
 	memunmap(old_devtb);
@@ -1094,6 +1077,33 @@ static bool copy_device_table(void)
 	return true;
 }
 
+static bool copy_device_table(void)
+{
+	struct amd_iommu *iommu;
+	struct amd_iommu_pci_seg *pci_seg;
+
+	if (!amd_iommu_pre_enabled)
+		return false;
+
+	pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+	/*
+	 * All IOMMUs within PCI segment shares common device table.
+	 * Hence copy device table only once per PCI segment.
+	 */
+	for_each_pci_segment(pci_seg) {
+		for_each_iommu(iommu) {
+			if (pci_seg->id != iommu->pci_seg->id)
+				continue;
+			if (!__copy_device_table(iommu))
+				return false;
+			break;
+		}
+	}
+
+	return true;
+}
+
 void amd_iommu_apply_erratum_63(u16 devid)
 {
 	int sysmgt;
@@ -2591,7 +2601,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
 static void early_enable_iommus(void)
 {
 	struct amd_iommu *iommu;
+	struct amd_iommu_pci_seg *pci_seg;
 
 	if (!copy_device_table()) {
 		/*
@@ -2601,9 +2611,14 @@ static void early_enable_iommus(void)
 		 */
 		if (amd_iommu_pre_enabled)
 			pr_err("Failed to copy DEV table from previous kernel.\n");
-		if (old_dev_tbl_cpy != NULL)
-			free_pages((unsigned long)old_dev_tbl_cpy,
-					get_order(dev_table_size));
+
+		for_each_pci_segment(pci_seg) {
+			if (pci_seg->old_dev_tbl_cpy != NULL) {
+				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+					   get_order(dev_table_size));
+				pci_seg->old_dev_tbl_cpy = NULL;
+			}
+		}
 
 		for_each_iommu(iommu) {
 			clear_translation_pre_enabled(iommu);
@@ -2611,9 +2626,13 @@ static void early_enable_iommus(void)
 		}
 	} else {
 		pr_info("Copied DEV table from previous kernel.\n");
-		free_pages((unsigned long)amd_iommu_dev_table,
-				get_order(dev_table_size));
-		amd_iommu_dev_table = old_dev_tbl_cpy;
+
+		for_each_pci_segment(pci_seg) {
+			free_pages((unsigned long)pci_seg->dev_table,
+				   get_order(dev_table_size));
+			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+		}
 
 		for_each_iommu(iommu) {
 			iommu_disable_command_buffer(iommu);
 			iommu_disable_event_buffer(iommu);