iommu/amd: Allocate page table using numa locality info
Introduce a 'nid' field in struct protection_domain to hold the NUMA node ID of the IOMMU, and allocate page table pages using that NUMA locality information. Keeping page tables local to the IOMMU speeds up its page table walks.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20230321092348.6127-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 0d571dcbe7
parent e8d018dd02
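For context before the diff: alloc_pages_node() does the heavy lifting here. When the node ID is NUMA_NO_NODE it simply falls back to allocating on the current node, so the new helper is safe to call before any device has been attached to the domain. A minimal sketch of the pattern, assuming kernel context; the demo_alloc_root() caller is hypothetical and not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Allocate one zeroed, order-0 page on the given NUMA node. */
static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
{
	struct page *page;

	/* nid == NUMA_NO_NODE falls back to the current node. */
	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
	return page ? page_address(page) : NULL;
}

/* Hypothetical caller: place the table on the domain's node. */
static void *demo_alloc_root(int domain_nid, gfp_t gfp)
{
	return alloc_pgtable_page(domain_nid, gfp);
}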
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -122,6 +122,14 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
 	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
 
+static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
+{
+	struct page *page;
+
+	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
+	return page ? page_address(page) : NULL;
+}
+
 extern bool translation_pre_enabled(struct amd_iommu *iommu);
 extern bool amd_iommu_is_attach_deferred(struct device *dev);
 extern int __init add_special_device(u8 type, u8 id, u32 *devid,
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -549,6 +549,7 @@ struct protection_domain {
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	int glx;		/* Number of levels for GCR3 table */
+	int nid;		/* Node ID */
 	u64 *gcr3_tbl;		/* Guest CR3 table */
 	unsigned long flags;	/* flags to find out type of domain */
 	unsigned dev_cnt;	/* devices assigned to this domain */
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -156,7 +156,7 @@ static bool increase_address_space(struct protection_domain *domain,
 	bool ret = true;
 	u64 *pte;
 
-	pte = (void *)get_zeroed_page(gfp);
+	pte = alloc_pgtable_page(domain->nid, gfp);
 	if (!pte)
 		return false;
 
@@ -250,7 +250,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
 	if (!IOMMU_PTE_PRESENT(__pte) ||
 	    pte_level == PAGE_MODE_NONE) {
-		page = (u64 *)get_zeroed_page(gfp);
+		page = alloc_pgtable_page(domain->nid, gfp);
 
 		if (!page)
 			return NULL;
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -46,11 +46,6 @@ static inline bool is_large_pte(u64 pte)
 	return (pte & IOMMU_PAGE_PSE);
 }
 
-static inline void *alloc_pgtable_page(void)
-{
-	return (void *)get_zeroed_page(GFP_KERNEL);
-}
-
 static inline u64 set_pgtable_attr(u64 *page)
 {
 	u64 prot;
@@ -138,8 +133,8 @@ static void free_pgtable(u64 *pt, int level)
 }
 
 /* Allocate page table */
-static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
-			 unsigned long pg_size, bool *updated)
+static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
+			 unsigned long pg_size, gfp_t gfp, bool *updated)
 {
 	u64 *pte, *page;
 	int level, end_level;
@@ -162,7 +157,7 @@ static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
 	}
 
 	if (!IOMMU_PTE_PRESENT(__pte)) {
-		page = alloc_pgtable_page();
+		page = alloc_pgtable_page(nid, gfp);
 		if (!page)
 			return NULL;
 
@@ -262,7 +257,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 	while (mapped_size < size) {
 		map_size = get_alloc_page_size(pgsize);
-		pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
+		pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
+				   iova, map_size, gfp, &updated);
 		if (!pte) {
 			ret = -EINVAL;
 			goto out;
@@ -384,7 +380,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	struct protection_domain *pdom = (struct protection_domain *)cookie;
 	int ret;
 
-	pgtable->pgd = alloc_pgtable_page();
+	pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
 	if (!pgtable->pgd)
 		return NULL;
 
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1662,6 +1662,10 @@ static void do_attach(struct iommu_dev_data *dev_data,
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
 
+	/* Update NUMA Node ID */
+	if (domain->nid == NUMA_NO_NODE)
+		domain->nid = dev_to_node(dev_data->dev);
+
 	/* Do reference counting */
 	domain->dev_iommu[iommu->index] += 1;
 	domain->dev_cnt += 1;
@@ -2097,6 +2101,8 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 	if (type == IOMMU_DOMAIN_IDENTITY)
 		return domain;
 
+	domain->nid = NUMA_NO_NODE;
+
 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
 	if (!pgtbl_ops) {
 		domain_id_free(domain->id);
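Taken together, the nid lifecycle is: protection_domain_alloc() starts the domain at NUMA_NO_NODE, the first do_attach() latches the node of the attaching device via dev_to_node(), and every later page table allocation lands on that node. A condensed sketch of just that flow, using a stand-in struct; the demo_* names are illustrative, not from the patch:

#include <linux/device.h>
#include <linux/numa.h>

/* Stand-in for the nid-related part of struct protection_domain. */
struct demo_domain {
	int nid;	/* node of first attached device, or NUMA_NO_NODE */
};

static void demo_domain_init(struct demo_domain *domain)
{
	domain->nid = NUMA_NO_NODE;	/* unknown until a device attaches */
}

static void demo_domain_attach(struct demo_domain *domain, struct device *dev)
{
	/* Only the first attach sets the node; later attaches keep it. */
	if (domain->nid == NUMA_NO_NODE)
		domain->nid = dev_to_node(dev);
}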