x86/sgx: Add SGX page allocator functions
Add functions for runtime allocation and free. This allocator and its algorithms are as simple as it gets. They do a linear search across all EPC sections and find the first free page. They are not NUMA-aware and only hand out individual pages. The SGX hardware does not support large pages, so something more complicated like a buddy allocator is unwarranted. The free function (sgx_free_epc_page()) implicitly calls ENCLS[EREMOVE], which returns the page to the uninitialized state. This ensures that the page is ready for use at the next allocation. Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org> Signed-off-by: Borislav Petkov <bp@suse.de> Acked-by: Jethro Beekman <jethro@fortanix.com> Link: https://lkml.kernel.org/r/20201112220135.165028-10-jarkko@kernel.org
This commit is contained in:
parent
38853a3039
commit
d2285493be
|
@ -85,6 +85,71 @@ static bool __init sgx_page_reclaimer_init(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
|
||||
{
|
||||
struct sgx_epc_page *page;
|
||||
|
||||
spin_lock(§ion->lock);
|
||||
|
||||
if (list_empty(§ion->page_list)) {
|
||||
spin_unlock(§ion->lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page = list_first_entry(§ion->page_list, struct sgx_epc_page, list);
|
||||
list_del_init(&page->list);
|
||||
|
||||
spin_unlock(§ion->lock);
|
||||
return page;
|
||||
}
|
||||
|
||||
/**
|
||||
* __sgx_alloc_epc_page() - Allocate an EPC page
|
||||
*
|
||||
* Iterate through EPC sections and borrow a free EPC page to the caller. When a
|
||||
* page is no longer needed it must be released with sgx_free_epc_page().
|
||||
*
|
||||
* Return:
|
||||
* an EPC page,
|
||||
* -errno on error
|
||||
*/
|
||||
struct sgx_epc_page *__sgx_alloc_epc_page(void)
|
||||
{
|
||||
struct sgx_epc_section *section;
|
||||
struct sgx_epc_page *page;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sgx_nr_epc_sections; i++) {
|
||||
section = &sgx_epc_sections[i];
|
||||
|
||||
page = __sgx_alloc_epc_page_from_section(section);
|
||||
if (page)
|
||||
return page;
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/**
 * sgx_free_epc_page() - Free an EPC page
 * @page: an EPC page
 *
 * Call EREMOVE for an EPC page and insert it back to the list of free pages.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	int ret;

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
		return;

	spin_lock(&section->lock);
	list_add_tail(&page->list, &section->page_list);
	spin_unlock(&section->lock);
}
|
||||
|
||||
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
|
||||
unsigned long index,
|
||||
struct sgx_epc_section *section)
|
||||
|
|
|
@ -57,4 +57,7 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
|
|||
return section->virt_addr + index * PAGE_SIZE;
|
||||
}
|
||||
|
||||
struct sgx_epc_page *__sgx_alloc_epc_page(void);
|
||||
void sgx_free_epc_page(struct sgx_epc_page *page);
|
||||
|
||||
#endif /* _X86_SGX_H */
|
||||
|
|
Loading…
Reference in New Issue