x86/pgtable/pae: Use separate kernel PMDs for user page-table
When PTI is enabled, separate kernel PMDs in the user page-table are
required to map the per-process LDT for user-space.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-33-git-send-email-joro@8bytes.org
commit f59dbe9ca6
parent 4e8537e4a7
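For orientation before the diff: the patch relies on PTI's paired page-tables, where each pgd is an order-1 allocation whose first 4k page holds the kernel page-table and whose second page holds the user page-table. A minimal sketch of how the user half is reached, assuming that layout (the kernel's real helper is kernel_to_user_pgdp(); the name and constant below are illustrative, not from this patch):

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12	/* x86 PAGE_SHIFT */

/*
 * Sketch only: with PTI the user copy of the pgd lives one page above
 * the kernel copy, so switching between the two halves amounts to
 * setting the PAGE_SHIFT bit of the pgd address.
 */
static inline uintptr_t sketch_kernel_to_user_pgdp(uintptr_t kernel_pgdp)
{
	return kernel_pgdp | (1UL << SKETCH_PAGE_SHIFT);
}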
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
 
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS	(static_cpu_has(X86_FEATURE_PTI) ? \
+					KERNEL_PGD_PTRS : 0)
+
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,14 +210,14 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS	0
-
+#define PREALLOCATED_USER_PMDS	0
 #endif	/* CONFIG_X86_PAE */
 
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++)
+	for (i = 0; i < count; i++)
 		if (pmds[i]) {
 			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
 			free_page((unsigned long)pmds[i]);
@@ -217,7 +225,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
 		}
 }
 
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
 {
 	int i;
 	bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 	if (mm == &init_mm)
 		gfp &= ~__GFP_ACCOUNT;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
+	for (i = 0; i < count; i++) {
 		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
 		if (!pmd)
 			failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 	}
 
 	if (failed) {
-		free_pmds(mm, pmds);
+		free_pmds(mm, pmds, count);
 		return -ENOMEM;
 	}
 
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+	pgd_t pgd = *pgdp;
+
+	if (pgd_val(pgd) != 0) {
+		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+		*pgdp = native_make_pgd(0);
+
+		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+		pmd_free(mm, pmd);
+		mm_dec_nr_pmds(mm);
+	}
+}
+
 static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pgd_t pgd = pgdp[i];
-
-		if (pgd_val(pgd) != 0) {
-			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-
-			pgdp[i] = native_make_pgd(0);
-
-			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(mm, pmd);
-			mm_dec_nr_pmds(mm);
-		}
-	}
+	for (i = 0; i < PREALLOCATED_PMDS; i++)
+		mop_up_one_pmd(mm, &pgdp[i]);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	pgdp = kernel_to_user_pgdp(pgdp);
+
+	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
 }
 
 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	}
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+				     pgd_t *k_pgd, pmd_t *pmds[])
+{
+	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+	p4d_t *u_p4d;
+	pud_t *u_pud;
+	int i;
+
+	u_p4d = p4d_offset(u_pgd, 0);
+	u_pud = pud_offset(u_p4d, 0);
+
+	s_pgd += KERNEL_PGD_BOUNDARY;
+	u_pud += KERNEL_PGD_BOUNDARY;
+
+	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+		pmd_t *pmd = pmds[i];
+
+		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+		       sizeof(pmd_t) * PTRS_PER_PMD);
+
+		pud_populate(mm, u_pud, pmd);
+	}
+
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+				     pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
 /*
  * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
  * assumes that pgd should be in one page.
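A note on the pgd_prepopulate_user_pmd() hunk above: on 32-bit PAE the p4d and pud levels are folded, so the p4d_offset()/pud_offset() calls are effectively casts of the pgd pointer, and u_pud ends up indexing the user pgd's kernel slots directly. A sketch of the folded helpers, modeled on the generic pgtable-nop4d.h/pgtable-nopud.h behavior (names prefixed to mark them as illustrative):

/* Folded-level sketch: each "offset" step is a cast, not a table walk. */
static inline p4d_t *folded_p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* p4d level folded into the pgd */
}

static inline pud_t *folded_pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d;	/* pud level folded into the p4d */
}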
@@ -376,6 +431,7 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
+	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
 	pmd_t *pmds[PREALLOCATED_PMDS];
 
 	pgd = _pgd_alloc();
@@ -385,12 +441,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	mm->pgd = pgd;
 
-	if (preallocate_pmds(mm, pmds) != 0)
+	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
 		goto out_free_pgd;
 
-	if (paravirt_pgd_alloc(mm) != 0)
+	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
 		goto out_free_pmds;
 
+	if (paravirt_pgd_alloc(mm) != 0)
+		goto out_free_user_pmds;
+
 	/*
 	 * Make sure that pre-populating the pmds is atomic with
 	 * respect to anything walking the pgd_list, so that they
@@ -400,13 +459,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
+	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
 
 	spin_unlock(&pgd_lock);
 
 	return pgd;
 
+out_free_user_pmds:
+	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
 out_free_pmds:
-	free_pmds(mm, pmds);
+	free_pmds(mm, pmds, PREALLOCATED_PMDS);
 out_free_pgd:
 	_pgd_free(pgd);
 out:
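As a worked example of what PREALLOCATED_USER_PMDS evaluates to on a PTI-enabled PAE kernel, assume the default 3G/1G split (CONFIG_PAGE_OFFSET = 0xC0000000). This small standalone program mirrors the arithmetic:

#include <stdio.h>

/* PAE constants, assuming the default 0xC0000000 page offset */
#define PGDIR_SHIFT		30	/* each pgd entry covers 1GiB */
#define PTRS_PER_PGD		4
#define PAGE_OFFSET		0xC0000000UL
#define KERNEL_PGD_BOUNDARY	(PAGE_OFFSET >> PGDIR_SHIFT)	/* pgd_index(PAGE_OFFSET) == 3 */
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

int main(void)
{
	/* With PTI enabled, pgd_alloc() preallocates this many user PMDs: */
	printf("PREALLOCATED_USER_PMDS = %lu\n",
	       (unsigned long)KERNEL_PGD_PTRS);	/* prints 1 */
	return 0;
}

That is, with the default split only the single 1GiB kernel slot of the user page-table needs its own PMD page, which is where the per-process LDT mapping lives.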