[S390] refactor page table functions for better pgste support
Rework the architecture page table functions to access the bits in the
page table extension array (pgste). There are a number of changes:
1) Fix missing pgste update if the attach_count for the mm is <= 1.
2) For every operation that affects the invalid bit in the pte or the
   rcp byte in the pgste the pcl lock needs to be acquired. The function
   pgste_get_lock gets the pcl lock and returns the current pgste value
   for a pte pointer. The function pgste_set_unlock stores the pgste
   and releases the lock. Between these two calls the bits in the pgste
   can be shuffled. (The usage pattern is sketched below.)
3) Define two software bits in the pte _PAGE_SWR and _PAGE_SWC to avoid
   calling SetPageDirty and SetPageReferenced from pgtable.h. If the
   host reference backup bit or the host change backup bit has been set
   the dirty/referenced state is transferred to the pte. The common
   code will pick up the state from the pte.
4) Add ptep_modify_prot_start and ptep_modify_prot_commit for mprotect.
5) Remove pgd_populate_kernel, pud_populate_kernel, pmd_populate_kernel,
   pgd_clear_kernel, pud_clear_kernel, pmd_clear_kernel and
   ptep_invalidate.
6) Rename kvm_s390_test_and_clear_page_dirty to
   ptep_test_and_clear_user_dirty and add ptep_test_and_clear_user_young.
7) Define mm_exclusive() and mm_has_pgste() helpers to improve
   readability.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent 2d42552d1c
commit b2fa47e6bf
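The pcl-lock protocol from point 2 always follows the same bracket in the
reworked pgtable.h below. A minimal sketch of that pattern (the helper name
pte_store_locked is made up for illustration; pgste_get_lock, pgste_set_unlock
and mm_has_pgste are the real functions from this patch):

/*
 * Illustrative sketch, not part of the patch: every operation that
 * touches the invalid bit in the pte or the rcp byte in the pgste
 * is bracketed by pgste_get_lock()/pgste_set_unlock().
 */
static inline void pte_store_locked(struct mm_struct *mm, pte_t *ptep,
				    pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);	/* take pcl, fetch pgste */
		/* ...shuffle pgste and pte bits here... */
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);	/* store pgste, drop pcl */
	} else
		*ptep = entry;
}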
@@ -9,8 +9,10 @@ typedef struct {
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
-	int has_pgste;	 /* The mmu context has extended page tables */
-	int alloc_pgste; /* cloned contexts will have extended page tables */
+	/* Cloned contexts will be created with extended page tables. */
+	unsigned int alloc_pgste:1;
+	/* The mmu context has extended page tables. */
+	unsigned int has_pgste:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \
@@ -90,6 +90,7 @@ static inline void copy_page(void *to, void *from)
  */
 
 typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgste; } pgste_t;
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pud; } pud_t;
@@ -97,13 +98,16 @@ typedef struct { unsigned long pgd; } pgd_t;
 typedef pte_t *pgtable_t;
 
 #define pgprot_val(x)	((x).pgprot)
+#define pgste_val(x)	((x).pgste)
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 
+#define __pgste(x)	((pgste_t) { (x) } )
 #define __pte(x)	((pte_t) { (x) } )
 #define __pmd(x)	((pmd_t) { (x) } )
+#define __pud(x)	((pud_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
@@ -65,10 +65,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 #define pmd_free(mm, x)				do { } while (0)
 
 #define pgd_populate(mm, pgd, pud)		BUG()
-#define pgd_populate_kernel(mm, pgd, pud)	BUG()
-
 #define pud_populate(mm, pud, pmd)		BUG()
-#define pud_populate_kernel(mm, pud, pmd)	BUG()
 
 #else /* __s390x__ */
 
@@ -102,26 +99,14 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 }
 #define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
 
-static inline void pgd_populate_kernel(struct mm_struct *mm,
-				       pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
 	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-{
-	pgd_populate_kernel(mm, pgd, pud);
-}
-
-static inline void pud_populate_kernel(struct mm_struct *mm,
-				       pud_t *pud, pmd_t *pmd)
-{
-	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
-}
-
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_populate_kernel(mm, pud, pmd);
+	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
 #endif /* __s390x__ */
@@ -134,17 +119,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
-static inline void pmd_populate_kernel(struct mm_struct *mm,
-				       pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate(struct mm_struct *mm,
+				pmd_t *pmd, pgtable_t pte)
 {
 	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
 }
 
-static inline void pmd_populate(struct mm_struct *mm,
-				pmd_t *pmd, pgtable_t pte)
-{
-	pmd_populate_kernel(mm, pmd, pte);
-}
+#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
 
 #define pmd_pgtable(pmd) \
 	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
@@ -31,9 +31,8 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/mm_types.h>
-#include <asm/bitops.h>
 #include <asm/bug.h>
-#include <asm/processor.h>
+#include <asm/page.h>
 
 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
 extern void paging_init(void);
@@ -243,11 +242,13 @@ extern unsigned long VMALLOC_START;
 /* Software bits in the page table entry */
 #define _PAGE_SWT	0x001	/* SW pte type bit t */
 #define _PAGE_SWX	0x002	/* SW pte type bit x */
-#define _PAGE_SPECIAL	0x004	/* SW associated with special page */
+#define _PAGE_SWC	0x004	/* SW pte changed bit (for KVM) */
+#define _PAGE_SWR	0x008	/* SW pte referenced bit (for KVM) */
+#define _PAGE_SPECIAL	0x010	/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
 
 /* Six different types of pages. */
 #define _PAGE_TYPE_EMPTY	0x400
@@ -293,14 +294,17 @@ extern unsigned long VMALLOC_START;
  */
 
 /* Page status table bits for virtualization */
-#define RCP_PCL_BIT	55
-#define RCP_HR_BIT	54
-#define RCP_HC_BIT	53
-#define RCP_GR_BIT	50
-#define RCP_GC_BIT	49
-
-/* User dirty bit for KVM's migration feature */
-#define KVM_UD_BIT	47
+#define RCP_ACC_BITS	0xf000000000000000UL
+#define RCP_FP_BIT	0x0800000000000000UL
+#define RCP_PCL_BIT	0x0080000000000000UL
+#define RCP_HR_BIT	0x0040000000000000UL
+#define RCP_HC_BIT	0x0020000000000000UL
+#define RCP_GR_BIT	0x0004000000000000UL
+#define RCP_GC_BIT	0x0002000000000000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT	0x0000800000000000UL
+#define KVM_UC_BIT	0x0000400000000000UL
 
 #ifndef __s390x__
 
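The RCP defines change from bit numbers (as used with the bitops family) to
explicit 64-bit masks; the two encodings name the same bits, related by
mask = 1UL << bit_number. A standalone check of that correspondence, written
as userspace C for illustration (not part of the patch):

#include <assert.h>

int main(void)
{
	assert((1UL << 55) == 0x0080000000000000UL); /* RCP_PCL_BIT */
	assert((1UL << 54) == 0x0040000000000000UL); /* RCP_HR_BIT */
	assert((1UL << 53) == 0x0020000000000000UL); /* RCP_HC_BIT */
	assert((1UL << 50) == 0x0004000000000000UL); /* RCP_GR_BIT */
	assert((1UL << 49) == 0x0002000000000000UL); /* RCP_GC_BIT */
	assert((1UL << 47) == 0x0000800000000000UL); /* KVM_UD_BIT -> KVM_UR_BIT */
	return 0;
}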
@@ -407,17 +411,20 @@ extern unsigned long VMALLOC_START;
 #define __S110	PAGE_RW
 #define __S111	PAGE_RW
 
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t entry)
+static inline int mm_exclusive(struct mm_struct *mm)
 {
-	*ptep = entry;
+	return likely(mm == current->active_mm &&
+		      atomic_read(&mm->context.attach_count) <= 1);
+}
+
+static inline int mm_has_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	if (unlikely(mm->context.has_pgste))
+		return 1;
+#endif
+	return 0;
 }
 
 /*
  * pgd/pmd/pte query functions
  */
@@ -530,50 +537,127 @@ static inline int pte_special(pte_t pte)
 }
 
 #define __HAVE_ARCH_PTE_SAME
-#define pte_same(a,b)	(pte_val(a) == pte_val(b))
-
-static inline void rcp_lock(pte_t *ptep)
+static inline int pte_same(pte_t a, pte_t b)
 {
-#ifdef CONFIG_PGSTE
-	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-	preempt_disable();
-	while (test_and_set_bit(RCP_PCL_BIT, pgste))
-		;
-#endif
+	return pte_val(a) == pte_val(b);
 }
 
-static inline void rcp_unlock(pte_t *ptep)
+static inline pgste_t pgste_get_lock(pte_t *ptep)
+{
+	unsigned long new = 0;
+#ifdef CONFIG_PGSTE
+	unsigned long old;
+
+	preempt_disable();
+	asm(
+		"	lg	%0,%2\n"
+		"0:	lgr	%1,%0\n"
+		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
+		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
+		"	csg	%0,%1,%2\n"
+		"	jl	0b\n"
+		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
+		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
+#endif
+	return __pgste(new);
+}
+
+static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-	clear_bit(RCP_PCL_BIT, pgste);
+	asm(
+		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
+		"	stg	%1,%0\n"
+		: "=Q" (ptep[PTRS_PER_PTE])
+		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
 	preempt_enable();
 #endif
 }
 
-#include <linux/page-flags.h>
-
-static inline void ptep_rcp_copy(pte_t *ptep)
+static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	struct page *page = virt_to_page(pte_val(*ptep));
-	unsigned int skey;
-	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+	unsigned long pfn, bits;
+	unsigned char skey;
 
-	skey = page_get_storage_key(pte_val(*ptep) >> PAGE_SHIFT);
-	if (skey & _PAGE_CHANGED) {
-		set_bit_simple(RCP_GC_BIT, pgste);
-		set_bit_simple(KVM_UD_BIT, pgste);
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
+	skey = page_get_storage_key(pfn);
+	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+	/* Clear page changed & referenced bit in the storage key */
+	if (bits) {
+		skey ^= bits;
+		page_set_storage_key(pfn, skey, 1);
 	}
-	if (skey & _PAGE_REFERENCED)
-		set_bit_simple(RCP_GR_BIT, pgste);
-	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
-		SetPageDirty(page);
-		set_bit_simple(KVM_UD_BIT, pgste);
-	}
-	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
-		SetPageReferenced(page);
+	/* Transfer page changed & referenced bit to guest bits in pgste */
+	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
+	/* Get host changed & referenced bits from pgste */
+	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
+	/* Clear host bits in pgste. */
+	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
+	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
+	/* Copy page access key and fetch protection bit to pgste */
+	pgste_val(pgste) |=
+		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+	/* Transfer changed and referenced to kvm user bits */
+	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
+	/* Transfer changed & referenced to pte software bits */
+	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
 #endif
+	return pgste;
+
+}
+
+static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	int young;
+
+	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+	/* Transfer page referenced bit to pte software bit (host view) */
+	if (young || (pgste_val(pgste) & RCP_HR_BIT))
+		pte_val(*ptep) |= _PAGE_SWR;
+	/* Clear host referenced bit in pgste. */
+	pgste_val(pgste) &= ~RCP_HR_BIT;
+	/* Transfer page referenced bit to guest bit in pgste */
+	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
+#endif
+	return pgste;
+
+}
+
+static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	unsigned long pfn;
+	unsigned long okey, nkey;
+
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
+	okey = nkey = page_get_storage_key(pfn);
+	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
+	/* Set page access key and fetch protection bit from pgste */
+	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+	if (okey != nkey)
+		page_set_storage_key(pfn, nkey, 1);
+#endif
+}
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t entry)
+{
+	pgste_t pgste;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste_set_pte(ptep, pgste);
+		*ptep = entry;
+		pgste_set_unlock(ptep, pgste);
+	} else
+		*ptep = entry;
 }
 
 /*
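The shift constants in pgste_update_all() are bit relocations between the
storage-key layout and the pgste/pte layouts. Assuming the s390 storage-key
values defined elsewhere in this tree (_PAGE_REFERENCED 0x04, _PAGE_CHANGED
0x02, _PAGE_ACC_BITS 0xf0, _PAGE_FP_BIT 0x08 — quoted here as an assumption
for the check), the arithmetic lines up as follows (standalone userspace C,
not part of the patch):

#include <assert.h>

int main(void)
{
	/* "bits" is a subset of _PAGE_REFERENCED | _PAGE_CHANGED = 0x06 */
	assert((0x04UL << 48) == 0x0004000000000000UL);	/* R -> RCP_GR_BIT */
	assert((0x02UL << 48) == 0x0002000000000000UL);	/* C -> RCP_GC_BIT */
	assert((0x0040000000000000UL >> 52) == 0x04UL);	/* RCP_HR_BIT -> R */
	assert((0x0020000000000000UL >> 52) == 0x02UL);	/* RCP_HC_BIT -> C */
	assert((0x04UL << 45) == 0x0000800000000000UL);	/* R -> KVM_UR_BIT */
	assert((0x02UL << 45) == 0x0000400000000000UL);	/* C -> KVM_UC_BIT */
	assert((0x04UL << 1) == 0x08UL);		/* R -> _PAGE_SWR */
	assert((0x02UL << 1) == 0x04UL);		/* C -> _PAGE_SWC */
	/* access key and fetch protection move to the pgste top byte */
	assert((0xf0UL << 56) == 0xf000000000000000UL);	/* RCP_ACC_BITS */
	assert((0x08UL << 56) == 0x0800000000000000UL);	/* RCP_FP_BIT */
	return 0;
}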
@@ -587,19 +671,19 @@ static inline int pte_write(pte_t pte)
 
 static inline int pte_dirty(pte_t pte)
 {
-	/* A pte is neither clean nor dirty on s/390. The dirty bit
-	 * is in the storage key. See page_test_and_clear_dirty for
-	 * details.
-	 */
+#ifdef CONFIG_PGSTE
+	if (pte_val(pte) & _PAGE_SWC)
+		return 1;
+#endif
 	return 0;
 }
 
 static inline int pte_young(pte_t pte)
 {
-	/* A pte is neither young nor old on s/390. The young bit
-	 * is in the storage key. See page_test_and_clear_young for
-	 * details.
-	 */
+#ifdef CONFIG_PGSTE
+	if (pte_val(pte) & _PAGE_SWR)
+		return 1;
+#endif
 	return 0;
 }
 
@@ -607,46 +691,27 @@ static inline int pte_young(pte_t pte)
  * pgd/pmd/pte modification functions
  */
 
-#ifndef __s390x__
-
-#define pgd_clear(pgd)		do { } while (0)
-#define pud_clear(pud)		do { } while (0)
-
-#else /* __s390x__ */
-
-static inline void pgd_clear_kernel(pgd_t * pgd)
+static inline void pgd_clear(pgd_t *pgd)
 {
+#ifdef __s390x__
 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
 		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
-}
-
-static inline void pgd_clear(pgd_t * pgd)
-{
-	pgd_clear_kernel(pgd);
-}
-
-static inline void pud_clear_kernel(pud_t *pud)
-{
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+#endif
 }
 
 static inline void pud_clear(pud_t *pud)
 {
-	pud_clear_kernel(pud);
+#ifdef __s390x__
+	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+#endif
 }
 
-#endif /* __s390x__ */
-
-static inline void pmd_clear_kernel(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t *pmdp)
 {
 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline void pmd_clear(pmd_t *pmd)
-{
-	pmd_clear_kernel(pmd);
-}
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
@@ -679,35 +744,27 @@ static inline pte_t pte_mkwrite(pte_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	/* The only user of pte_mkclean is the fork() code.
-	   We must *not* clear the *physical* page dirty bit
-	   just because fork() wants to clear the dirty bit in
-	   *one* of the page's mappings.  So we just do nothing. */
+#ifdef CONFIG_PGSTE
+	pte_val(pte) &= ~_PAGE_SWC;
+#endif
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	/* We do not explicitly set the dirty bit because the
-	 * sske instruction is slow. It is faster to let the
-	 * next instruction set the dirty bit.
-	 */
 	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	/* S/390 doesn't keep its dirty/referenced bit in the pte.
-	 * There is no point in clearing the real referenced bit.
-	 */
+#ifdef CONFIG_PGSTE
+	pte_val(pte) &= ~_PAGE_SWR;
+#endif
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	/* S/390 doesn't keep its dirty/referenced bit in the pte.
-	 * There is no point in setting the real referenced bit.
-	 */
 	return pte;
 }
 
@@ -745,64 +802,60 @@ static inline pte_t pte_mkhuge(pte_t pte)
 }
 #endif
 
-#ifdef CONFIG_PGSTE
 /*
- * Get (and clear) the user dirty bit for a PTE.
+ * Get (and clear) the user dirty bit for a pte.
  */
-static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
-						     pte_t *ptep)
+static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
+						 pte_t *ptep)
 {
-	int dirty;
-	unsigned long *pgste;
-	unsigned long pfn;
-	struct page *page;
-	unsigned int skey;
-
-	if (!mm->context.has_pgste)
-		return -EINVAL;
-	rcp_lock(ptep);
-	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-	pfn = pte_val(*ptep) >> PAGE_SHIFT;
-	page = pfn_to_page(pfn);
-	skey = page_get_storage_key(pfn);
-	if (skey & _PAGE_CHANGED) {
-		set_bit_simple(RCP_GC_BIT, pgste);
-		set_bit_simple(KVM_UD_BIT, pgste);
+	pgste_t pgste;
+	int dirty = 0;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste = pgste_update_all(ptep, pgste);
+		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
+		pgste_val(pgste) &= ~KVM_UC_BIT;
+		pgste_set_unlock(ptep, pgste);
+		return dirty;
 	}
-	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
-		SetPageDirty(page);
-		set_bit_simple(KVM_UD_BIT, pgste);
-	}
-	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
-	if (skey & _PAGE_CHANGED)
-		page_set_storage_key(pfn, skey & ~_PAGE_CHANGED, 1);
-	rcp_unlock(ptep);
 	return dirty;
 }
-#endif
+
+/*
+ * Get (and clear) the user referenced bit for a pte.
+ */
+static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
+						 pte_t *ptep)
+{
+	pgste_t pgste;
+	int young = 0;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste = pgste_update_young(ptep, pgste);
+		young = !!(pgste_val(pgste) & KVM_UR_BIT);
+		pgste_val(pgste) &= ~KVM_UR_BIT;
+		pgste_set_unlock(ptep, pgste);
+	}
+	return young;
+}
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_PGSTE
-	unsigned long pfn;
-	int young;
-	unsigned long *pgste;
+	pgste_t pgste;
+	pte_t pte;
 
-	if (!vma->vm_mm->context.has_pgste)
-		return 0;
-	pfn = pte_val(*ptep) >> PAGE_SHIFT;
-	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-
-	young = ((page_get_storage_key(pfn) & _PAGE_REFERENCED) != 0);
-	rcp_lock(ptep);
-	if (young)
-		set_bit_simple(RCP_GR_BIT, pgste);
-	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
-	rcp_unlock(ptep);
-	return young;
-#endif
+	if (mm_has_pgste(vma->vm_mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste = pgste_update_young(ptep, pgste);
+		pte = *ptep;
+		*ptep = pte_mkold(pte);
+		pgste_set_unlock(ptep, pgste);
+		return pte_young(pte);
+	}
 	return 0;
 }
 
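Note that ptep_test_and_clear_user_dirty now quietly returns 0 instead of
-EINVAL when the mm has no pgste, so callers no longer need to special-case
that. A hypothetical consumer on the KVM migration side might look like this
(the function name, gfn parameter and bitmap are illustrative assumptions,
not from this patch):

/* Hypothetical sketch: harvest one pte's user dirty bit into a
 * migration dirty bitmap. Not from this patch. */
static void harvest_user_dirty(struct mm_struct *mm, pte_t *ptep,
			       unsigned long gfn, unsigned long *bitmap)
{
	if (ptep_test_and_clear_user_dirty(mm, ptep))
		__set_bit(gfn, bitmap);	/* page must be re-sent to target */
}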
@@ -814,10 +867,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 	 * On s390 reference bits are in storage key and never in TLB
 	 * With virtualization we handle the reference bit, without we
 	 * we can simply return */
-#ifdef CONFIG_PGSTE
 	return ptep_test_and_clear_young(vma, address, ptep);
-#endif
-	return 0;
 }
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -837,21 +887,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	}
 }
 
-static inline void ptep_invalidate(struct mm_struct *mm,
-				   unsigned long address, pte_t *ptep)
-{
-	if (mm->context.has_pgste) {
-		rcp_lock(ptep);
-		__ptep_ipte(address, ptep);
-		ptep_rcp_copy(ptep);
-		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-		rcp_unlock(ptep);
-		return;
-	}
-	__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-}
-
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -866,24 +901,72 @@ static inline void ptep_invalidate(struct mm_struct *mm,
  * is a nop.
  */
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep)			\
-({									\
-	pte_t __pte = *(__ptep);					\
-	(__mm)->context.flush_mm = 1;					\
-	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
-	    (__mm) != current->active_mm)				\
-		ptep_invalidate(__mm, __address, __ptep);		\
-	else								\
-		pte_clear((__mm), (__address), (__ptep));		\
-	__pte;								\
-})
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t pte;
+
+	mm->context.flush_mm = 1;
+	if (mm_has_pgste(mm))
+		pgste = pgste_get_lock(ptep);
+
+	pte = *ptep;
+	if (!mm_exclusive(mm))
+		__ptep_ipte(address, ptep);
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_update_all(&pte, pgste);
+		pgste_set_unlock(ptep, pgste);
+	}
+	return pte;
+}
+
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+					   unsigned long address,
+					   pte_t *ptep)
+{
+	pte_t pte;
+
+	mm->context.flush_mm = 1;
+	if (mm_has_pgste(mm))
+		pgste_get_lock(ptep);
+
+	pte = *ptep;
+	if (!mm_exclusive(mm))
+		__ptep_ipte(address, ptep);
+	return pte;
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+					   unsigned long address,
+					   pte_t *ptep, pte_t pte)
+{
+	*ptep = pte;
+	if (mm_has_pgste(mm))
+		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+}
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
-	pte_t pte = *ptep;
-	ptep_invalidate(vma->vm_mm, address, ptep);
+	pgste_t pgste;
+	pte_t pte;
+
+	if (mm_has_pgste(vma->vm_mm))
+		pgste = pgste_get_lock(ptep);
+
+	pte = *ptep;
+	__ptep_ipte(address, ptep);
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+
+	if (mm_has_pgste(vma->vm_mm)) {
+		pgste = pgste_update_all(&pte, pgste);
+		pgste_set_unlock(ptep, pgste);
+	}
 	return pte;
 }
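The new transaction pair mirrors the generic hooks that the mprotect path
uses, which is the caller point 4 of the commit message refers to. A
simplified sketch of that usage shape (page table locking and error handling
omitted; not part of the patch):

static void change_one_pte(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t ptent;

	ptent = ptep_modify_prot_start(mm, addr, ptep);	/* fetch pte, lock pgste */
	ptent = pte_modify(ptent, newprot);		/* apply new protection */
	ptep_modify_prot_commit(mm, addr, ptep, ptent);	/* store pte, unlock pgste */
}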
@@ -896,41 +979,68 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
  */
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
-					    unsigned long addr,
+					    unsigned long address,
 					    pte_t *ptep, int full)
 {
-	pte_t pte = *ptep;
+	pgste_t pgste;
+	pte_t pte;
 
-	if (full)
-		pte_clear(mm, addr, ptep);
-	else
-		ptep_invalidate(mm, addr, ptep);
+	if (mm_has_pgste(mm))
+		pgste = pgste_get_lock(ptep);
+
+	pte = *ptep;
+	if (!full)
+		__ptep_ipte(address, ptep);
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_update_all(&pte, pgste);
+		pgste_set_unlock(ptep, pgste);
+	}
 	return pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
-({									\
-	pte_t __pte = *(__ptep);					\
-	if (pte_write(__pte)) {						\
-		(__mm)->context.flush_mm = 1;				\
-		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
-		    (__mm) != current->active_mm)			\
-			ptep_invalidate(__mm, __addr, __ptep);		\
-		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
-	}								\
-})
+static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t pte = *ptep;
+
+	if (pte_write(pte)) {
+		mm->context.flush_mm = 1;
+		if (mm_has_pgste(mm))
+			pgste = pgste_get_lock(ptep);
+
+		if (!mm_exclusive(mm))
+			__ptep_ipte(address, ptep);
+		*ptep = pte_wrprotect(pte);
+
+		if (mm_has_pgste(mm))
+			pgste_set_unlock(ptep, pgste);
+	}
+	return pte;
+}
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
-({									\
-	int __changed = !pte_same(*(__ptep), __entry);			\
-	if (__changed) {						\
-		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
-		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
-	}								\
-	__changed;							\
-})
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pte_t *ptep,
+					pte_t entry, int dirty)
+{
+	pgste_t pgste;
+
+	if (pte_same(*ptep, entry))
+		return 0;
+	if (mm_has_pgste(vma->vm_mm))
+		pgste = pgste_get_lock(ptep);
+
+	__ptep_ipte(address, ptep);
+	*ptep = entry;
+
+	if (mm_has_pgste(vma->vm_mm))
+		pgste_set_unlock(ptep, pgste);
+	return 1;
+}
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -175,7 +175,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pmd = pmd_offset(pud, address);
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
-			ptep_invalidate(&init_mm, address, pte);
+			__ptep_ipte(address, pte);
+			pte_val(*pte) = _PAGE_TYPE_EMPTY;
 			continue;
 		}
 		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
@@ -28,7 +28,7 @@ static void change_page_attr(unsigned long addr, int numpages,
 
 		pte = *ptep;
 		pte = set(pte);
-		ptep_invalidate(&init_mm, addr, ptep);
+		__ptep_ipte(addr, ptep);
 		*ptep = pte;
 		addr += PAGE_SIZE;
 	}
@@ -95,7 +95,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pu_dir = vmem_pud_alloc();
 			if (!pu_dir)
 				goto out;
-			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+			pgd_populate(&init_mm, pg_dir, pu_dir);
 		}
 
 		pu_dir = pud_offset(pg_dir, address);
@@ -103,7 +103,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+			pud_populate(&init_mm, pu_dir, pm_dir);
 		}
 
 		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
@@ -123,7 +123,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+			pmd_populate(&init_mm, pm_dir, pt_dir);
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
@@ -159,7 +159,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 			continue;
 
 		if (pmd_huge(*pm_dir)) {
-			pmd_clear_kernel(pm_dir);
+			pmd_clear(pm_dir);
 			address += HPAGE_SIZE - PAGE_SIZE;
 			continue;
 		}
@@ -192,7 +192,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			pu_dir = vmem_pud_alloc();
 			if (!pu_dir)
 				goto out;
-			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+			pgd_populate(&init_mm, pg_dir, pu_dir);
 		}
 
 		pu_dir = pud_offset(pg_dir, address);
@@ -200,7 +200,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+			pud_populate(&init_mm, pu_dir, pm_dir);
 		}
 
 		pm_dir = pmd_offset(pu_dir, address);
@@ -208,7 +208,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+			pmd_populate(&init_mm, pm_dir, pt_dir);
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);