@@ -226,8 +226,9 @@ extern unsigned long MODULES_END;
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_WRITE	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
@@ -243,19 +244,25 @@ extern unsigned long MODULES_END;
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR....wdytp
 * empty			.10....00000
 * swap				.10....xxx10
 * file				.11....xxxx0
 * prot-none, clean		.11....00x01
 * prot-none, dirty		.10....01x01
 * read-only, clean		.01....00x01
 * read-only, dirty		.01....01x01
 * read-write, clean		.01....10x01
 * read-write, dirty		.00....11x01
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
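The two closing remarks translate directly into mask tests. A minimal, illustrative sketch of such checks, assuming nothing beyond the bit assignments documented above (the example_* names are hypothetical, not helpers from this header):

/* Illustrative only: the header's real pte_present()/pte_none() may differ. */
static inline int example_pte_present(pte_t pte)
{
	return (pte_val(pte) & 0x001) == 0x001;	/* software present bit set */
}

static inline int example_pte_none(pte_t pte)
{
	/* invalid bit set, protect/type/present bits clear */
	return (pte_val(pte) & 0x603) == 0x400;
}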
@@ -273,15 +280,26 @@ extern unsigned long MODULES_END;
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_PROTECT

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *		..R...I.....
 * prot-none	..1...1.....
 * read-only	..1...0.....
 * read-write	..0...0.....
 * empty	..0...1.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
@@ -290,9 +308,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UR_BIT	0x00008000UL
#define PGSTE_UC_BIT	0x00004000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00002000UL	/* IPTE notify bit */
#define PGSTE_IN_BIT	0x00008000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

@@ -331,6 +347,8 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_CO	0x100	/* change-recording override */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL	/* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* segment table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
@@ -342,6 +360,21 @@ extern unsigned long MODULES_END;
#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
#define _SEGMENT_ENTRY_YOUNG	0x002	/* SW segment young bit */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *			..R...I...y.
 * prot-none, old	..0...1...1.
 * prot-none, young	..1...1...1.
 * read-only, old	..1...1...0.
 * read-only, young	..1...0...1.
 * read-write, old	..0...1...0.
 * read-write, young	..0...0...1.
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */
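Read together with the helpers further down (pmd_prot_none(), pmd_young(), pmd_mkold()/pmd_mkyoung()), the table says that a PROT_NONE segment carries its software young bit in the read-only bit, while a regular segment carries it in _SEGMENT_ENTRY_YOUNG. A small illustrative decoding of the table, mirroring those helpers (the example_* name is hypothetical):

/* Illustrative decoding of the encoding above; the real helpers appear below. */
static inline int example_segment_young(unsigned long entry)
{
	int prot_none = (entry & _SEGMENT_ENTRY_INVALID) &&
			(entry & _SEGMENT_ENTRY_NONE);

	if (prot_none)	/* prot-none: the R bit distinguishes young from old */
		return (entry & _SEGMENT_ENTRY_PROTECT) != 0;
	return (entry & _SEGMENT_ENTRY_YOUNG) != 0;
}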
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */

@@ -357,9 +390,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UR_BIT	0x0000800000000000UL
#define PGSTE_UC_BIT	0x0000400000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */
#define PGSTE_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

@@ -375,12 +406,17 @@ extern unsigned long MODULES_END;
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)
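With this change PAGE_READ and PAGE_WRITE carry _PAGE_INVALID and _PAGE_PROTECT, so a pte that has not been referenced yet is hardware-inaccessible and the first access faults; marking the pte young (and dirty, for writes) is what re-validates it. A hedged sketch of that life cycle, built only from helpers that appear elsewhere in this patch (the example_* name is hypothetical):

/* Sketch only: how a readable pte becomes accessible once it is made young. */
static inline pte_t example_make_readable(unsigned long physpage)
{
	pte_t pte = mk_pte_phys(physpage, PAGE_READ);	/* young via mk_pte_phys */

	pte = pte_mkold(pte);	/* invalid again, the next access faults */
	pte = pte_mkyoung(pte);	/* readable + young -> invalid bit cleared */
	return pte;
}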

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
@@ -410,9 +446,10 @@ extern unsigned long MODULES_END;
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_NONE)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
#define SEGMENT_WRITE	__pgprot(0)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_INVALID)

static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -520,10 +557,19 @@ static inline int pmd_large(pmd_t pmd)
#endif
}

static inline int pmd_prot_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
	       (pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
#ifdef CONFIG_64BIT
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
#endif
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -542,12 +588,21 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	if (pmd_prot_none(pmd))
		return 0;
	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
	int young = 0;
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
	else
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
#endif
	return young;
}

static inline int pte_present(pte_t pte)
@@ -632,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;
	unsigned long address, bits, skey;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
	if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
		/* Transfer dirty + referenced bit to host bits in pgste */
		pgste_val(pgste) |= bits << 52;
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
	} else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
		   (bits & _PAGE_REFERENCED)) {
		/* Transfer referenced bit to host bit in pgste */
		pgste_val(pgste) |= PGSTE_HR_BIT;
		page_reset_referenced(address);
	}
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* PGSTE_UR_BIT & PGSTE_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
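The shift counts encode the layout of the PGSTE: the storage-key change and reference bits are moved up into the guest and host copies. Assuming _PAGE_CHANGED is 0x02 and _PAGE_REFERENCED is 0x04 (their definitions are not shown in this excerpt), the arithmetic lines up with the PGSTE_* values above:

/*
 * Worked example (storage-key bit values hedged as noted above):
 *   0x02 << 48 == 0x0002000000000000 == PGSTE_GC_BIT	(guest changed)
 *   0x04 << 48 == 0x0004000000000000 == PGSTE_GR_BIT	(guest referenced)
 *   0x02 << 52 == 0x0020000000000000 == PGSTE_HC_BIT	(host changed)
 * and shifting the referenced bit by 52 lands on PGSTE_HR_BIT the same way.
 */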
@@ -667,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= PGSTE_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & PGSTE_HR_BIT) {
		pgste_val(pgste) &= ~PGSTE_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= PGSTE_UR_BIT;
		pte_val(*ptep) |= _PAGE_YOUNG;
	}
	if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
		pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
#endif
	return pgste;
}

@@ -839,11 +876,7 @@ static inline int pte_dirty(pte_t pte)

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_YOUNG)
		return 1;
#endif
	return 0;
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

/*
@@ -884,6 +917,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
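In other words, pte_modify() only re-validates what the pte has already earned: a protection change keeps an old pte invalid and a clean pte write-protected. A small hedged sketch of the readable case, using only helpers from this patch (the example_* name is hypothetical, and it assumes _PAGE_YOUNG survives the _PAGE_CHG_MASK step, as the check above implies):

/* Sketch only: expected outcome of pte_modify() under the new encoding. */
static inline pte_t example_reprotect(pte_t pte)
{
	pte = pte_modify(pte, PAGE_READ);
	/* if the pte was young, _PAGE_INVALID is cleared and reads work at once;
	 * if it was old, it stays invalid and the next access faults first */
	return pte;
}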
@@ -921,14 +964,16 @@ static inline pte_t pte_mkdirty(pte_t pte)

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_YOUNG;
#endif
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

@@ -958,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
		pgste_val(pgste) &= ~PGSTE_UC_BIT;
		dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
		pgste_val(pgste) &= ~PGSTE_HC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
@@ -978,42 +1023,13 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
		pgste_val(pgste) &= ~PGSTE_UR_BIT;
		young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
		pgste_val(pgste) &= ~PGSTE_HR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB
	 * With virtualization we handle the reference bit, without we
	 * can simply return */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
@@ -1042,6 +1058,40 @@ static inline void ptep_flush_lazy(struct mm_struct *mm,
	mm->context.flush_mm = 1;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
@@ -1229,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
@@ -1338,10 +1388,41 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	} else {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
#endif
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	} else {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
#endif
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	int young;

	young = pmd_young(pmd);
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (young)
		pmd = pmd_mkyoung(pmd);
	return pmd;
}

@@ -1349,13 +1430,13 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
	return pmd_mkyoung(__pmd);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber PROT_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
	/* Do not clobber PROT_NONE segments! */
	if (!pmd_prot_none(pmd))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}
@@ -1391,7 +1472,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	/* Do not clobber PROT_NONE segments! */
	if (!pmd_prot_none(pmd))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}
@@ -1401,50 +1484,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;
	pmd_t pmd;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
	pmd = *pmdp;
	__pmd_idte(address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}
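The removed assembly walked every page-table entry behind the segment and reset the storage-key reference bit of each 4 KB page, either one page per RRBE or, when MACHINE_HAS_RRBM, 64 pages per RRBM (hence counter = PTRS_PER_PTE >> 6 and the 64 * 4096UL step). A rough, hedged C equivalent of that scan, built only from page_reset_referenced() as used elsewhere in this file (the example_* name is hypothetical, and it is not a drop-in replacement for the asm):

/* Sketch of the removed storage-key scan over one segment. */
static inline int example_segment_test_and_clear_young(unsigned long pmd_addr)
{
	int young = 0, i;

	/* reset and collect the reference bit of every 4 KB page in the segment */
	for (i = 0; i < PTRS_PER_PTE; i++)
		young |= page_reset_referenced(pmd_addr + i * 4096UL);
	return young;
}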

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR