x86: pgtable: unify pte accessors
Make various pte accessors common.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 195466dc4b
parent e332870135
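The shape of the unification: each width-specific header keeps only its raw native_* operations, and the shared header wraps them under the generic names, adding the pte_update() notification used by shadow-paging hypervisors. A minimal stand-alone sketch of that layering, with stub types; this is an illustration only, not code from this commit:

/* Illustration of the wrapper layering this patch establishes. */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;    /* stand-in for the kernel's pte_t */

/* per-width primitive, as pgtable_64.h provides after the patch */
static pte_t native_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;
        ptep->pte = 0;          /* the real code uses xchg() for atomicity */
        return res;
}

/* no-op when !CONFIG_PARAVIRT; a hypervisor backend can hook this */
static void pte_update(void *mm, unsigned long addr, pte_t *ptep)
{
        (void)mm; (void)addr; (void)ptep;
}

/* common wrapper, as added to the shared pgtable header */
static pte_t ptep_get_and_clear(void *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

int main(void)
{
        pte_t e = { .pte = 0x1234 };
        pte_t old = ptep_get_and_clear(NULL, 0, &e);
        printf("old=%#lx now=%#lx\n", old.pte, e.pte);
        return 0;
}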
@@ -33,7 +33,6 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)

#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
@@ -101,7 +101,6 @@ static inline void native_pmd_clear(pmd_t *pmd)
#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
#define set_pud(pudp, pud) native_set_pud(pudp, pud)
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)
#endif
@@ -116,6 +116,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __S111 PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
@@ -169,7 +170,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        return __pte(val);
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
@@ -178,4 +178,112 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)       do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)         \
({                                                                      \
        int __changed = !pte_same(*(ptep), entry);                      \
        if (__changed && dirty) {                                       \
                *ptep = entry;                                          \
                pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
        __changed;                                                      \
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({                   \
        int __ret = 0;                                                  \
        if (pte_young(*(ptep)))                                         \
                __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,          \
                                           &(ptep)->pte);               \
        if (__ret)                                                      \
                pte_update((vma)->vm_mm, addr, ptep);                   \
        __ret;                                                          \
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)                      \
({                                                                      \
        int __young;                                                    \
        __young = ptep_test_and_clear_young((vma), (address), (ptep));  \
        if (__young)                                                    \
                flush_tlb_page(vma, address);                           \
        __young;                                                        \
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, &ptep->pte);
        pte_update(mm, addr, ptep);
}

#ifndef CONFIG_PARAVIRT
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#endif /* !CONFIG_PARAVIRT */

#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */
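The comment block above defines the contract these wrappers implement: a PTE modified by hand (rather than through set_pte()/pte_clear()) must be reported via pte_update(), and the deferred form must be paired with a TLB flush issued while the same page table lock is still held. As a hedged illustration, the ptep_set_access_flags() macro above could be written out long-hand like this; the helper name is hypothetical and not part of this commit:

/* Hypothetical long-hand form of ptep_set_access_flags(); assumes the
 * caller already holds the page-table lock covering 'ptep'. */
static inline int relax_protection(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep,
                                   pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;                               /* hand-modified PTE ...      */
                pte_update_defer(vma->vm_mm, address, ptep); /* ... so notify, deferred    */
                flush_tlb_page(vma, address);                /* flush before the lock drops */
        }
        return changed;
}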
@@ -107,105 +107,6 @@ extern unsigned long pg0[];
# include <asm/pgtable-2level.h>
#endif

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)       do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)         \
({                                                                      \
        int __changed = !pte_same(*(ptep), entry);                      \
        if (__changed && dirty) {                                       \
                (ptep)->pte_low = (entry).pte_low;                      \
                pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
        __changed;                                                      \
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({                   \
        int __ret = 0;                                                  \
        if (pte_young(*(ptep)))                                         \
                __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,          \
                                           &(ptep)->pte_low);           \
        if (__ret)                                                      \
                pte_update((vma)->vm_mm, addr, ptep);                   \
        __ret;                                                          \
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)                      \
({                                                                      \
        int __young;                                                    \
        __young = ptep_test_and_clear_young((vma), (address), (ptep));  \
        if (__young)                                                    \
                flush_tlb_page(vma, address);                           \
        __young;                                                        \
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
        pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
@@ -359,6 +260,4 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */
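With the duplicated block gone from pgtable_32.h, and its asm-generic include along with it, the include layering after this patch looks roughly like the sketch below; this is a paraphrase of the structure visible in the hunks above, not a literal quote of the headers:

/* include/asm-x86/pgtable.h, post-patch structure (sketch) */
#ifdef CONFIG_X86_32
# include "pgtable_32.h"             /* 32-bit native_* accessors, 2/3-level variants */
#else
# include "pgtable_64.h"             /* 64-bit native_* accessors */
#endif

#ifndef __ASSEMBLY__
/* ... common ptep_* wrappers built on the native_* primitives ... */
#include <asm-generic/pgtable.h>     /* generic fallbacks, gated on __HAVE_ARCH_* */
#endif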
@@ -101,18 +101,18 @@ static inline void pgd_clear (pgd_t * pgd)
        set_pgd(pgd, __pgd(0));
}

#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
#define native_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0))

struct mm_struct;

static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
static inline pte_t native_ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
        pte_t pte;
        if (full) {
                pte = *ptep;
                *ptep = __pte(0);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
                pte = native_ptep_get_and_clear(ptep);
        }
        return pte;
}
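The "full" variant kept here, and the common ptep_get_and_clear_full() added to the shared header, let a whole-address-space teardown skip both the xchg and the hypervisor notification, since no other CPU can still be touching those PTEs. A hedged sketch of the kind of caller this serves, loosely modelled on the generic zap path; the function name and loop below are illustrative, not from this commit:

/* Illustrative only: how a teardown loop would use the 'full' flag.
 * When the whole mm is being destroyed, the cheap non-atomic clear and
 * the skipped pte_update() are sufficient. */
static void zap_range_sketch(struct mm_struct *mm, pte_t *pte,
                             unsigned long addr, unsigned long end,
                             int fullmm)
{
        for (; addr < end; addr += PAGE_SIZE, pte++) {
                pte_t ptent = *pte;

                if (pte_none(ptent))
                        continue;
                /* fullmm != 0 takes the lock-free local path inside */
                ptent = ptep_get_and_clear_full(mm, addr, pte, fullmm);
                /* ... release whatever page was mapped here ... */
        }
}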
@@ -158,26 +158,12 @@ static inline unsigned long pmd_bad(pmd_t pmd)

#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define native_pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

struct vm_area_struct;

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        if (!pte_young(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, &ptep->pte);
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
@@ -243,22 +229,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                      \
        int __changed = !pte_same(*(__ptep), __entry);                  \
        if (__changed && __dirty) {                                     \
                set_pte(__ptep, __entry);                               \
                flush_tlb_page(__vma, __address);                       \
        }                                                               \
        __changed;                                                      \
})

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
@@ -290,12 +260,7 @@ pte_t *lookup_address(unsigned long addr);
#define kc_offset_to_vaddr(o) \
        (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */

#endif /* _X86_64_PGTABLE_H */
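The __HAVE_ARCH_* defines shown above are the opt-out switch for the generic helpers: asm-generic/pgtable.h only supplies its fallback when the architecture has not claimed the hook, which is why moving an accessor into the common x86 header also moves the corresponding define. A paraphrased sketch of that pattern, not a verbatim copy of the generic header:

/* Sketch of the asm-generic/pgtable.h opt-out mechanism of this era. */
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
/* generic fallback, used only when the arch did not define the hook */
#define ptep_get_and_clear(__mm, __address, __ptep)     \
({                                                      \
        pte_t __pte = *(__ptep);                        \
        pte_clear((__mm), (__address), (__ptep));       \
        __pte;                                          \
})
#endif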