x86/paravirt: Remove paravirt ops pmd_update[_defer] and pte_update_defer

pte_update_defer can be removed as it is always set to the same function
as pte_update, so any usage of pte_update_defer() can be replaced by
pte_update().

pmd_update and pmd_update_defer are always set to paravirt_nop, so they
can just be nuked.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: jeremy@goop.org
Cc: chrisw@sous-sol.org
Cc: akataria@vmware.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xen.org
Cc: konrad.wilk@oracle.com
Cc: david.vrabel@citrix.com
Cc: boris.ostrovsky@oracle.com
Link: http://lkml.kernel.org/r/1447771879-1806-1-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit d6ccc3ec95
parent 4609586592
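To make the reasoning above concrete, here is a minimal userspace C sketch of the pattern being simplified. It is not kernel code; the names (mmu_hooks, nop_update, trace_update, caller) are invented for illustration. The point is a table of function pointers in which the deferred variant is always assigned the same function as the immediate one, and in which some hooks are never anything but a no-op.

#include <stdio.h>

struct mm;                      /* opaque stand-in for struct mm_struct */
typedef unsigned long pte_val;  /* stand-in for pte_t */

struct mmu_hooks {
        void (*pte_update)(struct mm *mm, unsigned long addr, pte_val *ptep);
        void (*pte_update_defer)(struct mm *mm, unsigned long addr, pte_val *ptep);
        void (*pmd_update)(struct mm *mm, unsigned long addr, pte_val *pmdp);
        void (*pmd_update_defer)(struct mm *mm, unsigned long addr, pte_val *pmdp);
};

static void nop_update(struct mm *mm, unsigned long addr, pte_val *p)
{
        (void)mm; (void)addr; (void)p;  /* deliberately does nothing */
}

static void trace_update(struct mm *mm, unsigned long addr, pte_val *p)
{
        (void)mm; (void)p;
        printf("pte updated at %#lx\n", addr);
}

/* Every backend assigns the deferred hook the same function as the
 * immediate one, and leaves the pmd hooks as no-ops. */
static struct mmu_hooks native_hooks = {
        .pte_update       = nop_update,
        .pte_update_defer = nop_update,
        .pmd_update       = nop_update,
        .pmd_update_defer = nop_update,
};

static struct mmu_hooks tracing_hooks = {
        .pte_update       = trace_update,
        .pte_update_defer = trace_update,  /* same function again */
        .pmd_update       = nop_update,
        .pmd_update_defer = nop_update,
};

static void caller(struct mmu_hooks *ops)
{
        pte_val pte = 0;

        /* A caller that used ops->pte_update_defer(...) here can switch to
         * ops->pte_update(...) with identical behaviour, because the two
         * members always hold the same function. */
        ops->pte_update(NULL, 0x1000, &pte);

        /* pmd_update[_defer] are no-ops in every backend, so this call (and
         * the hooks themselves) can simply be removed. */
        ops->pmd_update(NULL, 0x2000, &pte);
}

int main(void)
{
        caller(&native_hooks);
        caller(&tracing_hooks);
        return 0;
}

Because every backend fills pte_update_defer with the same function as pte_update, callers can switch to the immediate form without any change in behaviour, and hooks that are no-ops everywhere can be deleted along with their call sites. The first hunk below, containing the PVOP_VCALL3() wrappers, is presumably from arch/x86/include/asm/paravirt.h.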
@@ -366,23 +366,6 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
-static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
-			      pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
-}
-
-static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pte_t *ptep)
-{
-	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
-}
-
-static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
-}
 
 static inline pte_t __pte(pteval_t val)
 {
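The next hunk drops the corresponding hook declarations from struct pv_mmu_ops; this is presumably arch/x86/include/asm/paravirt_types.h.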
@@ -266,12 +266,6 @@ struct pv_mmu_ops {
 			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
-	void (*pte_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pte_t *ptep);
-	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
-			   pmd_t *pmdp);
-	void (*pmd_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pmd_t *pmdp);
 
 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
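The following four hunks (the !CONFIG_PARAVIRT stub #defines, the pte_update comment block, pmdp_huge_get_and_clear() and pmdp_set_wrprotect()) all appear to come from a single header, presumably arch/x86/include/asm/pgtable.h; the shifting line offsets between them are consistent with that.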
@@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 #define pmd_clear(pmd)			native_pmd_clear(pmd)
 
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#define pmd_update(mm, addr, ptep)		do { } while (0)
-#define pmd_update_defer(mm, addr, ptep)	do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -721,14 +718,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
  * updates should either be sets, clears, or set_pte_atomic for P->P
  * transitions, which means this hook should only be called for user PTEs.
  * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush. The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
+ * requires a subsequent TLB flush.
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
 #endif
 
 /*
@@ -820,9 +812,7 @@ static inline int pmd_write(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmdp)
 {
-	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
-	pmd_update(mm, addr, pmdp);
-	return pmd;
+	return native_pmdp_get_and_clear(pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -830,7 +820,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pmd_t *pmdp)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
-	pmd_update(mm, addr, pmdp);
 }
 
 /*
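Next, the default hook table loses its nop entries; this looks like the pv_mmu_ops initializer, presumably in arch/x86/kernel/paravirt.c.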
@@ -426,9 +426,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_pmd = native_set_pmd,
 	.set_pmd_at = native_set_pmd_at,
 	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
-	.pmd_update = paravirt_nop,
-	.pmd_update_defer = paravirt_nop,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
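The lguest_init() hunk below is presumably arch/x86/lguest/boot.c, where both hooks were set to the same lguest_pte_update function, so dropping the deferred one changes nothing.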
@@ -1472,7 +1472,6 @@ __init void lguest_init(void)
 	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
 	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 	pv_mmu_ops.pte_update = lguest_pte_update;
-	pv_mmu_ops.pte_update_defer = lguest_pte_update;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* APIC read/write intercepts */
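The next four hunks update the callers, presumably in arch/x86/mm/pgtable.c: pte_update_defer() is replaced by pte_update(), and the pmd_update[_defer]() calls are dropped.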
@@ -414,7 +414,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 
 	if (changed && dirty) {
 		*ptep = entry;
-		pte_update_defer(vma->vm_mm, address, ptep);
+		pte_update(vma->vm_mm, address, ptep);
 	}
 
 	return changed;
@@ -431,7 +431,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 
 	if (changed && dirty) {
 		*pmdp = entry;
-		pmd_update_defer(vma->vm_mm, address, pmdp);
 		/*
 		 * We had a write-protection fault here and changed the pmd
 		 * to to more permissive. No need to flush the TLB for that,
@@ -469,9 +468,6 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 	ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
 				 (unsigned long *)pmdp);
 
-	if (ret)
-		pmd_update(vma->vm_mm, addr, pmdp);
-
 	return ret;
 }
 #endif
@@ -518,7 +514,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
 				(unsigned long *)pmdp);
 	if (set) {
-		pmd_update(vma->vm_mm, address, pmdp);
 		/* need tlb flush only to serialize against gup-fast */
 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
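Finally, the Xen hook table, presumably arch/x86/xen/mmu.c, where both pte_update hooks were already paravirt_nop.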
@@ -2436,7 +2436,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
 
 	.pgd_alloc = xen_pgd_alloc,
 	.pgd_free = xen_pgd_free,