x86/xen: move paravirt lazy code

Only Xen is using the paravirt lazy mode code, so it can be moved to
Xen-specific sources.

This allows making some of the functions static or merging them into
their only call sites.

While at it, rename all moved specifiers from "paravirt" to "xen".

No functional change.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20230913113828.18421-3-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
commit a4a7644c15 (parent 361239fd14)
Author: Juergen Gross <jgross@suse.com>
Date:   2023-09-13 13:38:27 +02:00

7 changed files with 102 additions and 116 deletions
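For orientation, the rename amounts to this mapping (condensed from the hunks
below; entries marked "merged" lost their standalone wrapper and were folded
into their Xen-side caller):

/* Old (generic paravirt)                New (Xen-only)
 *
 * enum paravirt_lazy_mode               enum xen_lazy_mode
 * PARAVIRT_LAZY_{NONE,MMU,CPU}          XEN_LAZY_{NONE,MMU,CPU}
 * paravirt_get_lazy_mode()              xen_get_lazy_mode()
 * paravirt_start_context_switch()       xen_start_context_switch()
 * paravirt_end_context_switch()         merged into xen_end_context_switch()
 * paravirt_enter_lazy_mmu()             xen_enter_lazy_mmu()
 * paravirt_leave_lazy_mmu()             merged into xen_leave_lazy_mmu()
 * paravirt_flush_lazy_mmu()             xen_flush_lazy_mmu()
 * enter_lazy()/leave_lazy()             moved to asm/xen/hypervisor.h as inlines
 */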

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h

@@ -9,13 +9,6 @@ struct paravirt_patch_site {
 	u8 type;		/* type of this instruction */
 	u8 len;			/* length of original instruction */
 };
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
 #endif
 
 #ifdef CONFIG_PARAVIRT
@@ -549,14 +542,6 @@ int paravirt_disable_iospace(void);
 	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
-
 void _paravirt_nop(void);
 void paravirt_BUG(void);
 unsigned long paravirt_ret0(void);

diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h

@@ -36,6 +36,7 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
+#include <asm/bug.h>
 #include <asm/processor.h>
 
 #define XEN_SIGNATURE "XenVMMXenVMM"
@@ -63,4 +64,29 @@ void __init xen_pvh_init(struct boot_params *boot_params);
 void __init mem_map_via_hcall(struct boot_params *boot_params_p);
 #endif
 
+/* Lazy mode for batching updates / context switch */
+enum xen_lazy_mode {
+	XEN_LAZY_NONE,
+	XEN_LAZY_MMU,
+	XEN_LAZY_CPU,
+};
+
+DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
+
+static inline void enter_lazy(enum xen_lazy_mode mode)
+{
+	BUG_ON(this_cpu_read(xen_lazy_mode) != XEN_LAZY_NONE);
+
+	this_cpu_write(xen_lazy_mode, mode);
+}
+
+static inline void leave_lazy(enum xen_lazy_mode mode)
+{
+	BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
+
+	this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
+}
+
+enum xen_lazy_mode xen_get_lazy_mode(void);
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
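The BUG_ON()s above make the nesting discipline explicit: a CPU is in at most
one lazy mode at a time, and every exit must match its entry. A minimal
userspace model of that state machine, with assert() standing in for BUG_ON()
and a plain variable in place of the per-CPU one, behaves like this:

#include <assert.h>

enum xen_lazy_mode { XEN_LAZY_NONE, XEN_LAZY_MMU, XEN_LAZY_CPU };

static enum xen_lazy_mode lazy_mode = XEN_LAZY_NONE;

static void enter_lazy(enum xen_lazy_mode mode)
{
	assert(lazy_mode == XEN_LAZY_NONE);	/* lazy modes must not nest */
	lazy_mode = mode;
}

static void leave_lazy(enum xen_lazy_mode mode)
{
	assert(lazy_mode == mode);	/* exit must match the entered mode */
	lazy_mode = XEN_LAZY_NONE;
}

int main(void)
{
	enter_lazy(XEN_LAZY_MMU);	/* ok: coming from XEN_LAZY_NONE */
	leave_lazy(XEN_LAZY_MMU);	/* ok: matched pair */
	enter_lazy(XEN_LAZY_CPU);
	/* enter_lazy(XEN_LAZY_MMU) here would trip the first assert */
	leave_lazy(XEN_LAZY_CPU);
	return 0;
}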

diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c

@@ -143,66 +143,7 @@ int paravirt_disable_iospace(void)
 	return request_resource(&ioport_resource, &reserve_ioports);
 }
 
-static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
-
-static inline void enter_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-
-	this_cpu_write(paravirt_lazy_mode, mode);
-}
-
-static void leave_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
-
-	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-void paravirt_enter_lazy_mmu(void)
-{
-	enter_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_leave_lazy_mmu(void)
-{
-	leave_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_flush_lazy_mmu(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
 #ifdef CONFIG_PARAVIRT_XXL
-void paravirt_start_context_switch(struct task_struct *prev)
-{
-	BUG_ON(preemptible());
-
-	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
-	}
-	enter_lazy(PARAVIRT_LAZY_CPU);
-}
-
-void paravirt_end_context_switch(struct task_struct *next)
-{
-	BUG_ON(preemptible());
-
-	leave_lazy(PARAVIRT_LAZY_CPU);
-
-	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
-		arch_enter_lazy_mmu_mode();
-}
-
 static noinstr void pv_native_write_cr2(unsigned long val)
 {
 	native_write_cr2(val);
@@ -229,14 +170,6 @@ static noinstr void pv_native_safe_halt(void)
 }
 #endif
 
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	if (in_interrupt())
-		return PARAVIRT_LAZY_NONE;
-
-	return this_cpu_read(paravirt_lazy_mode);
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 #ifdef CONFIG_PARAVIRT_XXL

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c

@@ -101,6 +101,16 @@ struct tls_descs {
 	struct desc_struct desc[3];
 };
 
+DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
+
+enum xen_lazy_mode xen_get_lazy_mode(void)
+{
+	if (in_interrupt())
+		return XEN_LAZY_NONE;
+
+	return this_cpu_read(xen_lazy_mode);
+}
+
 /*
  * Updating the 3 TLS descriptors in the GDT on every task switch is
  * surprisingly expensive so we avoid updating them if they haven't
@@ -362,10 +372,25 @@ static noinstr unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
+static void xen_start_context_switch(struct task_struct *prev)
+{
+	BUG_ON(preemptible());
+
+	if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
+	}
+	enter_lazy(XEN_LAZY_CPU);
+}
+
 static void xen_end_context_switch(struct task_struct *next)
 {
+	BUG_ON(preemptible());
+
 	xen_mc_flush();
-	paravirt_end_context_switch(next);
+	leave_lazy(XEN_LAZY_CPU);
+	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
+		arch_enter_lazy_mmu_mode();
 }
 
 static unsigned long xen_store_tr(void)
@@ -472,7 +497,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
 
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_load_gdt(const struct desc_ptr *dtr)
@@ -568,7 +593,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 * exception between the new %fs descriptor being loaded and
 	 * %fs being effectively cleared at __switch_to().
 	 */
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+	if (xen_get_lazy_mode() == XEN_LAZY_CPU)
 		loadsegment(fs, 0);
 
 	xen_mc_batch();
@@ -577,7 +602,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	load_TLS_descriptor(t, cpu, 1);
 	load_TLS_descriptor(t, cpu, 2);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_load_gs_index(unsigned int idx)
@@ -909,7 +934,7 @@ static void xen_load_sp0(unsigned long sp0)
 
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
@@ -973,7 +998,7 @@ static void xen_write_cr0(unsigned long cr0)
 
 	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_write_cr4(unsigned long cr4)
@@ -1156,7 +1181,7 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
 #endif
 		.io_delay = xen_io_delay,
 
-		.start_context_switch = paravirt_start_context_switch,
+		.start_context_switch = xen_start_context_switch,
 		.end_context_switch = xen_end_context_switch,
 	},
 };
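Taken together, the two context-switch hooks keep the lazy-mode bookkeeping
that used to live in paravirt.c entirely on the Xen side. A condensed view of
one task switch (comments only; the real code is in the hunks above):

/*
 * xen_start_context_switch(prev)
 *   - if prev was in XEN_LAZY_MMU: arch_leave_lazy_mmu_mode() and remember
 *     that via TIF_LAZY_MMU_UPDATES on prev
 *   - enter_lazy(XEN_LAZY_CPU): CPU-state hypercalls now only queue
 * __switch_to()
 *   - GDT/TLS/sp0 updates batch up; each xen_mc_issue(XEN_LAZY_CPU) defers
 *     because the CPU lazy mode is active
 * xen_end_context_switch(next)
 *   - xen_mc_flush(): issue the whole batch at once
 *   - leave_lazy(XEN_LAZY_CPU)
 *   - if next was switched out with lazy MMU active (TIF_LAZY_MMU_UPDATES):
 *     arch_enter_lazy_mmu_mode() to resume it
 */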

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c

@@ -236,7 +236,7 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	u.val = pmd_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -270,7 +270,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 {
 	struct mmu_update u;
 
-	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+	if (xen_get_lazy_mode() != XEN_LAZY_MMU)
 		return false;
 
 	xen_mc_batch();
@@ -279,7 +279,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	u.val = pte_val_ma(pteval);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	return true;
 }
@@ -325,7 +325,7 @@ void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 /* Assume pteval_t is equivalent to all the other *val_t types. */
@@ -419,7 +419,7 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	u.val = pud_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -499,7 +499,7 @@ static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
 
 	__xen_set_p4d_hyper(ptr, val);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -531,7 +531,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 	if (user_ptr)
 		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -1245,7 +1245,7 @@ static noinline void xen_flush_tlb(void)
 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -1265,7 +1265,7 @@ static void xen_flush_tlb_one_user(unsigned long addr)
 	op->arg1.linear_addr = addr & PAGE_MASK;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -1302,7 +1302,7 @@ static void xen_flush_tlb_multi(const struct cpumask *cpus,
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 static unsigned long xen_read_cr3(void)
@@ -1361,7 +1361,7 @@ static void xen_write_cr3(unsigned long cr3)
 	else
 		__xen_write_cr3(false, 0);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
+	xen_mc_issue(XEN_LAZY_CPU);	/* interrupts restored */
 }
 
 /*
@@ -1396,7 +1396,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
 
 	__xen_write_cr3(true, cr3);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
+	xen_mc_issue(XEN_LAZY_CPU);	/* interrupts restored */
 }
 
 static int xen_pgd_alloc(struct mm_struct *mm)
@@ -1557,7 +1557,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
-		xen_mc_issue(PARAVIRT_LAZY_MMU);
+		xen_mc_issue(XEN_LAZY_MMU);
 	}
 }
@@ -1587,7 +1587,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 
 		__set_pfn_prot(pfn, PAGE_KERNEL);
 
-		xen_mc_issue(PARAVIRT_LAZY_MMU);
+		xen_mc_issue(XEN_LAZY_MMU);
 
 		ClearPagePinned(page);
 	}
@@ -1804,7 +1804,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	 */
 	xen_mc_batch();
 	__xen_write_cr3(true, __pa(init_top_pgt));
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
 	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
@@ -2083,6 +2083,23 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
+static void xen_enter_lazy_mmu(void)
+{
+	enter_lazy(XEN_LAZY_MMU);
+}
+
+static void xen_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 static void __init xen_post_allocator_init(void)
 {
 	pv_ops.mmu.set_pte = xen_set_pte;
@@ -2107,7 +2124,7 @@ static void xen_leave_lazy_mmu(void)
 {
 	preempt_disable();
 	xen_mc_flush();
-	paravirt_leave_lazy_mmu();
+	leave_lazy(XEN_LAZY_MMU);
 	preempt_enable();
 }
@@ -2166,9 +2183,9 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
 	.exit_mmap = xen_exit_mmap,
 
 	.lazy_mode = {
-		.enter = paravirt_enter_lazy_mmu,
+		.enter = xen_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
-		.flush = paravirt_flush_lazy_mmu,
+		.flush = xen_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,
@@ -2385,7 +2402,7 @@ static noinline void xen_flush_tlb_all(void)
 	op->cmd = MMUEXT_TLB_FLUSH_ALL;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
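On the MMU side the batching behaviour is unchanged: inside a lazy-MMU section
each xen_mc_issue(XEN_LAZY_MMU) only queues, and xen_leave_lazy_mmu() issues
the whole batch. An illustrative sequence (hypothetical mm/addr/ptep/pte
values; the hook wiring is the .lazy_mode hunk above):

	arch_enter_lazy_mmu_mode();		/* .enter -> xen_enter_lazy_mmu() */
	set_pte_at(mm, addr1, ptep1, pte1);	/* queued: xen_batched_set_pte() sees XEN_LAZY_MMU */
	set_pte_at(mm, addr2, ptep2, pte2);	/* queued as well */
	arch_leave_lazy_mmu_mode();		/* .leave -> xen_leave_lazy_mmu(): xen_mc_flush() */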

diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h

@@ -26,7 +26,7 @@ static inline void xen_mc_batch(void)
 
 	/* need to disable interrupts until this entry is complete */
 	local_irq_save(flags);
-	trace_xen_mc_batch(paravirt_get_lazy_mode());
+	trace_xen_mc_batch(xen_get_lazy_mode());
 	__this_cpu_write(xen_mc_irq_flags, flags);
 }
 
@@ -44,7 +44,7 @@ static inline void xen_mc_issue(unsigned mode)
 {
 	trace_xen_mc_issue(mode);
 
-	if ((paravirt_get_lazy_mode() & mode) == 0)
+	if ((xen_get_lazy_mode() & mode) == 0)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
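Since XEN_LAZY_MMU and XEN_LAZY_CPU are the distinct non-zero enum values 1
and 2, the bitwise AND in xen_mc_issue() acts as a match test against the
current mode. A small self-contained model of that decision (must_flush() is
a hypothetical name for illustration):

#include <stdbool.h>
#include <stdio.h>

enum xen_lazy_mode { XEN_LAZY_NONE, XEN_LAZY_MMU, XEN_LAZY_CPU };

/* Returns true when xen_mc_issue() would flush the batch right away. */
static bool must_flush(enum xen_lazy_mode current_mode, unsigned issue_mode)
{
	return (current_mode & issue_mode) == 0;
}

int main(void)
{
	/* Not in a lazy section: every issue flushes immediately. */
	printf("%d\n", must_flush(XEN_LAZY_NONE, XEN_LAZY_MMU));	/* 1 */
	/* In lazy MMU mode, MMU issues are deferred... */
	printf("%d\n", must_flush(XEN_LAZY_MMU, XEN_LAZY_MMU));	/* 0 */
	/* ...but an issue tagged for CPU mode still flushes. */
	printf("%d\n", must_flush(XEN_LAZY_MMU, XEN_LAZY_CPU));	/* 1 */
	return 0;
}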

diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h

@@ -6,26 +6,26 @@
 #define _TRACE_XEN_H
 
 #include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
 #include <asm/xen/trace_types.h>
 
 struct multicall_entry;
 
 /* Multicalls */
 DECLARE_EVENT_CLASS(xen_mc__batch,
-	    TP_PROTO(enum paravirt_lazy_mode mode),
+	    TP_PROTO(enum xen_lazy_mode mode),
 	    TP_ARGS(mode),
 	    TP_STRUCT__entry(
-		    __field(enum paravirt_lazy_mode, mode)
+		    __field(enum xen_lazy_mode, mode)
 		    ),
 	    TP_fast_assign(__entry->mode = mode),
 	    TP_printk("start batch LAZY_%s",
-		      (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
-		      (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+		      (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+		      (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
 	);
 
 #define DEFINE_XEN_MC_BATCH(name)			\
 	DEFINE_EVENT(xen_mc__batch, name,		\
-		TP_PROTO(enum paravirt_lazy_mode mode),	\
+		TP_PROTO(enum xen_lazy_mode mode),	\
 		TP_ARGS(mode))
 
 DEFINE_XEN_MC_BATCH(xen_mc_batch);
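With the renamed enum the rendered tracepoint line keeps its old shape; a
batch started in lazy MMU mode still prints roughly as:

	xen_mc_batch: start batch LAZY_MMU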