x86/paravirt: Drop {read,write}_cr8() hooks
There is a lot of infrastructure for functionality which is used
exclusively in __{save,restore}_processor_state() on the suspend/resume
path.

cr8 is an alias of APIC_TASKPRI, and APIC_TASKPRI is saved/restored by
lapic_{suspend,resume}(). Saving and restoring cr8 independently of the
rest of the Local APIC state isn't a clever thing to be doing.

Delete the suspend/resume cr8 handling, which shrinks the size of
struct saved_context, and allows for the removal of both PVOPS.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lkml.kernel.org/r/20190715151641.29210-1-andrew.cooper3@citrix.com
commit 83b584d9c6
parent 229b969b3d
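Background on the aliasing referenced above: on x86-64, CR8[3:0] is an
architectural alias of APIC_TASKPRI[7:4], so the TPR value captured by
lapic_{suspend,resume}() already carries everything cr8 holds. Below is
a minimal standalone sketch of that bit mapping; the helper names are
hypothetical illustrations, not kernel APIs.

#include <stdint.h>

/*
 * Sketch of the CR8 <-> APIC_TASKPRI aliasing: MOV to CR8 loads
 * TPR[7:4] from CR8[3:0] (clearing TPR[3:0]), and MOV from CR8
 * returns TPR[7:4].  Hypothetical helpers, for illustration only.
 */
static inline uint64_t cr8_from_tpr(uint32_t tpr)
{
        return (tpr >> 4) & 0xf;                /* TPR[7:4] -> CR8[3:0] */
}

static inline uint32_t tpr_from_cr8(uint64_t cr8)
{
        return ((uint32_t)cr8 & 0xf) << 4;      /* CR8[3:0] -> TPR[7:4] */
}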
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -139,18 +139,6 @@ static inline void __write_cr4(unsigned long x)
         PVOP_VCALL1(cpu.write_cr4, x);
 }
 
-#ifdef CONFIG_X86_64
-static inline unsigned long read_cr8(void)
-{
-        return PVOP_CALL0(unsigned long, cpu.read_cr8);
-}
-
-static inline void write_cr8(unsigned long x)
-{
-        PVOP_VCALL1(cpu.write_cr8, x);
-}
-#endif
-
 static inline void arch_safe_halt(void)
 {
         PVOP_VCALL0(irq.safe_halt);
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -119,11 +119,6 @@ struct pv_cpu_ops {
 
         void (*write_cr4)(unsigned long);
 
-#ifdef CONFIG_X86_64
-        unsigned long (*read_cr8)(void);
-        void (*write_cr8)(unsigned long);
-#endif
-
         /* Segment descriptor handling */
         void (*load_tr_desc)(void);
         void (*load_gdt)(const struct desc_ptr *);
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -73,20 +73,6 @@ static inline unsigned long native_read_cr4(void)
 
 void native_write_cr4(unsigned long val);
 
-#ifdef CONFIG_X86_64
-static inline unsigned long native_read_cr8(void)
-{
-        unsigned long cr8;
-        asm volatile("movq %%cr8,%0" : "=r" (cr8));
-        return cr8;
-}
-
-static inline void native_write_cr8(unsigned long val)
-{
-        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-#endif
-
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 static inline u32 rdpkru(void)
 {
@@ -200,16 +186,6 @@ static inline void wbinvd(void)
 
 #ifdef CONFIG_X86_64
 
-static inline unsigned long read_cr8(void)
-{
-        return native_read_cr8();
-}
-
-static inline void write_cr8(unsigned long x)
-{
-        native_write_cr8(x);
-}
-
 static inline void load_gs_index(unsigned selector)
 {
         native_load_gs_index(selector);
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -34,7 +34,7 @@ struct saved_context {
          */
         unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
 
-        unsigned long cr0, cr2, cr3, cr4, cr8;
+        unsigned long cr0, cr2, cr3, cr4;
         u64 misc_enable;
         bool misc_enable_saved;
         struct saved_msrs saved_msrs;
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -62,7 +62,6 @@ int main(void)
         ENTRY(cr2);
         ENTRY(cr3);
         ENTRY(cr4);
-        ENTRY(cr8);
         ENTRY(gdt_desc);
         BLANK();
 #undef ENTRY
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -311,10 +311,6 @@ struct paravirt_patch_template pv_ops = {
         .cpu.read_cr0 = native_read_cr0,
         .cpu.write_cr0 = native_write_cr0,
         .cpu.write_cr4 = native_write_cr4,
-#ifdef CONFIG_X86_64
-        .cpu.read_cr8 = native_read_cr8,
-        .cpu.write_cr8 = native_write_cr8,
-#endif
         .cpu.wbinvd = native_wbinvd,
         .cpu.read_msr = native_read_msr,
         .cpu.write_msr = native_write_msr,
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -122,9 +122,6 @@ static void __save_processor_state(struct saved_context *ctxt)
         ctxt->cr2 = read_cr2();
         ctxt->cr3 = __read_cr3();
         ctxt->cr4 = __read_cr4();
-#ifdef CONFIG_X86_64
-        ctxt->cr8 = read_cr8();
-#endif
         ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
                                                &ctxt->misc_enable);
         msr_save_context(ctxt);
@@ -207,7 +204,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 #else
 /* CONFIG X86_64 */
         wrmsrl(MSR_EFER, ctxt->efer);
-        write_cr8(ctxt->cr8);
         __write_cr4(ctxt->cr4);
 #endif
         write_cr3(ctxt->cr3);
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -877,16 +877,6 @@ static void xen_write_cr4(unsigned long cr4)
 
         native_write_cr4(cr4);
 }
-#ifdef CONFIG_X86_64
-static inline unsigned long xen_read_cr8(void)
-{
-        return 0;
-}
-static inline void xen_write_cr8(unsigned long val)
-{
-        BUG_ON(val);
-}
-#endif
 
 static u64 xen_read_msr_safe(unsigned int msr, int *err)
 {
@@ -1023,11 +1013,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
         .write_cr4 = xen_write_cr4,
 
-#ifdef CONFIG_X86_64
-        .read_cr8 = xen_read_cr8,
-        .write_cr8 = xen_write_cr8,
-#endif
-
         .wbinvd = native_wbinvd,
 
         .read_msr = xen_read_msr,