FPU: Fix KABI issue for copy_user_generic_string opt patch
Signed-off-by: yuehongwu <yuehongwu@tencent.com>
Reviewed-by: caelli <caelli@tencent.com>
Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
This commit is contained in:
parent
5e88d58edb
commit
ad73cd8bf1
|
@ -642,9 +642,10 @@ static inline void switch_kernel_fpu_prepare(struct task_struct *prev, int cpu)
|
|||
}
|
||||
|
||||
/* Internal helper for switch_kernel_fpu_finish() and signal frame setup */
|
||||
extern unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int neg_off);
|
||||
/*
 * Restore the kernel-mode FPU state previously captured by
 * save_fpregs_to_fpkernelstate() (which performs the mirror-image saves).
 *
 * Two regions are restored: the fixed @kfpu->kernel_state member, and a
 * second MAX_FPU_CTX_SIZE chunk placed past the regular xstate image,
 * located via get_fpu_registers_pos().
 *
 * NOTE(review): the NULL first argument presumably selects "the live CPU
 * registers" as the destination — confirm against the definition of
 * kernel_fpu_states_restore().
 */
static inline void fpregs_restore_kernelregs(struct fpu *kfpu)
{
	kernel_fpu_states_restore(NULL, &kfpu->kernel_state, sizeof(kfpu->kernel_state));
	kernel_fpu_states_restore(NULL, (void *)get_fpu_registers_pos(kfpu, MAX_FPU_CTX_SIZE), MAX_FPU_CTX_SIZE);
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -300,9 +300,6 @@ struct fpu {
|
|||
*/
|
||||
unsigned long avx512_timestamp;
|
||||
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
union fpregs_state kernel_state;
|
||||
#endif
|
||||
/*
|
||||
* @state:
|
||||
*
|
||||
|
|
|
@ -40,6 +40,9 @@ copy_user_sse2_opt_string(void *to, const void *from, unsigned len);
|
|||
|
||||
/* Under the SSE2 configuration, the generic large-memory-copy alias
 * resolves to the SSE2 implementation. */
#define copy_user_large_memory_generic_string copy_user_sse2_opt_string

/* Size of one saved kernel FPU context chunk for this configuration
 * (the AVX2 variant uses 256); two chunks are reserved per task via
 * KERNEL_FPU_NONATOMIC_SIZE. */
#define MAX_FPU_CTX_SIZE 64
#define KERNEL_FPU_NONATOMIC_SIZE (2*(MAX_FPU_CTX_SIZE))

#endif //CONFIG_USING_SSE2_FOR_LARGE_MEMORY_COPY

#ifdef CONFIG_USING_AVX2_FOR_LARGE_MEMORY_COPY
|
||||
|
@ -54,6 +57,9 @@ __must_check unsigned long
|
|||
copy_user_avx2_pf64_nt_string(void *to, const void *from, unsigned len);

/* Fixed typo: was "copy_user_large_memory_generic_stirng", which left the
 * correctly spelled generic alias (the one the SSE2 branch defines and
 * callers use) undefined under the AVX2 configuration. */
#define copy_user_large_memory_generic_string copy_user_avx2_pf64_nt_string
/* AVX2 kernel FPU context chunk is 256 bytes (SSE2 variant uses 64);
 * two chunks are reserved per task via KERNEL_FPU_NONATOMIC_SIZE. */
#define MAX_FPU_CTX_SIZE 256
#define KERNEL_FPU_NONATOMIC_SIZE (2*(MAX_FPU_CTX_SIZE))

#endif //NO DEFINE CONFIG_USING_SSE2_FOR_LARGE_MEMORY_COPY
#endif //CONFIG_USING_AVX2_FOR_LARGE_MEMORY_COPY
unsigned long get_nt_block_copy_mini_len(void);
|
||||
|
|
|
@ -137,6 +137,25 @@ void kernel_fpu_end(void)
|
|||
EXPORT_SYMBOL_GPL(kernel_fpu_end);
|
||||
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
extern unsigned int fpu_kernel_nonatomic_xstate_size;
|
||||
unsigned int get_fpustate_free_space(struct fpu *fpu)
|
||||
{
|
||||
if ((fpu_kernel_xstate_size + fpu_kernel_nonatomic_xstate_size) > sizeof(fpu->state))
|
||||
return 0;
|
||||
|
||||
return fpu_kernel_nonatomic_xstate_size;
|
||||
}
|
||||
|
||||
unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int off)
|
||||
{
|
||||
unsigned long addr = 0;
|
||||
if (fpu && (fpu_kernel_nonatomic_xstate_size > off)) {
|
||||
addr = (unsigned long)&fpu->state.__padding[0];
|
||||
addr += fpu_kernel_xstate_size + off;
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
/*
|
||||
* We can call kernel_fpu_begin_nonatomic in non-atomic task context.
|
||||
*/
|
||||
|
@ -148,6 +167,9 @@ int kernel_fpu_begin_nonatomic_mask(unsigned int kfpu_mask)
|
|||
if (test_thread_flag(TIF_USING_FPU_NONATOMIC))
|
||||
goto nested_err;
|
||||
|
||||
if (KERNEL_FPU_NONATOMIC_SIZE > get_fpustate_free_space(&current->thread.fpu))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* This means we call kernel_fpu_begin_nonatomic after kernel_fpu_begin,
|
||||
* but before kernel_fpu_end.
|
||||
|
@ -209,7 +231,7 @@ EXPORT_SYMBOL_GPL(kernel_fpu_end_nonatomic);
|
|||
|
||||
/*
 * Save the kernel-mode FPU state into @kfpu so it can later be restored
 * by fpregs_restore_kernelregs() (which performs the mirror-image
 * restores in the same order).
 *
 * Two regions are saved: the fixed @kfpu->kernel_state member, and a
 * second MAX_FPU_CTX_SIZE chunk placed past the regular xstate image,
 * located via get_fpu_registers_pos().
 *
 * NOTE(review): the NULL second argument presumably selects "the live CPU
 * registers" as the source — confirm against the definition of
 * kernel_fpu_states_save().
 */
void save_fpregs_to_fpkernelstate(struct fpu *kfpu)
{
	kernel_fpu_states_save(&kfpu->kernel_state, NULL, sizeof(kfpu->kernel_state));
	kernel_fpu_states_save((void *)get_fpu_registers_pos(kfpu, MAX_FPU_CTX_SIZE), NULL, MAX_FPU_CTX_SIZE);
}
|
||||
#endif //CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
|
||||
|
|
|
@ -138,6 +138,10 @@ static void __init fpu__init_system_generic(void)
|
|||
*/
|
||||
unsigned int fpu_kernel_xstate_size;
|
||||
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
unsigned int fpu_kernel_nonatomic_xstate_size;
|
||||
EXPORT_SYMBOL_GPL(fpu_kernel_nonatomic_xstate_size);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Enforce that 'MEMBER' is the last field of 'TYPE'.
|
||||
|
@ -168,6 +172,9 @@ static void __init fpu__init_task_struct_size(void)
|
|||
*/
|
||||
task_size += fpu_kernel_xstate_size;
|
||||
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
task_size += fpu_kernel_nonatomic_xstate_size;
|
||||
#endif
|
||||
/*
|
||||
* We dynamically size 'struct fpu', so we require that
|
||||
* it be at the end of 'thread_struct' and that
|
||||
|
|
|
@ -700,6 +700,9 @@ static int __init init_xstate_size(void)
|
|||
/* Recompute the context size for enabled features: */
|
||||
unsigned int possible_xstate_size;
|
||||
unsigned int xsave_size;
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
extern unsigned int fpu_kernel_nonatomic_xstate_size;
|
||||
#endif
|
||||
|
||||
xsave_size = get_xsave_size();
|
||||
|
||||
|
@ -723,6 +726,9 @@ static int __init init_xstate_size(void)
|
|||
* User space is always in standard format.
|
||||
*/
|
||||
fpu_user_xstate_size = xsave_size;
|
||||
#ifdef CONFIG_USING_FPU_IN_KERNEL_NONATOMIC
|
||||
fpu_kernel_nonatomic_xstate_size = KERNEL_FPU_NONATOMIC_SIZE;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue