percpu: Generic support for this_cpu_add, sub, dec, inc_return
Introduce generic support for this_cpu_add_return() etc. The fallback is to implement these operations in terms of the simpler __this_cpu operations. tj: - Reformatted __cpu_size_call_return2() to make it more consistent with its neighbors. - Dropped unnecessary temp variable ret__ from __this_cpu_generic_add_return(). Reviewed-by: Tejun Heo <tj@kernel.org> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Acked-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
a3383e8372
commit
a663ffff1d
|
@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
|
|||
pscr_ret__; \
|
||||
})
|
||||
|
||||
/*
 * Size-dispatching helper like __pcpu_size_call_return(), but for
 * per-cpu operations that take one extra argument (e.g. the value to
 * add).  Expands to stem##<size>(variable, ...) chosen by
 * sizeof(variable) and yields that operation's return value.
 */
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})
|
||||
|
||||
#define __pcpu_size_call(stem, variable, ...) \
|
||||
do { \
|
||||
__verify_pcpu_ptr(&(variable)); \
|
||||
|
@ -529,6 +544,62 @@ do { \
|
|||
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
|
||||
#endif
|
||||
|
||||
/*
 * Preemption-safe generic fallback for this_cpu_add_return(): keep
 * preemption disabled across the add and the read-back so both hit the
 * same CPU's instance, then yield the updated value.
 */
#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})
|
||||
|
||||
/*
 * this_cpu_add_return(): preemption-safe add-and-return.  Architectures
 * may provide optimized per-size variants; any size not overridden
 * falls back to the generic preempt_disable()-based implementation.
 */
#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)		\
	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif
|
||||
|
||||
/* Convenience wrappers expressed via this_cpu_add_return(). */
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
|
||||
|
||||
/*
 * Raw generic fallback for __this_cpu_add_return().  No preemption
 * handling: the caller is expected to have already prevented migration
 * (preemption disabled or CPU-pinned), so a plain add followed by a
 * read suffices.
 */
#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})
|
||||
|
||||
/*
 * __this_cpu_add_return(): raw add-and-return.  Unlike
 * this_cpu_add_return(), these do not disable preemption; the caller
 * must already be pinned to the CPU.  Per-size variants may be
 * overridden by the architecture.
 */
#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
/*
 * Fix: dispatch on the "__this_cpu_add_return_" stem so the raw
 * per-size variants defined above are used.  The original passed
 * "this_cpu_add_return_", which silently selected the heavier
 * preemption-safe family instead.
 */
# define __this_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif
|
||||
|
||||
/*
 * Fix: the raw (double-underscore) wrappers must stay within the raw
 * family.  The original delegated to this_cpu_add_return(), needlessly
 * taking the preempt_disable() path on architectures without optimized
 * variants.
 */
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
|
||||
|
||||
/*
|
||||
* IRQ safe versions of the per cpu RMW operations. Note that these operations
|
||||
* are *not* safe against modification of the same variable from another
|
||||
|
|
Loading…
Reference in New Issue