2020-10-16 11:10:21 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _LINUX_MINMAX_H
|
|
|
|
#define _LINUX_MINMAX_H
|
|
|
|
|
2021-05-23 08:42:02 +08:00
|
|
|
#include <linux/const.h>
|
minmax: add in_range() macro
Patch series "New page table range API", v6.
This patchset changes the API used by the MM to set up page table entries.
The four APIs are:
set_ptes(mm, addr, ptep, pte, nr)
update_mmu_cache_range(vma, addr, ptep, nr)
flush_dcache_folio(folio)
flush_icache_pages(vma, page, nr)
flush_dcache_folio() isn't technically new, but no architecture
implemented it, so I've done that for them. The old APIs remain around
but are mostly implemented by calling the new interfaces.
The new APIs are based around setting up N page table entries at once.
The N entries belong to the same PMD, the same folio and the same VMA, so
ptep++ is a legitimate operation, and locking is taken care of for you.
Some architectures can do a better job of it than just a loop, but I have
hesitated to make too deep a change to architectures I don't understand
well.
One thing I have changed in every architecture is that PG_arch_1 is now a
per-folio bit instead of a per-page bit when used for dcache clean/dirty
tracking. This was something that would have to happen eventually, and it
makes sense to do it now rather than iterate over every page involved in a
cache flush and figure out if it needs to happen.
The point of all this is better performance, and Fengwei Yin has measured
improvement on x86. I suspect you'll see improvement on your architecture
too. Try the new will-it-scale test mentioned here:
https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/
You'll need to run it on an XFS filesystem and have
CONFIG_TRANSPARENT_HUGEPAGE set.
This patchset is the basis for much of the anonymous large folio work
being done by Ryan, so it's received quite a lot of testing over the last
few months.
This patch (of 38):
Determine if a value lies within a range more efficiently (subtraction +
comparison vs two comparisons and an AND). It also has useful (under some
circumstances) behaviour if the range exceeds the maximum value of the
type. Convert all the conflicting definitions of in_range() within the
kernel; some can use the generic definition while others need their own
definition.
Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-08-02 23:13:29 +08:00
|
|
|
#include <linux/types.h>
|
2021-05-23 08:42:02 +08:00
|
|
|
|
2020-10-16 11:10:21 +08:00
|
|
|
/*
 * min()/max()/clamp() macros must accomplish three things:
 *
 * - avoid multiple evaluations of the arguments (so side-effects like
 *   "x++" happen only once) when non-constant.
 * - perform strict type-checking (to generate warnings instead of
 *   nasty runtime surprises). See the "unnecessary" pointer comparison
 *   in __typecheck().
 * - retain result as a constant expressions when called with only
 *   constant expressions (to avoid tripping VLA warnings in stack
 *   allocation usage).
 */

/*
 * Evaluates to 1 when x and y have compatible types, 0 otherwise.
 * Comparing pointers to incompatible types only produces a compiler
 * warning (not an error), and wrapping the comparison in sizeof() keeps
 * the whole expression an (unevaluated) integer constant expression.
 */
#define __typecheck(x, y) \
	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
|
|
|
|
|
|
|
|
/*
 * True when both x and y are integer constant expressions — i.e. neither
 * can have side effects, so evaluating them more than once is safe.
 */
#define __no_side_effects(x, y) \
		(__is_constexpr(x) && __is_constexpr(y))
|
|
|
|
|
|
|
|
/*
 * A comparison is "safe" to expand directly (arguments possibly evaluated
 * multiple times) when the operands have compatible types and are both
 * constant expressions.
 */
#define __safe_cmp(x, y) \
		(__typecheck(x, y) && __no_side_effects(x, y))
|
|
|
|
|
|
|
|
/*
 * Bare comparison: pick x or y according to op. May evaluate its
 * arguments more than once; remains a constant expression when the
 * arguments are constant expressions.
 */
#define __cmp(x, y, op) (!((x) op (y)) ? (y) : (x))
|
|
|
|
|
|
|
|
/*
 * Single-evaluation comparison: copy x and y into uniquely-named local
 * variables first, so side effects in the arguments happen exactly once.
 * The unique names are supplied by the caller (via __UNIQUE_ID) to avoid
 * shadowing when the macro is nested.
 */
#define __cmp_once(x, y, unique_x, unique_y, op) ({	\
	typeof(x) unique_x = (x);			\
	typeof(y) unique_y = (y);			\
	__cmp(unique_x, unique_y, op); })
|
|
|
|
|
|
|
|
/*
 * Expand to the direct comparison (keeping the result a constant
 * expression) when that is safe, otherwise fall back to the
 * single-evaluation __cmp_once() form.
 */
#define __careful_cmp(x, y, op) \
	__builtin_choose_expr(__safe_cmp(x, y), \
		__cmp(x, y, op), \
		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
|
|
|
|
|
2022-09-26 21:34:34 +08:00
|
|
|
/*
 * Bare clamp: constrain val to [lo, hi]. May evaluate its arguments more
 * than once; remains a constant expression when the arguments are
 * constant expressions. Written as one ternary chain (rather than two
 * independent range checks) so only one bound is tested per branch;
 * assumes lo <= hi.
 */
#define __clamp(val, lo, hi) \
	((val) < (hi) ? ((val) > (lo) ? (val) : (lo)) : (hi))
|
2022-09-26 21:34:34 +08:00
|
|
|
|
|
|
|
/*
 * Single-evaluation clamp: copy val, lo and hi into uniquely-named local
 * variables first, so side effects in the arguments happen exactly once.
 */
#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({	\
	typeof(val) unique_val = (val);					\
	typeof(lo) unique_lo = (lo);					\
	typeof(hi) unique_hi = (hi);					\
	__clamp(unique_val, unique_lo, unique_hi); })
|
|
|
|
|
|
|
|
/*
 * Fail the build when lo and hi are compile-time constants forming an
 * inverted range (lo > hi). Expands to 0 otherwise, so it can be added
 * to the clamp result without changing its value. When the bounds are
 * not constant expressions, the check is skipped (false branch).
 */
#define __clamp_input_check(lo, hi) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
                __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
|
|
|
|
|
|
|
|
/*
 * Full clamp implementation: build-time range sanity check, then the
 * direct (constant-expression-preserving) __clamp() when all three
 * arguments have compatible types and are constant expressions,
 * otherwise the single-evaluation __clamp_once() form.
 */
#define __careful_clamp(val, lo, hi) ({					\
	__clamp_input_check(lo, hi) +					\
	__builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
			      __typecheck(hi, lo) && __is_constexpr(val) && \
			      __is_constexpr(lo) && __is_constexpr(hi),	\
		__clamp(val, lo, hi),					\
		__clamp_once(val, lo, hi, __UNIQUE_ID(__val),		\
			     __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
|
|
|
|
|
2020-10-16 11:10:21 +08:00
|
|
|
/**
 * min - return minimum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 *
 * Arguments are evaluated only once when non-constant; the result stays
 * a constant expression when both arguments are constant expressions.
 */
#define min(x, y)	__careful_cmp(x, y, <)
|
|
|
|
|
|
|
|
/**
 * max - return maximum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 *
 * Arguments are evaluated only once when non-constant; the result stays
 * a constant expression when both arguments are constant expressions.
 */
#define max(x, y)	__careful_cmp(x, y, >)
|
|
|
|
|
|
|
|
/**
 * min3 - return minimum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 *
 * The inner min() result is cast to typeof(x) so the outer comparison
 * passes min()'s strict type check.
 */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
|
|
|
|
|
|
|
|
/**
 * max3 - return maximum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 *
 * The inner max() result is cast to typeof(x) so the outer comparison
 * passes max()'s strict type check.
 */
#define max3(x, y, z) max((typeof(x))max(x, y), z)
|
|
|
|
|
|
|
|
/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 *
 * Arguments are copied into locals first, so each is evaluated only once.
 */
#define min_not_zero(x, y) ({			\
	typeof(x) __x = (x);			\
	typeof(y) __y = (y);			\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
|
|
|
|
|
|
|
|
/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of @lo/@hi to make sure they are of the
 * same type as @val. See the unnecessary pointer comparisons.
 *
 * When @lo and @hi are compile-time constants, an inverted range
 * (@lo > @hi) is rejected at build time.
 */
#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
|
2020-10-16 11:10:21 +08:00
|
|
|
|
|
|
|
/*
 * ...and if you can't take the strict types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
|
|
|
|
|
|
|
|
/**
 * min_t - return minimum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 *
 * Both arguments are cast to @type before comparing, which bypasses the
 * strict type check done by min().
 */
#define min_t(type, x, y)	__careful_cmp((type)(x), (type)(y), <)
|
|
|
|
|
|
|
|
/**
 * max_t - return maximum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 *
 * Both arguments are cast to @type before comparing, which bypasses the
 * strict type check done by max().
 */
#define max_t(type, x, y)	__careful_cmp((type)(x), (type)(y), >)
|
|
|
|
|
2023-06-23 16:58:24 +08:00
|
|
|
/*
 * Remove a const qualifier from integer types
 * _Generic(foo, type-name: association, ..., default: association) performs a
 * comparison against the foo type (not the qualified type).
 * Do not use the const keyword in the type-name as it will not match the
 * unqualified type of foo.
 */
/* Expand to the signed and unsigned _Generic cases for one base type. */
#define __unconst_integer_type_cases(type) \
	unsigned type: (unsigned type)0, \
	signed type: (signed type)0
|
|
|
|
|
|
|
|
/*
 * Map x to the unqualified version of its integer type (via a zero of
 * that type). Plain char needs its own case because it is a distinct
 * type from both signed char and unsigned char. Non-integer types fall
 * through unchanged via the default association.
 */
#define __unconst_integer_typeof(x) typeof(			\
	_Generic((x),						\
		char: (char)0,					\
		__unconst_integer_type_cases(char),		\
		__unconst_integer_type_cases(short),		\
		__unconst_integer_type_cases(int),		\
		__unconst_integer_type_cases(long),		\
		__unconst_integer_type_cases(long long),	\
		default: (x)))
|
|
|
|
|
|
|
|
/*
 * Do not check the array parameter using __must_be_array().
 * In the following legit use-case where the "array" passed is a simple pointer,
 * __must_be_array() will return a failure.
 * --- 8< ---
 * int *buff
 * ...
 * min = min_array(buff, nb_items);
 * --- 8< ---
 *
 * The first typeof(&(array)[0]) is needed in order to support arrays of both
 * 'int *buff' and 'int buff[N]' types.
 *
 * The array can be an array of const items.
 * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order
 * to discard the const qualifier for the __element variable.
 *
 * The accumulator is seeded with the last element, then the remaining
 * elements are folded in from the back; @len must therefore be non-zero.
 */
#define __minmax_array(op, array, len) ({				\
	typeof(&(array)[0]) __array = (array);				\
	typeof(len) __len = (len);					\
	__unconst_integer_typeof(__array[0]) __element = __array[--__len]; \
	while (__len--)							\
		__element = op(__element, __array[__len]);		\
	__element; })
|
|
|
|
|
|
|
|
/**
 * min_array - return minimum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define min_array(array, len) __minmax_array(min, array, len)
|
|
|
|
|
|
|
|
/**
 * max_array - return maximum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define max_array(array, len) __minmax_array(max, array, len)
|
|
|
|
|
2020-10-16 11:10:21 +08:00
|
|
|
/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * @type to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
|
2020-10-16 11:10:21 +08:00
|
|
|
|
|
|
|
/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument @val is. This is useful when @val is an unsigned
 * type and @lo and @hi are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
|
|
|
|
|
minmax: add in_range() macro
Patch series "New page table range API", v6.
This patchset changes the API used by the MM to set up page table entries.
The four APIs are:
set_ptes(mm, addr, ptep, pte, nr)
update_mmu_cache_range(vma, addr, ptep, nr)
flush_dcache_folio(folio)
flush_icache_pages(vma, page, nr)
flush_dcache_folio() isn't technically new, but no architecture
implemented it, so I've done that for them. The old APIs remain around
but are mostly implemented by calling the new interfaces.
The new APIs are based around setting up N page table entries at once.
The N entries belong to the same PMD, the same folio and the same VMA, so
ptep++ is a legitimate operation, and locking is taken care of for you.
Some architectures can do a better job of it than just a loop, but I have
hesitated to make too deep a change to architectures I don't understand
well.
One thing I have changed in every architecture is that PG_arch_1 is now a
per-folio bit instead of a per-page bit when used for dcache clean/dirty
tracking. This was something that would have to happen eventually, and it
makes sense to do it now rather than iterate over every page involved in a
cache flush and figure out if it needs to happen.
The point of all this is better performance, and Fengwei Yin has measured
improvement on x86. I suspect you'll see improvement on your architecture
too. Try the new will-it-scale test mentioned here:
https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/
You'll need to run it on an XFS filesystem and have
CONFIG_TRANSPARENT_HUGEPAGE set.
This patchset is the basis for much of the anonymous large folio work
being done by Ryan, so it's received quite a lot of testing over the last
few months.
This patch (of 38):
Determine if a value lies within a range more efficiently (subtraction +
comparison vs two comparisons and an AND). It also has useful (under some
circumstances) behaviour if the range exceeds the maximum value of the
type. Convert all the conflicting definitions of in_range() within the
kernel; some can use the generic definition while others need their own
definition.
Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-08-02 23:13:29 +08:00
|
|
|
/*
 * 64-bit helper for in_range(): a single unsigned compare covers both
 * bounds, because when val < start the subtraction wraps around to a
 * value >= len.
 */
static inline bool in_range64(u64 val, u64 start, u64 len)
{
	u64 offset = val - start;

	return offset < len;
}
|
|
|
|
|
|
|
|
/*
 * 32-bit helper for in_range(): same wrap-around trick as in_range64(),
 * but done in 32-bit arithmetic.
 */
static inline bool in_range32(u32 val, u32 start, u32 len)
{
	u32 offset = val - start;

	return offset < len;
}
|
|
|
|
|
|
|
|
/**
 * in_range - Determine if a value lies within a range.
 * @val: Value to test.
 * @start: First value in range.
 * @len: Number of values in range.
 *
 * This is more efficient than "if (start <= val && val < (start + len))".
 * It also gives a different answer if @start + @len overflows the size of
 * the type by a sufficient amount to encompass @val. Decide for yourself
 * which behaviour you want, or prove that start + len never overflow.
 * Do not blindly replace one form with the other.
 *
 * Dispatches to the 32-bit helper only when all three arguments fit in
 * 32 bits, otherwise does the test in 64-bit arithmetic.
 */
#define in_range(val, start, len)					\
	((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ?	\
		in_range32(val, start, len) : in_range64(val, start, len))
|
|
|
|
|
2020-10-16 11:10:21 +08:00
|
|
|
/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 *
 * @a and @b must be lvalues of compatible types. The do-while(0) wrapper
 * makes the expansion behave as a single statement (safe in unbraced
 * if/else bodies).
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
|
|
|
|
|
|
|
|
#endif /* _LINUX_MINMAX_H */
|