/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H

#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>

/*
 * min()/max()/clamp() macros must accomplish three things:
 *
 * - Avoid multiple evaluations of the arguments (so side-effects like
 *   "x++" happen only once) when non-constant.
 * - Retain the result as a constant expression when called with only
 *   constant expressions (to avoid tripping VLA warnings in stack
 *   allocation usage).
 * - Perform signed v unsigned type-checking (to generate compile
 *   errors instead of nasty runtime surprises).
 * - Unsigned char/short are always promoted to signed int and can be
 *   compared against signed or unsigned arguments.
 * - Unsigned arguments can be compared against non-negative signed constants.
 * - Comparison of a signed argument against an unsigned constant fails
 *   even if the constant is below __INT_MAX__ and could be cast to int.
 */
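
/*
 * Illustrative examples of the rules above (a sketch only; the variables
 * are made up and are not part of this header):
 *
 *	unsigned int len; int count; unsigned char c;
 *
 *	min(len, 4)		OK: unsigned v non-negative signed constant
 *	min(c, count)		OK: unsigned char promotes to signed int
 *	min(count, len)		compile error: signed v unsigned variables
 *	min(count, sizeof(c))	compile error: signed v unsigned constant;
 *				cast the constant or use umin()
 */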
#define __typecheck(x, y) \
	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))

/* is_signed_type() isn't a constexpr for pointer types */
#define __is_signed(x) \
	__builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
		is_signed_type(typeof(x)), 0)

/* True for a non-negative signed int constant */
#define __is_noneg_int(x) \
	(__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)

#define __types_ok(x, y) \
	(__is_signed(x) == __is_signed(y) || \
	 __is_signed((x) + 0) == __is_signed((y) + 0) || \
	 __is_noneg_int(x) || __is_noneg_int(y))

#define __cmp_op_min <
#define __cmp_op_max >

#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))

#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
	typeof(x) unique_x = (x); \
	typeof(y) unique_y = (y); \
	static_assert(__types_ok(x, y), \
		#op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
	__cmp(op, unique_x, unique_y); })

#define __careful_cmp(op, x, y) \
	__builtin_choose_expr(__is_constexpr((x) - (y)), \
		__cmp(op, x, y), \
		__cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
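
/*
 * Sketch of how __careful_cmp() picks a path (illustrative, not part of
 * the original header):
 *
 *	int a[min(16, 32)];	both arguments are constant expressions, so
 *				min() folds to 16 and no VLA is created.
 *	min(i++, limit)		non-constant arguments expand to __cmp_once(),
 *				so "i++" is evaluated exactly once.
 */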

/*
 * Since the limits must satisfy lo <= hi, __clamp() tests against the high
 * limit first and against the low limit only when the value is not above
 * the high one.  At most two comparisons are needed, and on some
 * architectures (e.g. MIPS64) this generates slightly better code than two
 * independent range checks.
 */
#define __clamp(val, lo, hi) \
	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))

#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
	typeof(val) unique_val = (val); \
	typeof(lo) unique_lo = (lo); \
	typeof(hi) unique_hi = (hi); \
	static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
			(lo) <= (hi), true), \
		"clamp() low limit " #lo " greater than high limit " #hi); \
	static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
	static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
	__clamp(unique_val, unique_lo, unique_hi); })

#define __careful_clamp(val, lo, hi) ({ \
	__builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
		__clamp(val, lo, hi), \
		__clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
			     __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
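
/*
 * Sketch of the compile-time checks performed by clamp() (illustrative
 * only; "len" is a made-up non-constant unsigned int):
 *
 *	clamp(len, 16, 4096)	OK: limits are non-negative constants
 *	clamp(len, 4096, 16)	compile error: low limit above high limit
 *				(detected when both limits are constant)
 *	clamp(len, lo, hi)	compile error if lo or hi does not match the
 *				signedness of len (see __types_ok())
 */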

/**
 * min - return minimum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define min(x, y) __careful_cmp(min, x, y)

/**
 * max - return maximum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define max(x, y) __careful_cmp(max, x, y)

/*
 * umin()/umax() are for comparisons where min()/max() would flag a
 * signed v unsigned error but the signed value is known to be
 * non-negative.  Unlike min_t(some_unsigned_type, a, b), they never
 * mask off high bits when an inappropriate type would be picked.
 *
 * The '+ 0u + 0ul + 0ull' promotes both arguments to unsigned long long:
 * '+ 0u' covers 'signed int' on 64-bit systems, '+ 0ul' covers
 * 'signed long' on 32-bit systems and '+ 0ull' covers 'signed long long'.
 */

/**
 * umin - return minimum of two non-negative values
 *   Signed types are zero extended to match a larger unsigned type.
 * @x: first value
 * @y: second value
 */
#define umin(x, y) \
	__careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
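
/*
 * Illustrative use of umin() (a sketch; the variable names are made up):
 *
 *	ssize_t bytes_left;	known to be >= 0 here
 *	size_t buf_size;
 *
 *	n = umin(bytes_left, buf_size);
 *
 * min() would reject the signed v unsigned compare, and
 * min_t(u16, bytes_left, buf_size) could silently truncate large values.
 */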

/**
 * umax - return maximum of two non-negative values
 * @x: first value
 * @y: second value
 */
#define umax(x, y) \
	__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)

/**
 * min3 - return minimum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define min3(x, y, z) min((typeof(x))min(x, y), z)

/**
 * max3 - return maximum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define max3(x, y, z) max((typeof(x))max(x, y), z)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
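
/*
 * Illustrative use of min_not_zero() (a sketch; the names are made up):
 *
 *	unsigned int dev_limit;		0 means "no device-specific limit"
 *	unsigned int global_limit;	always non-zero
 *
 *	limit = min_not_zero(dev_limit, global_limit);
 *
 * picks the smaller limit, but ignores dev_limit when it is unset (zero).
 */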

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of @lo/@hi to make sure they are of the
 * same type as @val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)

/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */

/**
 * min_t - return minimum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))

/**
 * max_t - return maximum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
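
/*
 * Illustrative note on min_t()/max_t() (a sketch; the names are made up):
 *
 *	data_size = min_t(u16, buf_size, len);
 *
 * forces both arguments to u16 first, so a buf_size of 64K becomes 0 and
 * the result is silently wrong.  Prefer min()/umin() with correct types,
 * and reserve min_t() for cases where the cast itself is intended.
 */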

/*
 * Do not check the array parameter using __must_be_array().
 * In the following legit use-case where the "array" passed is a simple pointer,
 * __must_be_array() will return a failure.
 * --- 8< ---
 * int *buff
 * ...
 * min = min_array(buff, nb_items);
 * --- 8< ---
 *
 * The first typeof(&(array)[0]) is needed in order to support arrays of both
 * 'int *buff' and 'int buff[N]' types.
 *
 * The array can be an array of const items.
 * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
 * to discard the const qualifier for the __element variable.
 */
#define __minmax_array(op, array, len) ({ \
	typeof(&(array)[0]) __array = (array); \
	typeof(len) __len = (len); \
	__unqual_scalar_typeof(__array[0]) __element = __array[--__len]; \
	while (__len--) \
		__element = op(__element, __array[__len]); \
	__element; })

/**
 * min_array - return minimum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define min_array(array, len) __minmax_array(min, array, len)

/**
 * max_array - return maximum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define max_array(array, len) __minmax_array(max, array, len)
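
/*
 * Illustrative use of min_array()/max_array() (a sketch; the variables are
 * made up):
 *
 *	int buf[8];		works for a real array...
 *	int *samples;		...and for a plain pointer, see the comment
 *				above __minmax_array()
 *
 *	lowest  = min_array(buf, ARRAY_SIZE(buf));
 *	highest = max_array(samples, nr_samples);
 *
 * @len must not be zero: __minmax_array() reads __array[--__len] before
 * the loop.
 */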

/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * @type to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))

/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument @val is.  This is useful when @val is an unsigned
 * type and @lo and @hi are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
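
/*
 * Caution with clamp_val() when typeof(val) is narrow (an illustrative
 * sketch, not a real call site):
 *
 *	unsigned char x = 200;
 *	y = clamp_val(x, 10, 300);
 *
 * The limits are converted to unsigned char as well, so 300 wraps to 44
 * and the result is 44 rather than the intended 200.  Make sure
 * typeof(val) can represent both limits.
 */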

static inline bool in_range64(u64 val, u64 start, u64 len)
{
	return (val - start) < len;
}

static inline bool in_range32(u32 val, u32 start, u32 len)
{
	return (val - start) < len;
}

/**
 * in_range - Determine if a value lies within a range.
 * @val: Value to test.
 * @start: First value in range.
 * @len: Number of values in range.
 *
 * This is more efficient than "if (start <= val && val < (start + len))".
 * It also gives a different answer if @start + @len overflows the size of
 * the type by a sufficient amount to encompass @val.  Decide for yourself
 * which behaviour you want, or prove that start + len never overflows.
 * Do not blindly replace one form with the other.
 */
#define in_range(val, start, len) \
	((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
		in_range32(val, start, len) : in_range64(val, start, len))
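
/*
 * Illustrative example of the overflow behaviour mentioned above (values
 * chosen for illustration only):
 *
 *	in_range(0x0fU, 0xfffffff0U, 0x20U)
 *
 * is true, because 0x0f - 0xfffffff0 wraps to 0x1f, which is < 0x20.  The
 * naive "start <= val && val < start + len" test is false for the same
 * inputs since start + len overflows to 0x10.
 */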

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

#endif /* _LINUX_MINMAX_H */