Bitmap patches for v6.0-rc1

This branch consists of:

Qu Wenruo:
  lib: bitmap: fix the duplicated comments on bitmap_to_arr64()
  https://lore.kernel.org/lkml/0d85e1dbad52ad7fb5787c4432bdb36cbd24f632.1656063005.git.wqu@suse.com/

Alexander Lobakin:
  bitops: let optimize out non-atomic bitops on compile-time constants
  https://lore.kernel.org/lkml/20220624121313.2382500-1-alexandr.lobakin@intel.com/T/

Yury Norov:
  lib: cleanup bitmap-related headers
  https://lore.kernel.org/linux-arm-kernel/YtCVeOGLiQ4gNPSf@yury-laptop/T/#m305522194c4d38edfdaffa71fcaaf2e2ca00a961

Alexander Lobakin:
  x86/olpc: fix 'logical not is only applied to the left hand side'
  https://www.spinics.net/lists/kernel/msg4440064.html

Yury Norov:
  lib/nodemask: inline wrappers around bitmap
  https://lore.kernel.org/all/20220723214537.2054208-1-yury.norov@gmail.com/

-----BEGIN PGP SIGNATURE-----
iQGzBAABCgAdFiEEi8GdvG6xMhdgpu/4sUSA/TofvsgFAmLpVvwACgkQsUSA/Tof
vsiAHgwAwS9pl8GJ+fKYnue2CYo9349d2oT6BBUs/Rv8uqYEa4QkpYsR7NS733TG
pos0hhoRvSOzrUP4qppXUjfJ+NkzLgpnKFOeWfFoNAKlHuaaMRvF3Y0Q/P8g0/Kg
HPWcCQLHyCH9Wjs3e2TTgRjxTrHuruD2VJ401/PX/lw0DicUhmev5mUFa10uwFkP
ZJRprjoFn9HJ0Hk16pFZDi36d3YumhACOcWRiJdoBDrEPV3S6lm9EeOy/yHBNp5k
9bKj+RboeT2t70KaZcKv+M5j1nu0cAhl7kRkjcxcmGyimI0l82Vgq9yFxhGqvWg8
RnCrJ5EaO08FGCAKG9GEwzdiNa24Gdq5XZSpQA7JZHmhmchpnnlNenJicyv0gOQi
abChZeWSEsyA+78l2+kk9nezfVKUOnKDEZQxBVTOyWsmZYxHZV94oam340VjQDaY
4/fETdOy/qqPIxnpxAeFGWxZjcVaYiYPLj7KLPMsB0aAAF7pZrem465vSfgbrE81
+gCdqrWd
=4dTW
-----END PGP SIGNATURE-----

Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander Lobakin)

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side' (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize <linux/gfp.h> header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
This commit is contained in:

commit 4e23eeebb2
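A note on the "bitops: let optimize out non-atomic bitops on compile-time constants" series above: the idea is a compile-time dispatch. When the bit number and the word it indexes are both known at compile time, the call is routed to a plain C helper the compiler can fold to a constant; otherwise it falls back to the arch implementation. The sketch below is a minimal standalone illustration of that pattern, not the kernel's exact macros; the names my_test_bit, const_test_bit and arch_test_bit are illustrative stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Plain C helper: trivially constant-foldable when nr and *addr are known. */
    static inline bool const_test_bit(unsigned long nr, const unsigned long *addr)
    {
            return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
    }

    /* Stand-in for an arch helper (in the kernel this may be inline asm). */
    static inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
    {
            return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
    }

    /*
     * Dispatch: if both the bit number and the addressed word are compile-time
     * constants, use the const_ helper so the whole expression folds to a
     * constant; otherwise use the (possibly asm-based) arch_ helper.
     */
    #define my_test_bit(nr, addr)                                          \
            ((__builtin_constant_p(nr) &&                                  \
              __builtin_constant_p(*(const unsigned long *)(addr))) ?      \
                    const_test_bit(nr, (const unsigned long *)(addr)) :    \
                    arch_test_bit(nr, addr))

    int main(void)
    {
            static const unsigned long frozen[1] = { 0x05UL };
            unsigned long runtime[1] = { 0x05UL };

            printf("%d\n", my_test_bit(2, frozen));  /* typically folds to 1 at -O2 */
            printf("%d\n", my_test_bit(2, runtime)); /* evaluated at run time */
            return 0;
    }

The kernel series wires this kind of check into test_bit()/__set_bit() and friends (with an extra guard against NULL pointers), which is what lets the "lib: test_bitmap: add compile-time optimization/evaluations assertions" commit in the shortlog assert that such calls reduce to constants.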
@@ -22,16 +22,16 @@ Memory Allocation Controls
.. kernel-doc:: include/linux/gfp.h
:internal:

.. kernel-doc:: include/linux/gfp.h
.. kernel-doc:: include/linux/gfp_types.h
:doc: Page mobility and placement hints

.. kernel-doc:: include/linux/gfp.h
.. kernel-doc:: include/linux/gfp_types.h
:doc: Watermark modifiers

.. kernel-doc:: include/linux/gfp.h
.. kernel-doc:: include/linux/gfp_types.h
:doc: Reclaim modifiers

.. kernel-doc:: include/linux/gfp.h
.. kernel-doc:: include/linux/gfp_types.h
:doc: Useful GFP flag combinations

The Slab Cache
@@ -3603,7 +3603,6 @@ F: lib/bitmap.c
F: lib/cpumask.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/nodemask.c
F: lib/test_bitmap.c
F: tools/include/linux/bitmap.h
F: tools/include/linux/find.h

@@ -13136,6 +13135,7 @@ W: http://www.linux-mm.org
T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
F: include/linux/gfp.h
F: include/linux/gfp_types.h
F: include/linux/memory_hotplug.h
F: include/linux/mm.h
F: include/linux/mmzone.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static inline void
__set_bit(unsigned long nr, volatile void * addr)
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);

@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);

@@ -94,7 +94,7 @@ static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
smp_mb();
__clear_bit(nr, addr);
arch___clear_bit(nr, addr);
}

static inline void

@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);

@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
/*
* WARNING: non atomic version.
*/
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);

@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);

@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);

@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
return __ffs(tmp) + ofs;
}

#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
unsigned long _find_next_zero_bit_le(const unsigned long *p,
unsigned long size, unsigned long offset);
unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);

/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
unsigned long _find_next_zero_bit_be(const unsigned long *p,
unsigned long size, unsigned long offset);
unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);

#ifndef CONFIG_SMP
/*
@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
|
|||
* be atomic, particularly for things like slab_lock and slab_unlock.
|
||||
*
|
||||
*/
|
||||
static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
test_and_clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void __set_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
test_and_set_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void __change_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
||||
/* Apparently, at least some of these are allowed to be non-atomic */
|
||||
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_set_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline int __test_bit(int nr, const volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
int retval;
|
||||
|
||||
|
@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
|
|||
return retval;
|
||||
}
|
||||
|
||||
#define test_bit(nr, addr) __test_bit(nr, addr)
|
||||
|
||||
/*
|
||||
* ffz - find first zero in word.
|
||||
* @word: The word to search
|
||||
|
@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
|
|||
}
|
||||
|
||||
#include <asm-generic/bitops/lock.h>
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
|
||||
#include <asm-generic/bitops/fls64.h>
|
||||
#include <asm-generic/bitops/sched.h>
|
||||
|
|
|
@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __set_bit - Set a bit in memory
|
||||
* arch___set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
|
|||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __inline__ void
|
||||
__set_bit (int nr, volatile void *addr)
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
|
||||
}
|
||||
|
@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __clear_bit - Clears a bit in memory (non-atomic version)
|
||||
* arch___clear_bit - Clears a bit in memory (non-atomic version)
|
||||
* @nr: the bit to clear
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
|
|||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __inline__ void
|
||||
__clear_bit (int nr, volatile void *addr)
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
|
||||
}
|
||||
|
@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __change_bit - Toggle a bit in memory
|
||||
* arch___change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to toggle
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
|
|||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __inline__ void
|
||||
__change_bit (int nr, volatile void *addr)
|
||||
static __always_inline void
|
||||
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
|
||||
}
|
||||
|
@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
|
|||
#define test_and_set_bit_lock test_and_set_bit
|
||||
|
||||
/**
|
||||
* __test_and_set_bit - Set a bit and return its old value
|
||||
* arch___test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
|
@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
|
|||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __inline__ int
|
||||
__test_and_set_bit (int nr, volatile void *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__u32 *p = (__u32 *) addr + (nr >> 5);
|
||||
__u32 m = 1 << (nr & 31);
|
||||
|
@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __test_and_clear_bit - Clear a bit and return its old value
|
||||
* arch___test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
|
@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
|
|||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __inline__ int
|
||||
__test_and_clear_bit(int nr, volatile void * addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__u32 *p = (__u32 *) addr + (nr >> 5);
|
||||
__u32 m = 1 << (nr & 31);
|
||||
|
@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __test_and_change_bit - Change a bit and return its old value
|
||||
* arch___test_and_change_bit - Change a bit and return its old value
|
||||
* @nr: Bit to change
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
*/
|
||||
static __inline__ int
|
||||
__test_and_change_bit (int nr, void *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__u32 old, bit = (1 << (nr & 31));
|
||||
__u32 *m = (__u32 *) addr + (nr >> 5);
|
||||
|
@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
|
|||
return (old & bit) != 0;
|
||||
}
|
||||
|
||||
static __inline__ int
|
||||
test_bit (int nr, const volatile void *addr)
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
|
||||
}
|
||||
|
@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
|
|||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
|
||||
#include <asm-generic/bitops/le.h>
|
||||
|
||||
#include <asm-generic/bitops/ext2-atomic-setbit.h>
|
||||
|
|
|
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
{
unsigned int reg = vector / 64;
unsigned int bit = vector % 64;
u64 irr;
unsigned long irr;

switch (reg) {
case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
|
|||
bfset_mem_set_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
|
||||
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
set_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
|
||||
{
|
||||
|
@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
|
|||
bfclr_mem_clear_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
|
||||
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
|
||||
{
|
||||
|
@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
|
|||
bfchg_mem_change_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
|
||||
|
||||
|
||||
static inline int test_bit(int nr, const volatile unsigned long *vaddr)
|
||||
static __always_inline void
|
||||
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
|
||||
change_bit(nr, addr);
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
|
||||
}
|
||||
|
||||
static inline int bset_reg_test_and_set_bit(int nr,
|
||||
volatile unsigned long *vaddr)
|
||||
|
@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
|
|||
bfset_mem_test_and_set_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
|
||||
|
||||
static __always_inline bool
|
||||
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_set_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline int bclr_reg_test_and_clear_bit(int nr,
|
||||
volatile unsigned long *vaddr)
|
||||
|
@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
|
|||
bfclr_mem_test_and_clear_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
|
||||
|
||||
static __always_inline bool
|
||||
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline int bchg_reg_test_and_change_bit(int nr,
|
||||
volatile unsigned long *vaddr)
|
||||
|
@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
|
|||
bfchg_mem_test_and_change_bit(nr, vaddr))
|
||||
#endif
|
||||
|
||||
#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
|
||||
|
||||
static __always_inline bool
|
||||
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* The true 68020 and more advanced processors support the "bfffo"
|
||||
|
@ -522,6 +540,7 @@ static inline unsigned long __fls(unsigned long x)
|
|||
#define clear_bit_unlock clear_bit
|
||||
#define __clear_bit_unlock clear_bit_unlock
|
||||
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
#include <asm-generic/bitops/ext2-atomic.h>
|
||||
#include <asm-generic/bitops/fls64.h>
|
||||
#include <asm-generic/bitops/sched.h>
|
||||
|
|
|
@@ -2,19 +2,12 @@
#ifndef _ASM_POWERPC_ARCHRANDOM_H
#define _ASM_POWERPC_ARCHRANDOM_H

#include <asm/machdep.h>

static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
return 0;
}

static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
return 1;
return 0;
}
size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);

#ifdef CONFIG_PPC_POWERNV
int pnv_get_random_long(unsigned long *v);
@@ -171,6 +171,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
return 1;
return 0;
}
EXPORT_SYMBOL(arch_get_random_seed_longs);

void machine_halt(void)
{
machine_shutdown();
@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
|
|||
return old & mask;
|
||||
}
|
||||
|
||||
static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
|
||||
*addr |= mask;
|
||||
*p |= mask;
|
||||
}
|
||||
|
||||
static inline void arch___clear_bit(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
|
||||
*addr &= ~mask;
|
||||
*p &= ~mask;
|
||||
}
|
||||
|
||||
static inline void arch___change_bit(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
static __always_inline void
|
||||
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
|
||||
*addr ^= mask;
|
||||
*p ^= mask;
|
||||
}
|
||||
|
||||
static inline bool arch___test_and_set_bit(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
static __always_inline bool
|
||||
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
unsigned long old;
|
||||
|
||||
old = *addr;
|
||||
*addr |= mask;
|
||||
old = *p;
|
||||
*p |= mask;
|
||||
return old & mask;
|
||||
}
|
||||
|
||||
static inline bool arch___test_and_clear_bit(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
static __always_inline bool
|
||||
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
unsigned long old;
|
||||
|
||||
old = *addr;
|
||||
*addr &= ~mask;
|
||||
old = *p;
|
||||
*p &= ~mask;
|
||||
return old & mask;
|
||||
}
|
||||
|
||||
static inline bool arch___test_and_change_bit(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
static __always_inline bool
|
||||
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long *addr = __bitops_word(nr, ptr);
|
||||
unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
unsigned long old;
|
||||
|
||||
old = *addr;
|
||||
*addr ^= mask;
|
||||
old = *p;
|
||||
*p ^= mask;
|
||||
return old & mask;
|
||||
}
|
||||
|
||||
static inline bool arch_test_bit(unsigned long nr,
|
||||
const volatile unsigned long *ptr)
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
const volatile unsigned long *addr = __bitops_word(nr, ptr);
|
||||
const volatile unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
|
||||
return *addr & mask;
|
||||
return *p & mask;
|
||||
}
|
||||
|
||||
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
#ifndef __ASM_SH_BITOPS_OP32_H
|
||||
#define __ASM_SH_BITOPS_OP32_H
|
||||
|
||||
#include <linux/bits.h>
|
||||
|
||||
/*
|
||||
* The bit modifying instructions on SH-2A are only capable of working
|
||||
* with a 3-bit immediate, which signifies the shift position for the bit
|
||||
|
@ -16,7 +18,8 @@
|
|||
#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE)
|
||||
#endif
|
||||
|
||||
static inline void __set_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
if (__builtin_constant_p(nr)) {
|
||||
__asm__ __volatile__ (
|
||||
|
@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
|
|||
}
|
||||
}
|
||||
|
||||
static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
if (__builtin_constant_p(nr)) {
|
||||
__asm__ __volatile__ (
|
||||
|
@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __change_bit - Toggle a bit in memory
|
||||
* arch___change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
|||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static inline void __change_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
if (__builtin_constant_p(nr)) {
|
||||
__asm__ __volatile__ (
|
||||
|
@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __test_and_set_bit - Set a bit and return its old value
|
||||
* arch___test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
|
@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
|
|||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
|||
}
|
||||
|
||||
/**
|
||||
* __test_and_clear_bit - Clear a bit and return its old value
|
||||
* arch___test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
|
@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
|||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
|
|||
}
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static inline int __test_and_change_bit(int nr,
|
||||
volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
|
|||
}
|
||||
|
||||
/**
|
||||
* test_bit - Determine whether a bit is set
|
||||
* arch_test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static inline int test_bit(int nr, const volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
|
||||
#endif /* __ASM_SH_BITOPS_OP32_H */
|
||||
|
|
|
@@ -19,9 +19,9 @@
#error only <linux/bitops.h> can be included directly
#endif

unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);

/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'

@@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

return ___set_bit(ADDR, mask) != 0;
return sp32___set_bit(ADDR, mask) != 0;
}

static inline void set_bit(unsigned long nr, volatile unsigned long *addr)

@@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

(void) ___set_bit(ADDR, mask);
(void) sp32___set_bit(ADDR, mask);
}

static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)

@@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

return ___clear_bit(ADDR, mask) != 0;
return sp32___clear_bit(ADDR, mask) != 0;
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)

@@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

(void) ___clear_bit(ADDR, mask);
(void) sp32___clear_bit(ADDR, mask);
}

static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)

@@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

return ___change_bit(ADDR, mask) != 0;
return sp32___change_bit(ADDR, mask) != 0;
}

static inline void change_bit(unsigned long nr, volatile unsigned long *addr)

@@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);

(void) ___change_bit(ADDR, mask);
(void) sp32___change_bit(ADDR, mask);
}

#include <asm-generic/bitops/non-atomic.h>
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
}
EXPORT_SYMBOL(arch_atomic_set);

unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;

@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)

return old & mask;
}
EXPORT_SYMBOL(___set_bit);
EXPORT_SYMBOL(sp32___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;

@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)

return old & mask;
}
EXPORT_SYMBOL(___clear_bit);
EXPORT_SYMBOL(sp32___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;

@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)

return old & mask;
}
EXPORT_SYMBOL(___change_bit);
EXPORT_SYMBOL(sp32___change_bit);

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
}

static __always_inline void
arch___set_bit(long nr, volatile unsigned long *addr)
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
}

static __always_inline void
arch___clear_bit(long nr, volatile unsigned long *addr)
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
}

static __always_inline void
arch___change_bit(long nr, volatile unsigned long *addr)
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
}

static __always_inline bool
arch___test_and_set_bit(long nr, volatile unsigned long *addr)
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;

@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
* this without also updating arch/x86/kernel/kvm.c
*/
static __always_inline bool
arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;

@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
}

static __always_inline bool
arch___test_and_change_bit(long nr, volatile unsigned long *addr)
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;

@@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
return oldbit;
}

#define arch_test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
variable_test_bit(nr, addr);
}

/**
* __ffs - find first set bit in word
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
return;
}

if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
return; /* Nothing new to report. */

input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);

if (!node_online(node))
if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -4971,7 +4971,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);

/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,

field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
dev->caps.num_ports);
(unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

size = dev->caps.function_caps; /* set PF behaviours */
@ -0,0 +1,161 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
|
||||
#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
|
||||
|
||||
#include <linux/bits.h>
|
||||
|
||||
#ifndef _LINUX_BITOPS_H
|
||||
#error only <linux/bitops.h> can be included directly
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Generic definitions for bit operations, should not be used in regular code
|
||||
* directly.
|
||||
*/
|
||||
|
||||
/**
|
||||
* generic___set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike set_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __always_inline void
|
||||
generic___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p |= mask;
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p &= ~mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* generic___change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike change_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __always_inline void
|
||||
generic___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p ^= mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* generic___test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __always_inline bool
|
||||
generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old | mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* generic___test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __always_inline bool
|
||||
generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old & ~mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static __always_inline bool
|
||||
generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old ^ mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* generic_test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static __always_inline bool
|
||||
generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
/*
|
||||
* Unlike the bitops with the '__' prefix above, this one *is* atomic,
|
||||
* so `volatile` must always stay here with no cast-aways. See
|
||||
* `Documentation/atomic_bitops.txt` for the details.
|
||||
*/
|
||||
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
|
||||
/*
|
||||
* const_*() definitions provide good compile-time optimizations when
|
||||
* the passed arguments can be resolved at compile time.
|
||||
*/
|
||||
#define const___set_bit generic___set_bit
|
||||
#define const___clear_bit generic___clear_bit
|
||||
#define const___change_bit generic___change_bit
|
||||
#define const___test_and_set_bit generic___test_and_set_bit
|
||||
#define const___test_and_clear_bit generic___test_and_clear_bit
|
||||
#define const___test_and_change_bit generic___test_and_change_bit
|
||||
|
||||
/**
|
||||
* const_test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*
|
||||
* A version of generic_test_bit() which discards the `volatile` qualifier to
|
||||
* allow a compiler to optimize code harder. Non-atomic and to be called only
|
||||
* for testing compile-time constants, e.g. by the corresponding macros, not
|
||||
* directly from "regular" code.
|
||||
*/
|
||||
static __always_inline bool
|
||||
const_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long val = *p;
|
||||
|
||||
return !!(val & mask);
|
||||
}
|
||||
|
||||
#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
|
|
@ -14,7 +14,7 @@
|
|||
#include <linux/instrumented.h>
|
||||
|
||||
/**
|
||||
* __set_bit - Set a bit in memory
|
||||
* ___set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -22,14 +22,15 @@
|
|||
* region of memory concurrently, the effect may be that only one operation
|
||||
* succeeds.
|
||||
*/
|
||||
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
___set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
instrument_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
arch___set_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* __clear_bit - Clears a bit in memory
|
||||
* ___clear_bit - Clears a bit in memory
|
||||
* @nr: the bit to clear
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -37,14 +38,15 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
|
|||
* region of memory concurrently, the effect may be that only one operation
|
||||
* succeeds.
|
||||
*/
|
||||
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
___clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
instrument_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
arch___clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* __change_bit - Toggle a bit in memory
|
||||
* ___change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
|
@ -52,7 +54,8 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
|
|||
* region of memory concurrently, the effect may be that only one operation
|
||||
* succeeds.
|
||||
*/
|
||||
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline void
|
||||
___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
instrument_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
arch___change_bit(nr, addr);
|
||||
|
@ -83,53 +86,57 @@ static __always_inline void __instrument_read_write_bitop(long nr, volatile unsi
|
|||
}
|
||||
|
||||
/**
|
||||
* __test_and_set_bit - Set a bit and return its old value
|
||||
* ___test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic. If two instances of this operation race, one
|
||||
* can appear to succeed but actually fail.
|
||||
*/
|
||||
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__instrument_read_write_bitop(nr, addr);
|
||||
return arch___test_and_set_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_clear_bit - Clear a bit and return its old value
|
||||
* ___test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic. If two instances of this operation race, one
|
||||
* can appear to succeed but actually fail.
|
||||
*/
|
||||
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__instrument_read_write_bitop(nr, addr);
|
||||
return arch___test_and_clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_change_bit - Change a bit and return its old value
|
||||
* ___test_and_change_bit - Change a bit and return its old value
|
||||
* @nr: Bit to change
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic. If two instances of this operation race, one
|
||||
* can appear to succeed but actually fail.
|
||||
*/
|
||||
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
{
|
||||
__instrument_read_write_bitop(nr, addr);
|
||||
return arch___test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* test_bit - Determine whether a bit is set
|
||||
* _test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
|
||||
static __always_inline bool
|
||||
_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
|
||||
return arch_test_bit(nr, addr);
|
||||
|
|
|
@ -2,121 +2,18 @@
|
|||
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm-generic/bitops/generic-non-atomic.h>
|
||||
|
||||
/**
|
||||
* arch___set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike set_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __always_inline void
|
||||
arch___set_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
#define arch___set_bit generic___set_bit
|
||||
#define arch___clear_bit generic___clear_bit
|
||||
#define arch___change_bit generic___change_bit
|
||||
|
||||
*p |= mask;
|
||||
}
|
||||
#define __set_bit arch___set_bit
|
||||
#define arch___test_and_set_bit generic___test_and_set_bit
|
||||
#define arch___test_and_clear_bit generic___test_and_clear_bit
|
||||
#define arch___test_and_change_bit generic___test_and_change_bit
|
||||
|
||||
static __always_inline void
|
||||
arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
#define arch_test_bit generic_test_bit
|
||||
|
||||
*p &= ~mask;
|
||||
}
|
||||
#define __clear_bit arch___clear_bit
|
||||
|
||||
/**
|
||||
* arch___change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike change_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static __always_inline
|
||||
void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p ^= mask;
|
||||
}
|
||||
#define __change_bit arch___change_bit
|
||||
|
||||
/**
|
||||
* arch___test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __always_inline int
|
||||
arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old | mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
#define __test_and_set_bit arch___test_and_set_bit
|
||||
|
||||
/**
|
||||
* arch___test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __always_inline int
|
||||
arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old & ~mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
#define __test_and_clear_bit arch___test_and_clear_bit
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static __always_inline int
|
||||
arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old ^ mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
#define __test_and_change_bit arch___test_and_change_bit
|
||||
|
||||
/**
|
||||
* arch_test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static __always_inline int
|
||||
arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
#define test_bit arch_test_bit
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
|
||||
|
|
|
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H

#define ___set_bit arch___set_bit
#define ___clear_bit arch___clear_bit
#define ___change_bit arch___change_bit

#define ___test_and_set_bit arch___test_and_set_bit
#define ___test_and_clear_bit arch___test_and_clear_bit
#define ___test_and_change_bit arch___test_and_change_bit

#define _test_bit arch_test_bit

#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
@@ -71,9 +71,9 @@ struct device;
 * bitmap_release_region(bitmap, pos, order)   Free specified bit region
 * bitmap_allocate_region(bitmap, pos, order)  Allocate specified bit region
 * bitmap_from_arr32(dst, buf, nbits)          Copy nbits from u32[] buf to dst
 * bitmap_from_arr64(dst, buf, nbits)          Copy nbits from u64[] buf to dst
 * bitmap_to_arr32(buf, src, nbits)            Copy nbits from buf to u32[] dst
 * bitmap_to_arr64(buf, src, nbits)            Copy nbits from buf to u64[] dst
 * bitmap_to_arr64(buf, src, nbits)            Copy nbits from buf to u64[] dst
 * bitmap_get_value8(map, start)               Get 8bit value from map at start
 * bitmap_set_value8(map, value, start)        Set 8bit value to map at start
 *

@@ -148,13 +148,13 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
                unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
                unsigned int first, unsigned int cut, unsigned int nbits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
                const unsigned long *old, const unsigned long *new,

@@ -163,7 +163,7 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1,
                const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
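The prototype changes above narrow the return types: __bitmap_and()/__bitmap_andnot() only report whether the destination ended up non-empty, and __bitmap_weight() can never be negative. A standalone sketch of the and-and-test pattern those bool returns serve (single-word bitmaps assumed for brevity; not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

/* One-word stand-in for bitmap_and(): returns true if dst is non-empty. */
static bool bitmap_and_1word(unsigned long *dst, unsigned long src1,
                             unsigned long src2)
{
        *dst = src1 & src2;
        return *dst != 0;
}

int main(void)
{
        unsigned long dst;

        if (bitmap_and_1word(&dst, 0x0f0UL, 0x00fUL))
                printf("overlap\n");
        else
                printf("no overlap, dst=%lx\n", dst);   /* this branch runs */
        return 0;
}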
@@ -238,20 +238,32 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0, len);

        if (small_const_nbits(nbits))
                *dst = 0;
        else
                memset(dst, 0, len);
}

static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0xff, len);

        if (small_const_nbits(nbits))
                *dst = ~0UL;
        else
                memset(dst, 0xff, len);
}

static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
                unsigned int nbits)
{
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memcpy(dst, src, len);

        if (small_const_nbits(nbits))
                *dst = *src;
        else
                memcpy(dst, src, len);
}

/*
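When nbits is a compile-time constant that fits in one word, the small_const_nbits() branch added above turns bitmap_zero()/bitmap_fill()/bitmap_copy() into a single word store the compiler can fold, instead of an out-of-line memset()/memcpy(). A standalone sketch of that fast path (the constness check is only modelled here, and BITS_PER_LONG is derived from the host's unsigned long):

/* Illustration of the small-bitmap fast path; not the kernel header. */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline void bitmap_zero_sketch(unsigned long *dst, unsigned int nbits)
{
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

        if (nbits <= BITS_PER_LONG)     /* stand-in for small_const_nbits() */
                *dst = 0;               /* single store, trivially foldable */
        else
                memset(dst, 0, len);    /* big maps keep the memset() path */
}

int main(void)
{
        unsigned long small = ~0UL;
        unsigned long big[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

        bitmap_zero_sketch(&small, 16);                 /* single-store path */
        bitmap_zero_sketch(big, 4 * BITS_PER_LONG);     /* memset() path */
        printf("%lx %lx\n", small, big[3]);             /* prints: 0 0 */
        return 0;
}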
@@ -303,7 +315,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
        bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif

static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
                const unsigned long *src2, unsigned int nbits)
{
        if (small_const_nbits(nbits))

@@ -329,7 +341,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
        __bitmap_xor(dst, src1, src2, nbits);
}

static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
                const unsigned long *src2, unsigned int nbits)
{
        if (small_const_nbits(nbits))

@@ -419,7 +431,8 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
        return find_first_zero_bit(src, nbits) == nbits;
}

static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static __always_inline
unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
        if (small_const_nbits(nbits))
                return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));

@@ -431,6 +444,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
{
        if (__builtin_constant_p(nbits) && nbits == 1)
                __set_bit(start, map);
        else if (small_const_nbits(start + nbits))
                *map |= GENMASK(start + nbits - 1, start);
        else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
                 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
                 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&

@@ -445,6 +460,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
        if (__builtin_constant_p(nbits) && nbits == 1)
                __clear_bit(start, map);
        else if (small_const_nbits(start + nbits))
                *map &= ~GENMASK(start + nbits - 1, start);
        else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
                 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
                 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
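With a constant start/nbits that fit in one word, the new bitmap_set()/bitmap_clear() branches above reduce to a GENMASK() or-in or and-out on the first word. A standalone illustration of the mask that path produces (GENMASK() is redefined locally and a 64-bit unsigned long is assumed):

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(h, l): bits h..l set. */
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
        unsigned long map = 0;
        unsigned int start = 4, nbits = 8;      /* both compile-time constants */

        map |= GENMASK(start + nbits - 1, start);       /* bitmap_set(map, 4, 8) fast path */
        printf("%#lx\n", map);                          /* 0xff0 */

        map &= ~GENMASK(start + nbits - 1, start);      /* bitmap_clear() counterpart */
        printf("%#lx\n", map);                          /* 0 */
        return 0;
}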
@@ -26,12 +26,62 @@ extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because those may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code and due
 * to that the compiler can't optimize them to compile-time expressions or
 * constants. In contrast, generic_*() helpers are defined in pure C and
 * compilers optimize them just well.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. That expression itself is a
 * constant and doesn't bring any functional changes to the rest of cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)                                             \
        ((__builtin_constant_p(nr) &&                                   \
          __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
          (uintptr_t)(addr) != (uintptr_t)NULL &&                       \
          __builtin_constant_p(*(const unsigned long *)(addr))) ?       \
         const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)             bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)           bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)          bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)    bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)  bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)              bitop(_test_bit, nr, addr)

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)                                          \
        static_assert(__same_type(arch_##name, generic_##name) &&       \
                      __same_type(const_##name, generic_##name) &&      \
                      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
        int order;
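The bitop() wrapper above is the core of the series: when the bit number, the bitmap pointer and the pointed-to word are all compile-time constants, it routes the call to the const_*() generic variant so the whole expression folds away. A compilable userspace sketch of the same selection idea (simplified: no token pasting, and const_set_bit() here is just an alias for the generic helper rather than the kernel's dedicated const___set_bit(); GCC/Clang only, since it relies on __builtin_constant_p):

#include <assert.h>
#include <stdio.h>

static inline void generic_set_bit(unsigned int nr, unsigned long *addr)
{
        addr[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
}

#define const_set_bit   generic_set_bit         /* same body; fully foldable in C */
#define set_bit_op(nr, addr)                                            \
        ((__builtin_constant_p(nr) &&                                   \
          __builtin_constant_p(*(const unsigned long *)(addr))) ?       \
                const_set_bit(nr, addr) : generic_set_bit(nr, addr))

int main(void)
{
        unsigned long foo = 0;

        set_bit_op(5, &foo);            /* all-constant: can fold to foo = 0x20 */
        assert(foo == 0x20);

        unsigned int nr = 6;
        set_bit_op(nr, &foo);           /* runtime nr: generic helper path */
        printf("%#lx\n", foo);          /* 0x60 */
        return 0;
}

Either branch calls the same C body, so the selection never changes behaviour; it only decides whether the compiler is allowed to evaluate the whole thing at build time.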
@@ -12,6 +12,8 @@
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

@@ -162,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
        return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}

unsigned int __pure cpumask_next(int n, const struct cpumask *srcp);
/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
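With cpumask_next() now inline, a typical for_each_cpu() loop compiles down to successive find_next_bit() calls with no function-call overhead. A userspace model of the same iteration pattern (the mask is a single word and nr_cpu_ids is fixed at 16 purely for the sketch; this is not the kernel API itself):

#include <stdio.h>

#define NR_CPU_IDS 16

static unsigned int next_cpu(int n, unsigned long mask)
{
        for (unsigned int cpu = n + 1; cpu < NR_CPU_IDS; cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return NR_CPU_IDS;              /* >= nr_cpu_ids means "no more cpus" */
}

int main(void)
{
        unsigned long online = 0x8d;    /* cpus 0, 2, 3 and 7 */

        /* equivalent of: for_each_cpu(cpu, &online) */
        for (unsigned int cpu = next_cpu(-1, online); cpu < NR_CPU_IDS;
             cpu = next_cpu(cpu, online))
                printf("cpu %u\n", cpu);
        return 0;
}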
@ -179,9 +195,6 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
|
|||
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
|
||||
}
|
||||
|
||||
int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
|
||||
int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
|
||||
|
||||
#if NR_CPUS == 1
|
||||
/* Uniprocessor: there is only one valid CPU */
|
||||
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
|
||||
|
@ -200,11 +213,30 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
|
|||
}
|
||||
#else
|
||||
unsigned int cpumask_local_spread(unsigned int i, int node);
|
||||
int cpumask_any_and_distribute(const struct cpumask *src1p,
|
||||
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
|
||||
const struct cpumask *src2p);
|
||||
int cpumask_any_distribute(const struct cpumask *srcp);
|
||||
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
|
||||
#endif /* NR_CPUS */
|
||||
|
||||
/**
|
||||
* cpumask_next_and - get the next cpu in *src1p & *src2p
|
||||
* @n: the cpu prior to the place to search (ie. return will be > @n)
|
||||
* @src1p: the first cpumask pointer
|
||||
* @src2p: the second cpumask pointer
|
||||
*
|
||||
* Returns >= nr_cpu_ids if no further cpus set in both.
|
||||
*/
|
||||
static inline
|
||||
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
|
||||
const struct cpumask *src2p)
|
||||
{
|
||||
/* -1 is a legal arg here. */
|
||||
if (n != -1)
|
||||
cpumask_check(n);
|
||||
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
|
||||
nr_cpumask_bits, n + 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* for_each_cpu - iterate over every cpu in a mask
|
||||
* @cpu: the (optionally unsigned) integer iterator
|
||||
|
@ -229,7 +261,7 @@ int cpumask_any_distribute(const struct cpumask *srcp);
|
|||
(cpu) = cpumask_next_zero((cpu), (mask)), \
|
||||
(cpu) < nr_cpu_ids;)
|
||||
|
||||
int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
|
||||
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
|
||||
|
||||
/**
|
||||
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
|
||||
|
@ -265,6 +297,26 @@ int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
|
|||
(cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
|
||||
(cpu) < nr_cpu_ids;)
|
||||
|
||||
/**
|
||||
* cpumask_any_but - return a "random" in a cpumask, but not this one.
|
||||
* @mask: the cpumask to search
|
||||
* @cpu: the cpu to ignore.
|
||||
*
|
||||
* Often used to find any cpu but smp_processor_id() in a mask.
|
||||
* Returns >= nr_cpu_ids if no cpus set.
|
||||
*/
|
||||
static inline
|
||||
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
cpumask_check(cpu);
|
||||
for_each_cpu(i, mask)
|
||||
if (i != cpu)
|
||||
break;
|
||||
return i;
|
||||
}
|
||||
|
||||
#define CPU_BITS_NONE \
|
||||
{ \
|
||||
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
|
||||
|
@ -311,9 +363,9 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
|
|||
* @cpu: cpu number (< nr_cpu_ids)
|
||||
* @cpumask: the cpumask pointer
|
||||
*
|
||||
* Returns 1 if @cpu is set in @cpumask, else returns 0
|
||||
* Returns true if @cpu is set in @cpumask, else returns false
|
||||
*/
|
||||
static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
|
||||
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
|
||||
{
|
||||
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
|
||||
}
|
||||
|
@ -323,11 +375,11 @@ static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpuma
|
|||
* @cpu: cpu number (< nr_cpu_ids)
|
||||
* @cpumask: the cpumask pointer
|
||||
*
|
||||
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
|
||||
* Returns true if @cpu is set in old bitmap of @cpumask, else returns false
|
||||
*
|
||||
* test_and_set_bit wrapper for cpumasks.
|
||||
*/
|
||||
static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
|
||||
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
|
||||
{
|
||||
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
|
||||
}
|
||||
|
@ -337,11 +389,11 @@ static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpu
|
|||
* @cpu: cpu number (< nr_cpu_ids)
|
||||
* @cpumask: the cpumask pointer
|
||||
*
|
||||
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
|
||||
* Returns true if @cpu is set in old bitmap of @cpumask, else returns false
|
||||
*
|
||||
* test_and_clear_bit wrapper for cpumasks.
|
||||
*/
|
||||
static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
|
||||
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
|
||||
{
|
||||
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
|
||||
}
|
||||
|
@ -370,9 +422,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
|
|||
* @src1p: the first input
|
||||
* @src2p: the second input
|
||||
*
|
||||
* If *@dstp is empty, returns 0, else returns 1
|
||||
* If *@dstp is empty, returns false, else returns true
|
||||
*/
|
||||
static inline int cpumask_and(struct cpumask *dstp,
|
||||
static inline bool cpumask_and(struct cpumask *dstp,
|
||||
const struct cpumask *src1p,
|
||||
const struct cpumask *src2p)
|
||||
{
|
||||
|
@ -413,9 +465,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
|
|||
* @src1p: the first input
|
||||
* @src2p: the second input
|
||||
*
|
||||
* If *@dstp is empty, returns 0, else returns 1
|
||||
* If *@dstp is empty, returns false, else returns true
|
||||
*/
|
||||
static inline int cpumask_andnot(struct cpumask *dstp,
|
||||
static inline bool cpumask_andnot(struct cpumask *dstp,
|
||||
const struct cpumask *src1p,
|
||||
const struct cpumask *src2p)
|
||||
{
|
||||
|
@ -478,9 +530,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
|
|||
* @src1p: the first input
|
||||
* @src2p: the second input
|
||||
*
|
||||
* Returns 1 if *@src1p is a subset of *@src2p, else returns 0
|
||||
* Returns true if *@src1p is a subset of *@src2p, else returns false
|
||||
*/
|
||||
static inline int cpumask_subset(const struct cpumask *src1p,
|
||||
static inline bool cpumask_subset(const struct cpumask *src1p,
|
||||
const struct cpumask *src2p)
|
||||
{
|
||||
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
|
||||
|
@ -682,9 +734,35 @@ typedef struct cpumask *cpumask_var_t;
|
|||
#define __cpumask_var_read_mostly __read_mostly
|
||||
|
||||
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
|
||||
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
|
||||
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
|
||||
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
|
||||
|
||||
static inline
|
||||
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
|
||||
{
|
||||
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* alloc_cpumask_var - allocate a struct cpumask
|
||||
* @mask: pointer to cpumask_var_t where the cpumask is returned
|
||||
* @flags: GFP_ flags
|
||||
*
|
||||
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
|
||||
* a nop returning a constant 1 (in <linux/cpumask.h>).
|
||||
*
|
||||
* See alloc_cpumask_var_node.
|
||||
*/
|
||||
static inline
|
||||
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
|
||||
{
|
||||
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
static inline
|
||||
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
|
||||
{
|
||||
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
|
||||
}
|
||||
|
||||
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
|
||||
void free_cpumask_var(cpumask_var_t mask);
|
||||
void free_bootmem_cpumask_var(cpumask_var_t mask);
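The calling pattern for these wrappers is unchanged by making them inline; only the implementation moved. A typical kernel-side caller looks like the sketch below (the function itself is illustrative and not part of this series; whether cpumask_var_t is stack- or heap-backed still depends on CONFIG_CPUMASK_OFFSTACK):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative caller: count CPUs that are both online and present. */
static int example_count_online(void)
{
        cpumask_var_t tmp;
        int n;

        if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))      /* zeroed allocation */
                return -ENOMEM;

        cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
        n = cpumask_weight(tmp);

        free_cpumask_var(tmp);
        return n;
}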
|
||||
|
|
|
@ -2,357 +2,13 @@
|
|||
#ifndef __LINUX_GFP_H
|
||||
#define __LINUX_GFP_H
|
||||
|
||||
#include <linux/mmdebug.h>
|
||||
#include <linux/gfp_types.h>
|
||||
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/topology.h>
|
||||
|
||||
/* The typedef is in types.h but we want the documentation here */
|
||||
#if 0
|
||||
/**
|
||||
* typedef gfp_t - Memory allocation flags.
|
||||
*
|
||||
* GFP flags are commonly used throughout Linux to indicate how memory
|
||||
* should be allocated. The GFP acronym stands for get_free_pages(),
|
||||
* the underlying memory allocation function. Not every GFP flag is
|
||||
* supported by every function which may allocate memory. Most users
|
||||
* will want to use a plain ``GFP_KERNEL``.
|
||||
*/
|
||||
typedef unsigned int __bitwise gfp_t;
|
||||
#endif
|
||||
|
||||
struct vm_area_struct;
|
||||
|
||||
/*
|
||||
* In case of changes, please don't forget to update
|
||||
* include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
|
||||
*/
|
||||
|
||||
/* Plain integer GFP bitmasks. Do not use this directly. */
|
||||
#define ___GFP_DMA 0x01u
|
||||
#define ___GFP_HIGHMEM 0x02u
|
||||
#define ___GFP_DMA32 0x04u
|
||||
#define ___GFP_MOVABLE 0x08u
|
||||
#define ___GFP_RECLAIMABLE 0x10u
|
||||
#define ___GFP_HIGH 0x20u
|
||||
#define ___GFP_IO 0x40u
|
||||
#define ___GFP_FS 0x80u
|
||||
#define ___GFP_ZERO 0x100u
|
||||
#define ___GFP_ATOMIC 0x200u
|
||||
#define ___GFP_DIRECT_RECLAIM 0x400u
|
||||
#define ___GFP_KSWAPD_RECLAIM 0x800u
|
||||
#define ___GFP_WRITE 0x1000u
|
||||
#define ___GFP_NOWARN 0x2000u
|
||||
#define ___GFP_RETRY_MAYFAIL 0x4000u
|
||||
#define ___GFP_NOFAIL 0x8000u
|
||||
#define ___GFP_NORETRY 0x10000u
|
||||
#define ___GFP_MEMALLOC 0x20000u
|
||||
#define ___GFP_COMP 0x40000u
|
||||
#define ___GFP_NOMEMALLOC 0x80000u
|
||||
#define ___GFP_HARDWALL 0x100000u
|
||||
#define ___GFP_THISNODE 0x200000u
|
||||
#define ___GFP_ACCOUNT 0x400000u
|
||||
#define ___GFP_ZEROTAGS 0x800000u
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
#define ___GFP_SKIP_ZERO 0x1000000u
|
||||
#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
|
||||
#define ___GFP_SKIP_KASAN_POISON 0x4000000u
|
||||
#else
|
||||
#define ___GFP_SKIP_ZERO 0
|
||||
#define ___GFP_SKIP_KASAN_UNPOISON 0
|
||||
#define ___GFP_SKIP_KASAN_POISON 0
|
||||
#endif
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
#define ___GFP_NOLOCKDEP 0x8000000u
|
||||
#else
|
||||
#define ___GFP_NOLOCKDEP 0
|
||||
#endif
|
||||
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
|
||||
|
||||
/*
|
||||
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
|
||||
*
|
||||
* Do not put any conditional on these. If necessary modify the definitions
|
||||
* without the underscores and use them consistently. The definitions here may
|
||||
* be used in bit comparisons.
|
||||
*/
|
||||
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
|
||||
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
|
||||
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
|
||||
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
|
||||
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
|
||||
|
||||
/**
|
||||
* DOC: Page mobility and placement hints
|
||||
*
|
||||
* Page mobility and placement hints
|
||||
* ---------------------------------
|
||||
*
|
||||
* These flags provide hints about how mobile the page is. Pages with similar
|
||||
* mobility are placed within the same pageblocks to minimise problems due
|
||||
* to external fragmentation.
|
||||
*
|
||||
* %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
|
||||
* moved by page migration during memory compaction or can be reclaimed.
|
||||
*
|
||||
* %__GFP_RECLAIMABLE is used for slab allocations that specify
|
||||
* SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
|
||||
*
|
||||
* %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
|
||||
* these pages will be spread between local zones to avoid all the dirty
|
||||
* pages being in one zone (fair zone allocation policy).
|
||||
*
|
||||
* %__GFP_HARDWALL enforces the cpuset memory allocation policy.
|
||||
*
|
||||
* %__GFP_THISNODE forces the allocation to be satisfied from the requested
|
||||
* node with no fallbacks or placement policy enforcements.
|
||||
*
|
||||
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
|
||||
*/
|
||||
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
|
||||
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
|
||||
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
|
||||
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
|
||||
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
|
||||
|
||||
/**
|
||||
* DOC: Watermark modifiers
|
||||
*
|
||||
* Watermark modifiers -- controls access to emergency reserves
|
||||
* ------------------------------------------------------------
|
||||
*
|
||||
* %__GFP_HIGH indicates that the caller is high-priority and that granting
|
||||
* the request is necessary before the system can make forward progress.
|
||||
* For example, creating an IO context to clean pages.
|
||||
*
|
||||
* %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
|
||||
* high priority. Users are typically interrupt handlers. This may be
|
||||
* used in conjunction with %__GFP_HIGH
|
||||
*
|
||||
* %__GFP_MEMALLOC allows access to all memory. This should only be used when
|
||||
* the caller guarantees the allocation will allow more memory to be freed
|
||||
* very shortly e.g. process exiting or swapping. Users either should
|
||||
* be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
|
||||
* Users of this flag have to be extremely careful to not deplete the reserve
|
||||
* completely and implement a throttling mechanism which controls the
|
||||
* consumption of the reserve based on the amount of freed memory.
|
||||
* Usage of a pre-allocated pool (e.g. mempool) should be always considered
|
||||
* before using this flag.
|
||||
*
|
||||
* %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
|
||||
* This takes precedence over the %__GFP_MEMALLOC flag if both are set.
|
||||
*/
|
||||
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
|
||||
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
|
||||
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
|
||||
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
|
||||
|
||||
/**
|
||||
* DOC: Reclaim modifiers
|
||||
*
|
||||
* Reclaim modifiers
|
||||
* -----------------
|
||||
* Please note that all the following flags are only applicable to sleepable
|
||||
* allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
|
||||
*
|
||||
* %__GFP_IO can start physical IO.
|
||||
*
|
||||
* %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
|
||||
* allocator recursing into the filesystem which might already be holding
|
||||
* locks.
|
||||
*
|
||||
* %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
|
||||
* This flag can be cleared to avoid unnecessary delays when a fallback
|
||||
* option is available.
|
||||
*
|
||||
* %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
|
||||
* the low watermark is reached and have it reclaim pages until the high
|
||||
* watermark is reached. A caller may wish to clear this flag when fallback
|
||||
* options are available and the reclaim is likely to disrupt the system. The
|
||||
* canonical example is THP allocation where a fallback is cheap but
|
||||
* reclaim/compaction may cause indirect stalls.
|
||||
*
|
||||
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
|
||||
*
|
||||
* The default allocator behavior depends on the request size. We have a concept
|
||||
* of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
|
||||
* !costly allocations are too essential to fail so they are implicitly
|
||||
* non-failing by default (with some exceptions like OOM victims might fail so
|
||||
* the caller still has to check for failures) while costly requests try to be
|
||||
* not disruptive and back off even without invoking the OOM killer.
|
||||
* The following three modifiers might be used to override some of these
|
||||
* implicit rules
|
||||
*
|
||||
* %__GFP_NORETRY: The VM implementation will try only very lightweight
|
||||
* memory direct reclaim to get some memory under memory pressure (thus
|
||||
* it can sleep). It will avoid disruptive actions like OOM killer. The
|
||||
* caller must handle the failure which is quite likely to happen under
|
||||
* heavy memory pressure. The flag is suitable when failure can easily be
|
||||
* handled at small cost, such as reduced throughput
|
||||
*
|
||||
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
|
||||
* procedures that have previously failed if there is some indication
|
||||
* that progress has been made elsewhere. It can wait for other
|
||||
* tasks to attempt high level approaches to freeing memory such as
|
||||
* compaction (which removes fragmentation) and page-out.
|
||||
* There is still a definite limit to the number of retries, but it is
|
||||
* a larger limit than with %__GFP_NORETRY.
|
||||
* Allocations with this flag may fail, but only when there is
|
||||
* genuinely little unused memory. While these allocations do not
|
||||
* directly trigger the OOM killer, their failure indicates that
|
||||
* the system is likely to need to use the OOM killer soon. The
|
||||
* caller must handle failure, but can reasonably do so by failing
|
||||
* a higher-level request, or completing it only in a much less
|
||||
* efficient manner.
|
||||
* If the allocation does fail, and the caller is in a position to
|
||||
* free some non-essential memory, doing so could benefit the system
|
||||
* as a whole.
|
||||
*
|
||||
* %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
|
||||
* cannot handle allocation failures. The allocation could block
|
||||
* indefinitely but will never return with failure. Testing for
|
||||
* failure is pointless.
|
||||
* New users should be evaluated carefully (and the flag should be
|
||||
* used only when there is no reasonable failure policy) but it is
|
||||
* definitely preferable to use the flag rather than opencode endless
|
||||
* loop around allocator.
|
||||
* Using this flag for costly allocations is _highly_ discouraged.
|
||||
*/
|
||||
#define __GFP_IO ((__force gfp_t)___GFP_IO)
|
||||
#define __GFP_FS ((__force gfp_t)___GFP_FS)
|
||||
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
|
||||
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
|
||||
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
|
||||
#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
|
||||
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
|
||||
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
|
||||
|
||||
/**
|
||||
* DOC: Action modifiers
|
||||
*
|
||||
* Action modifiers
|
||||
* ----------------
|
||||
*
|
||||
* %__GFP_NOWARN suppresses allocation failure reports.
|
||||
*
|
||||
* %__GFP_COMP addresses compound page metadata.
|
||||
*
|
||||
* %__GFP_ZERO returns a zeroed page on success.
|
||||
*
|
||||
* %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
|
||||
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
|
||||
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
|
||||
* memory tags at the same time as zeroing memory has minimal additional
|
||||
* performance impact.
|
||||
*
|
||||
* %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
|
||||
* Only effective in HW_TAGS mode.
|
||||
*
|
||||
* %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
|
||||
* Typically, used for userspace pages. Only effective in HW_TAGS mode.
|
||||
*/
|
||||
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
|
||||
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
|
||||
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
|
||||
#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
|
||||
#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
|
||||
#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
|
||||
#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
|
||||
|
||||
/* Disable lockdep for GFP context tracking */
|
||||
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
|
||||
|
||||
/* Room for N __GFP_FOO bits */
|
||||
#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
|
||||
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
|
||||
|
||||
/**
|
||||
* DOC: Useful GFP flag combinations
|
||||
*
|
||||
* Useful GFP flag combinations
|
||||
* ----------------------------
|
||||
*
|
||||
* Useful GFP flag combinations that are commonly used. It is recommended
|
||||
* that subsystems start with one of these combinations and then set/clear
|
||||
* %__GFP_FOO flags as necessary.
|
||||
*
|
||||
* %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
|
||||
* watermark is applied to allow access to "atomic reserves".
|
||||
* The current implementation doesn't support NMI and a few other strict
|
||||
* non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
|
||||
*
|
||||
* %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
|
||||
* %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
|
||||
*
|
||||
* %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
|
||||
* accounted to kmemcg.
|
||||
*
|
||||
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
|
||||
* reclaim, start physical IO or use any filesystem callback.
|
||||
*
|
||||
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
|
||||
* that do not require the starting of any physical IO.
|
||||
* Please try to avoid using this flag directly and instead use
|
||||
* memalloc_noio_{save,restore} to mark the whole scope which cannot
|
||||
* perform any IO with a short explanation why. All allocation requests
|
||||
* will inherit GFP_NOIO implicitly.
|
||||
*
|
||||
* %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
|
||||
* Please try to avoid using this flag directly and instead use
|
||||
* memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
|
||||
* recurse into the FS layer with a short explanation why. All allocation
|
||||
* requests will inherit GFP_NOFS implicitly.
|
||||
*
|
||||
* %GFP_USER is for userspace allocations that also need to be directly
|
||||
* accessible by the kernel or hardware. It is typically used by hardware
|
||||
* for buffers that are mapped to userspace (e.g. graphics) that hardware
|
||||
* still must DMA to. cpuset limits are enforced for these allocations.
|
||||
*
|
||||
* %GFP_DMA exists for historical reasons and should be avoided where possible.
|
||||
* The flag indicates that the caller requires that the lowest zone be
|
||||
* used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
|
||||
* it would require careful auditing as some users really require it and
|
||||
* others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
|
||||
* lowest zone as a type of emergency reserve.
|
||||
*
|
||||
* %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
|
||||
* address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
|
||||
* because the DMA32 kmalloc cache array is not implemented.
|
||||
* (Reason: there is no such user in kernel).
|
||||
*
|
||||
* %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
|
||||
* do not need to be directly accessible by the kernel but that cannot
|
||||
* move once in use. An example may be a hardware allocation that maps
|
||||
* data directly into userspace but has no addressing limitations.
|
||||
*
|
||||
* %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
|
||||
* need direct access to but can use kmap() when access is required. They
|
||||
* are expected to be movable via page reclaim or page migration. Typically,
|
||||
* pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
|
||||
*
|
||||
* %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
|
||||
* are compound allocations that will generally fail quickly if memory is not
|
||||
* available and will not wake kswapd/kcompactd on failure. The _LIGHT
|
||||
* version does not attempt reclaim/compaction at all and is by default used
|
||||
* in page fault path, while the non-light is used by khugepaged.
|
||||
*/
|
||||
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
|
||||
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
|
||||
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
|
||||
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
|
||||
#define GFP_NOIO (__GFP_RECLAIM)
|
||||
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
|
||||
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
|
||||
#define GFP_DMA __GFP_DMA
|
||||
#define GFP_DMA32 __GFP_DMA32
|
||||
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
|
||||
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
|
||||
__GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
|
||||
#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
|
||||
__GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
|
||||
#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
|
||||
|
||||
/* Convert GFP flags to their corresponding migrate type */
|
||||
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
|
||||
#define GFP_MOVABLE_SHIFT 3
|
||||
|
|
|
@ -0,0 +1,348 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LINUX_GFP_TYPES_H
|
||||
#define __LINUX_GFP_TYPES_H
|
||||
|
||||
/* The typedef is in types.h but we want the documentation here */
|
||||
#if 0
|
||||
/**
|
||||
* typedef gfp_t - Memory allocation flags.
|
||||
*
|
||||
* GFP flags are commonly used throughout Linux to indicate how memory
|
||||
* should be allocated. The GFP acronym stands for get_free_pages(),
|
||||
* the underlying memory allocation function. Not every GFP flag is
|
||||
* supported by every function which may allocate memory. Most users
|
||||
* will want to use a plain ``GFP_KERNEL``.
|
||||
*/
|
||||
typedef unsigned int __bitwise gfp_t;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* In case of changes, please don't forget to update
|
||||
* include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
|
||||
*/
|
||||
|
||||
/* Plain integer GFP bitmasks. Do not use this directly. */
|
||||
#define ___GFP_DMA 0x01u
|
||||
#define ___GFP_HIGHMEM 0x02u
|
||||
#define ___GFP_DMA32 0x04u
|
||||
#define ___GFP_MOVABLE 0x08u
|
||||
#define ___GFP_RECLAIMABLE 0x10u
|
||||
#define ___GFP_HIGH 0x20u
|
||||
#define ___GFP_IO 0x40u
|
||||
#define ___GFP_FS 0x80u
|
||||
#define ___GFP_ZERO 0x100u
|
||||
#define ___GFP_ATOMIC 0x200u
|
||||
#define ___GFP_DIRECT_RECLAIM 0x400u
|
||||
#define ___GFP_KSWAPD_RECLAIM 0x800u
|
||||
#define ___GFP_WRITE 0x1000u
|
||||
#define ___GFP_NOWARN 0x2000u
|
||||
#define ___GFP_RETRY_MAYFAIL 0x4000u
|
||||
#define ___GFP_NOFAIL 0x8000u
|
||||
#define ___GFP_NORETRY 0x10000u
|
||||
#define ___GFP_MEMALLOC 0x20000u
|
||||
#define ___GFP_COMP 0x40000u
|
||||
#define ___GFP_NOMEMALLOC 0x80000u
|
||||
#define ___GFP_HARDWALL 0x100000u
|
||||
#define ___GFP_THISNODE 0x200000u
|
||||
#define ___GFP_ACCOUNT 0x400000u
|
||||
#define ___GFP_ZEROTAGS 0x800000u
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
#define ___GFP_SKIP_ZERO 0x1000000u
|
||||
#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
|
||||
#define ___GFP_SKIP_KASAN_POISON 0x4000000u
|
||||
#else
|
||||
#define ___GFP_SKIP_ZERO 0
|
||||
#define ___GFP_SKIP_KASAN_UNPOISON 0
|
||||
#define ___GFP_SKIP_KASAN_POISON 0
|
||||
#endif
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
#define ___GFP_NOLOCKDEP 0x8000000u
|
||||
#else
|
||||
#define ___GFP_NOLOCKDEP 0
|
||||
#endif
|
||||
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
|
||||
|
||||
/*
|
||||
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
|
||||
*
|
||||
* Do not put any conditional on these. If necessary modify the definitions
|
||||
* without the underscores and use them consistently. The definitions here may
|
||||
* be used in bit comparisons.
|
||||
*/
|
||||
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
|
||||
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
|
||||
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
|
||||
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
|
||||
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
|
||||
|
||||
/**
|
||||
* DOC: Page mobility and placement hints
|
||||
*
|
||||
* Page mobility and placement hints
|
||||
* ---------------------------------
|
||||
*
|
||||
* These flags provide hints about how mobile the page is. Pages with similar
|
||||
* mobility are placed within the same pageblocks to minimise problems due
|
||||
* to external fragmentation.
|
||||
*
|
||||
* %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
|
||||
* moved by page migration during memory compaction or can be reclaimed.
|
||||
*
|
||||
* %__GFP_RECLAIMABLE is used for slab allocations that specify
|
||||
* SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
|
||||
*
|
||||
* %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
|
||||
* these pages will be spread between local zones to avoid all the dirty
|
||||
* pages being in one zone (fair zone allocation policy).
|
||||
*
|
||||
* %__GFP_HARDWALL enforces the cpuset memory allocation policy.
|
||||
*
|
||||
* %__GFP_THISNODE forces the allocation to be satisfied from the requested
|
||||
* node with no fallbacks or placement policy enforcements.
|
||||
*
|
||||
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
|
||||
*/
|
||||
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
|
||||
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
|
||||
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
|
||||
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
|
||||
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
|
||||
|
||||
/**
|
||||
* DOC: Watermark modifiers
|
||||
*
|
||||
* Watermark modifiers -- controls access to emergency reserves
|
||||
* ------------------------------------------------------------
|
||||
*
|
||||
* %__GFP_HIGH indicates that the caller is high-priority and that granting
|
||||
* the request is necessary before the system can make forward progress.
|
||||
* For example, creating an IO context to clean pages.
|
||||
*
|
||||
* %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
|
||||
* high priority. Users are typically interrupt handlers. This may be
|
||||
* used in conjunction with %__GFP_HIGH
|
||||
*
|
||||
* %__GFP_MEMALLOC allows access to all memory. This should only be used when
|
||||
* the caller guarantees the allocation will allow more memory to be freed
|
||||
* very shortly e.g. process exiting or swapping. Users either should
|
||||
* be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
|
||||
* Users of this flag have to be extremely careful to not deplete the reserve
|
||||
* completely and implement a throttling mechanism which controls the
|
||||
* consumption of the reserve based on the amount of freed memory.
|
||||
* Usage of a pre-allocated pool (e.g. mempool) should be always considered
|
||||
* before using this flag.
|
||||
*
|
||||
* %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
|
||||
* This takes precedence over the %__GFP_MEMALLOC flag if both are set.
|
||||
*/
|
||||
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
|
||||
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
|
||||
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
|
||||
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
|
||||
|
||||
/**
|
||||
* DOC: Reclaim modifiers
|
||||
*
|
||||
* Reclaim modifiers
|
||||
* -----------------
|
||||
* Please note that all the following flags are only applicable to sleepable
|
||||
* allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
|
||||
*
|
||||
* %__GFP_IO can start physical IO.
|
||||
*
|
||||
* %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
|
||||
* allocator recursing into the filesystem which might already be holding
|
||||
* locks.
|
||||
*
|
||||
* %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
|
||||
* This flag can be cleared to avoid unnecessary delays when a fallback
|
||||
* option is available.
|
||||
*
|
||||
* %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
|
||||
* the low watermark is reached and have it reclaim pages until the high
|
||||
* watermark is reached. A caller may wish to clear this flag when fallback
|
||||
* options are available and the reclaim is likely to disrupt the system. The
|
||||
* canonical example is THP allocation where a fallback is cheap but
|
||||
* reclaim/compaction may cause indirect stalls.
|
||||
*
|
||||
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
|
||||
*
|
||||
* The default allocator behavior depends on the request size. We have a concept
|
||||
* of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
|
||||
* !costly allocations are too essential to fail so they are implicitly
|
||||
* non-failing by default (with some exceptions like OOM victims might fail so
|
||||
* the caller still has to check for failures) while costly requests try to be
|
||||
* not disruptive and back off even without invoking the OOM killer.
|
||||
* The following three modifiers might be used to override some of these
|
||||
* implicit rules
|
||||
*
|
||||
* %__GFP_NORETRY: The VM implementation will try only very lightweight
|
||||
* memory direct reclaim to get some memory under memory pressure (thus
|
||||
* it can sleep). It will avoid disruptive actions like OOM killer. The
|
||||
* caller must handle the failure which is quite likely to happen under
|
||||
* heavy memory pressure. The flag is suitable when failure can easily be
|
||||
* handled at small cost, such as reduced throughput
|
||||
*
|
||||
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
|
||||
* procedures that have previously failed if there is some indication
|
||||
* that progress has been made elsewhere. It can wait for other
|
||||
* tasks to attempt high level approaches to freeing memory such as
|
||||
* compaction (which removes fragmentation) and page-out.
|
||||
* There is still a definite limit to the number of retries, but it is
|
||||
* a larger limit than with %__GFP_NORETRY.
|
||||
* Allocations with this flag may fail, but only when there is
|
||||
* genuinely little unused memory. While these allocations do not
|
||||
* directly trigger the OOM killer, their failure indicates that
|
||||
* the system is likely to need to use the OOM killer soon. The
|
||||
* caller must handle failure, but can reasonably do so by failing
|
||||
* a higher-level request, or completing it only in a much less
|
||||
* efficient manner.
|
||||
* If the allocation does fail, and the caller is in a position to
|
||||
* free some non-essential memory, doing so could benefit the system
|
||||
* as a whole.
|
||||
*
|
||||
* %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
|
||||
* cannot handle allocation failures. The allocation could block
|
||||
* indefinitely but will never return with failure. Testing for
|
||||
* failure is pointless.
|
||||
* New users should be evaluated carefully (and the flag should be
|
||||
* used only when there is no reasonable failure policy) but it is
|
||||
* definitely preferable to use the flag rather than opencode endless
|
||||
* loop around allocator.
|
||||
* Using this flag for costly allocations is _highly_ discouraged.
|
||||
*/
|
||||
#define __GFP_IO ((__force gfp_t)___GFP_IO)
|
||||
#define __GFP_FS ((__force gfp_t)___GFP_FS)
|
||||
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
|
||||
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
|
||||
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
|
||||
#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
|
||||
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
|
||||
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
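As the reclaim-modifier notes above say, most filesystem and block code should not pass GFP_NOFS/GFP_NOIO directly but bracket the critical region with the memalloc_*_save()/restore() scope helpers so every nested allocation inherits the restriction. A short hedged sketch of that pattern (the wrapper function is made up for the example; memalloc_nofs_save()/restore() are the existing helpers from <linux/sched/mm.h>):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Illustrative only: brackets a region where FS recursion must not happen. */
static void *alloc_under_fs_lock(size_t size)
{
        unsigned int nofs_flags;
        void *p;

        nofs_flags = memalloc_nofs_save();      /* GFP_NOFS now implied ... */
        p = kmalloc(size, GFP_KERNEL);          /* ... even for GFP_KERNEL here */
        memalloc_nofs_restore(nofs_flags);

        return p;
}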
|
||||
|
||||
/**
|
||||
* DOC: Action modifiers
|
||||
*
|
||||
* Action modifiers
|
||||
* ----------------
|
||||
*
|
||||
* %__GFP_NOWARN suppresses allocation failure reports.
|
||||
*
|
||||
* %__GFP_COMP addresses compound page metadata.
|
||||
*
|
||||
* %__GFP_ZERO returns a zeroed page on success.
|
||||
*
|
||||
* %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
|
||||
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
|
||||
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
|
||||
* memory tags at the same time as zeroing memory has minimal additional
|
||||
* performance impact.
|
||||
*
|
||||
* %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
|
||||
* Only effective in HW_TAGS mode.
|
||||
*
|
||||
* %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
|
||||
* Typically, used for userspace pages. Only effective in HW_TAGS mode.
|
||||
*/
|
||||
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
|
||||
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
|
||||
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
|
||||
#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
|
||||
#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
|
||||
#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
|
||||
#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
|
||||
|
||||
/* Disable lockdep for GFP context tracking */
|
||||
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
|
||||
|
||||
/* Room for N __GFP_FOO bits */
|
||||
#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
|
||||
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
|
||||
|
||||
/**
|
||||
* DOC: Useful GFP flag combinations
|
||||
*
|
||||
* Useful GFP flag combinations
|
||||
* ----------------------------
|
||||
*
|
||||
* Useful GFP flag combinations that are commonly used. It is recommended
|
||||
* that subsystems start with one of these combinations and then set/clear
|
||||
* %__GFP_FOO flags as necessary.
|
||||
*
|
||||
* %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
|
||||
* watermark is applied to allow access to "atomic reserves".
|
||||
* The current implementation doesn't support NMI and a few other strict
|
||||
* non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
|
||||
*
|
||||
* %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
|
||||
* %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
|
||||
*
|
||||
* %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
|
||||
* accounted to kmemcg.
|
||||
*
|
||||
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
|
||||
* reclaim, start physical IO or use any filesystem callback.
|
||||
*
|
||||
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
|
||||
* that do not require the starting of any physical IO.
|
||||
* Please try to avoid using this flag directly and instead use
|
||||
* memalloc_noio_{save,restore} to mark the whole scope which cannot
|
||||
* perform any IO with a short explanation why. All allocation requests
|
||||
* will inherit GFP_NOIO implicitly.
|
||||
*
|
||||
* %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
|
||||
* Please try to avoid using this flag directly and instead use
|
||||
* memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
|
||||
* recurse into the FS layer with a short explanation why. All allocation
|
||||
* requests will inherit GFP_NOFS implicitly.
|
||||
*
|
||||
* %GFP_USER is for userspace allocations that also need to be directly
|
||||
* accessible by the kernel or hardware. It is typically used by hardware
|
||||
* for buffers that are mapped to userspace (e.g. graphics) that hardware
|
||||
* still must DMA to. cpuset limits are enforced for these allocations.
|
||||
*
|
||||
* %GFP_DMA exists for historical reasons and should be avoided where possible.
|
||||
* The flag indicates that the caller requires that the lowest zone be
|
||||
* used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
|
||||
* it would require careful auditing as some users really require it and
|
||||
* others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
|
||||
* lowest zone as a type of emergency reserve.
|
||||
*
|
||||
* %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
|
||||
* address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
|
||||
* because the DMA32 kmalloc cache array is not implemented.
|
||||
* (Reason: there is no such user in kernel).
|
||||
*
|
||||
* %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
|
||||
* do not need to be directly accessible by the kernel but that cannot
|
||||
* move once in use. An example may be a hardware allocation that maps
|
||||
* data directly into userspace but has no addressing limitations.
|
||||
*
|
||||
* %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
|
||||
* need direct access to but can use kmap() when access is required. They
|
||||
* are expected to be movable via page reclaim or page migration. Typically,
|
||||
* pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
|
||||
*
|
||||
* %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
|
||||
* are compound allocations that will generally fail quickly if memory is not
|
||||
* available and will not wake kswapd/kcompactd on failure. The _LIGHT
|
||||
* version does not attempt reclaim/compaction at all and is by default used
|
||||
* in page fault path, while the non-light is used by khugepaged.
|
||||
*/
|
||||
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
|
||||
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
|
||||
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
|
||||
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
|
||||
#define GFP_NOIO (__GFP_RECLAIM)
|
||||
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
|
||||
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
|
||||
#define GFP_DMA __GFP_DMA
|
||||
#define GFP_DMA32 __GFP_DMA32
|
||||
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
|
||||
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
|
||||
__GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
|
||||
#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
|
||||
__GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
|
||||
#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
|
||||
|
||||
#endif /* __LINUX_GFP_TYPES_H */
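Following the "Useful GFP flag combinations" guidance now housed in this header, callers normally start from one of the base combinations and add modifiers rather than assembling flags bit by bit. Two illustrative callers (function names are made up; kmalloc() and the flags shown are the existing APIs):

#include <linux/slab.h>

/* Optional speed-up buffer: fine to fail, so don't warn and don't retry hard. */
static void *try_alloc_optional_buf(size_t size)
{
        return kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
}

/* Interrupt context: may not sleep, so no direct reclaim is allowed. */
static void *alloc_in_irq(size_t size)
{
        return kmalloc(size, GFP_ATOMIC);
}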
|
|
@@ -94,6 +94,7 @@
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/numa.h>
#include <linux/random.h>

typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;

@@ -276,7 +277,14 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
 * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
unsigned int __next_node_in(int node, const nodemask_t *srcp);
static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
        unsigned int ret = __next_node(node, srcp);

        if (ret == MAX_NUMNODES)
                ret = __first_node(srcp);
        return ret;
}

static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{

@@ -493,14 +501,20 @@ static inline int num_node_state(enum node_states state)

#endif

#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
extern int node_random(const nodemask_t *maskp);
#else
static inline int node_random(const nodemask_t *mask)
static inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
        int w, bit = NUMA_NO_NODE;

        w = nodes_weight(*maskp);
        if (w)
                bit = bitmap_ord_to_pos(maskp->bits,
                                        get_random_int() % w, MAX_NUMNODES);
        return bit;
#else
        return 0;
}
#endif
}

#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
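Kernel-side, the now-inline next_node_in() is typically used for round-robin node selection, wrapping back to the first set node when the end of the mask is reached. A short illustrative caller (the function name is made up for the example; next_node_in(), node_states[] and NUMA_NO_NODE are the existing interfaces):

#include <linux/nodemask.h>

/* Illustrative: pick the next online NUMA node after the one used last. */
static int pick_next_node(int prev)
{
        int node = next_node_in(prev, node_states[N_ONLINE]);

        if (node == MAX_NUMNODES)       /* empty mask */
                node = NUMA_NO_NODE;
        return node;
}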
@@ -33,7 +33,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
                flex_proportions.o ratelimit.o show_mem.o \
                is_single_threaded.o plist.o decompress.o kobject_uevent.o \
                earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
                nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
                nmi_backtrace.o win_minmax.o memcat_p.o \
                buildid.o cpumask.o

lib-$(CONFIG_PRINTK) += dump_stack.o

lib/bitmap.c
@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
|
|||
}
|
||||
EXPORT_SYMBOL(bitmap_cut);
|
||||
|
||||
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
|
||||
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, unsigned int bits)
|
||||
{
|
||||
unsigned int k;
|
||||
|
@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
|
|||
}
|
||||
EXPORT_SYMBOL(__bitmap_xor);
|
||||
|
||||
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
|
||||
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, unsigned int bits)
|
||||
{
|
||||
unsigned int k;
|
||||
|
@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);

int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
int w = 0;
unsigned int k, lim = bits/BITS_PER_LONG, w = 0;

for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)

/* Clear tail bits in the last element of array beyond nbits. */
if (nbits % 64)
buf[-1] &= GENMASK_ULL(nbits % 64, 0);
buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif
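
To see the off-by-one that the hunk above fixes, a small user-space sketch, with GENMASK_ULL() re-declared locally so it can run outside the kernel:

/* Illustrative, user-space only.  For nbits = 1 the old mask keeps
 * bits [1:0] -- one bit beyond nbits -- while the fixed mask keeps
 * only bit 0, so the tail of the last u64 really gets cleared.
 */
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int nbits = 1;

	printf("old: %#llx\n", GENMASK_ULL(nbits % 64, 0));		/* 0x3 */
	printf("new: %#llx\n", GENMASK_ULL((nbits - 1) % 64, 0));	/* 0x1 */
	return 0;
}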

@ -7,61 +7,6 @@
#include <linux/memblock.h>
#include <linux/numa.h>

/**
* cpumask_next - get the next cpu in a cpumask
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @srcp: the cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set.
*/
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set in both.
*/
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

/**
* cpumask_any_but - return a "random" in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
* Returns >= nr_cpu_ids if no cpus set.
*/
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;

cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
EXPORT_SYMBOL(cpumask_any_but);

/**
* cpumask_next_wrap - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search
@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but);
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
*/
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
int next;
unsigned int next;

again:
next = cpumask_next(n, mask);
@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
*/
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
@ -206,7 +123,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
int cpu;
unsigned int cpu;

/* Wrap: we always want a cpu. */
i %= num_online_cpus();
@ -244,10 +161,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
*
* Returns >= nr_cpu_ids if the intersection is empty.
*/
int cpumask_any_and_distribute(const struct cpumask *src1p,
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
{
int next, prev;
unsigned int next, prev;

/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
@ -263,9 +180,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

int cpumask_any_distribute(const struct cpumask *srcp)
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
int next, prev;
unsigned int next, prev;

/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);

@ -3,14 +3,6 @@
#include <linux/module.h>
#include <linux/random.h>

unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);

if (ret == MAX_NUMNODES)
ret = __first_node(srcp);
return ret;
}
EXPORT_SYMBOL(__next_node_in);

#ifdef CONFIG_NUMA

@ -604,6 +604,12 @@ static void __init test_bitmap_arr64(void)
pr_err("bitmap_copy_arr64(nbits == %d:"
" tail is not safely cleared: %d\n", nbits, next_bit);

if ((nbits % 64) &&
(arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
nbits, arr[(nbits - 1) / 64],
GENMASK_ULL((nbits - 1) % 64, 0));

if (nbits < EXP1_IN_BITS - 64)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
}
@ -869,6 +875,67 @@ static void __init test_bitmap_print_buf(void)
}
}

static void __init test_bitmap_const_eval(void)
{
DECLARE_BITMAP(bitmap, BITS_PER_LONG);
unsigned long initvar = BIT(2);
unsigned long bitopvar = 0;
unsigned long var = 0;
int res;

/*
* Compilers must be able to optimize all of those to compile-time
* constants on any supported optimization level (-O2, -Os) and any
* architecture. Otherwise, trigger a build bug.
* The whole function gets optimized out then, there's nothing to do
* in runtime.
*/

/*
* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
* Clang on s390 optimizes bitops at compile-time as intended, but at
* the same time stops treating @bitmap and @bitopvar as compile-time
* constants after regular test_bit() is executed, thus triggering the
* build bugs below. So, call const_test_bit() there directly until
* the compiler is fixed.
*/
bitmap_clear(bitmap, 0, BITS_PER_LONG);
#if defined(__s390__) && defined(__clang__)
if (!const_test_bit(7, bitmap))
#else
if (!test_bit(7, bitmap))
#endif
bitmap_set(bitmap, 5, 2);

/* Equals to `unsigned long bitopvar = BIT(20)` */
__change_bit(31, &bitopvar);
bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);

/* Equals to `unsigned long var = BIT(25)` */
var |= BIT(25);
if (var & BIT(0))
var ^= GENMASK(9, 6);

/* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
res = bitmap_weight(bitmap, 20);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(res != 2);

/* !(BIT(31) & BIT(18)) == 1 */
res = !test_bit(18, &bitopvar);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(!res);

/* BIT(2) & GENMASK(14, 8) == 0 */
res = initvar & GENMASK(14, 8);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(res);

/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
}

static void __init selftest(void)
{
test_zero_clear();
@ -884,6 +951,7 @@ static void __init selftest(void)
test_for_each_set_clump8();
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
}

KSTM_MODULE_LOADERS(test_bitmap);

@ -2,10 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <asm/types.h>
#include <linux/bits.h>

/**
* __set_bit - Set a bit in memory
* ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@ -13,7 +13,8 @@
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*p |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -30,7 +32,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
}

/**
* __change_bit - Toggle a bit in memory
* ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -47,7 +50,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
}

/**
* __test_and_set_bit - Set a bit and return its old value
* ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -66,7 +70,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
}

/**
* __test_and_clear_bit - Clear a bit and return its old value
* ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}

/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
static __always_inline bool
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@ -97,11 +102,12 @@ static inline int __test_and_change_bit(int nr,
}

/**
* test_bit - Determine whether a bit is set
* _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
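
The renamed ___*()/_test_bit() helpers above are not called directly; callers keep using __set_bit(), test_bit() and friends through wrapper macros (see the bitop()/test_bit() defines in the tools header hunk further below). On the kernel side the same indirection is what allows a compile-time-constant path to be substituted. A simplified sketch of that idea, illustrative only and not the literal macro from include/linux/bitops.h:

/* Illustrative sketch only: when the bit number and the word it lives
 * in are both known at compile time, dispatch to a const-evaluable
 * helper (const_test_bit() is introduced by this series) so the result
 * folds into a constant; otherwise fall back to the runtime _test_bit().
 */
#define my_test_bit(nr, addr)						\
	(__builtin_constant_p(nr) &&					\
	 __builtin_constant_p(*(const unsigned long *)(addr)) ?	\
		const_test_bit(nr, addr) : _test_bit(nr, addr))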

@ -11,10 +11,10 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]

int __bitmap_weight(const unsigned long *bitmap, int bits);
unsigned int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
@ -45,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}

static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@ -53,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}

static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@ -61,7 +61,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}

static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@ -146,7 +146,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
* @src2: operand 2
* @nbits: size of bitmap
*/
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))

@ -25,6 +25,22 @@ extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
* Defined here because those may be needed by architecture-specific static
* inlines.
*/

#define bitop(op, nr, addr) \
op(nr, addr)

#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)

/*
* Include this here because some architectures need generic_ffs/fls in
* scope

@ -5,9 +5,9 @@
*/
#include <linux/bitmap.h>

int __bitmap_weight(const unsigned long *bitmap, int bits)
unsigned int __bitmap_weight(const unsigned long *bitmap, int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
unsigned int k, w = 0, lim = bits/BITS_PER_LONG;

for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@ -57,7 +57,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
return ret;
}

int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;