/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
*/
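
/*
 * Illustrative sketch of the rules above (not part of this API; "my_idr",
 * "id", "obj" and do_something() are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 *
 * The caller still manages the lifetime of "obj", e.g. by freeing it via
 * RCU only after it has been removed from the IDR.
 */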

#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
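
/*
 * Minimal usage sketch (illustrative only; "my_idr" and "my_ptr" are
 * hypothetical).  Passing 0 as @end to idr_alloc() places no upper bound
 * on the allocated ID:
 *
 *	DEFINE_IDR(my_idr);
 *
 *	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	my_ptr = idr_find(&my_idr, id);
 *	...
 *	idr_remove(&my_idr, id);
 */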

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at %base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs have been allocated from this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
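
/*
 * Sketch of the usual preload pattern ("lock", "idr", "ptr", "start" and
 * "end" are placeholders).  idr_preload() fills the per-CPU preload buffer
 * so that the GFP_NOWAIT allocation under the spinlock can still succeed:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */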

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
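
/*
 * Iteration sketch (assumes a hypothetical "my_idr" holding "struct foo"
 * pointers):
 *
 *	struct foo *entry;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, entry, id)
 *		pr_info("id %d -> %p\n", id, entry);
 */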

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
	for (tmp = id;							\
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)

/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;
};

#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

#define IDA_INIT(name)	{						\
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)				\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);

/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}
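
/*
 * Minimal IDA usage sketch ("my_ida" is a hypothetical instance):
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	id = ida_alloc(&my_ida, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_free(&my_ida, id);
 */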

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}

static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}

/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 */
#define ida_simple_get(ida, start, end, gfp)	\
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
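
/*
 * Conversion sketch (hypothetical "my_ida"): ida_simple_get() took an
 * exclusive @end while ida_alloc_range() takes an inclusive @max, hence
 * the "(end) - 1" above.
 *
 *	id = ida_simple_get(&my_ida, 0, 100, GFP_KERNEL);
 * becomes
 *	id = ida_alloc_range(&my_ida, 0, 99, GFP_KERNEL);
 */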

static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */