percpu-refcount: implement percpu_tryget() along with percpu_ref_kill_and_confirm()
Implement percpu_tryget() which stops giving out references once the
percpu_ref is visible as killed.  Because the refcnt is per-cpu,
different CPUs will start to see a refcnt as killed at different
points in time and tryget() may continue to succeed on a subset of
CPUs for a while after percpu_ref_kill() returns.

For use cases where it's necessary to know when all CPUs start to see
the refcnt as dead, percpu_ref_kill_and_confirm() is added.  The new
function takes an extra argument @confirm_kill which is invoked when
the refcnt is guaranteed to be viewed as killed on all CPUs.

While this isn't the prettiest interface, it doesn't force synchronous
wait and is much safer than requiring the caller to do its own
call_rcu().

v2: Patch description rephrased to emphasize that tryget() may
    continue to succeed on some CPUs after kill() returns, as
    suggested by Kent.

v3: Function comment in percpu_ref_kill_and_confirm() updated to warn
    people not to depend on the implied RCU grace period from the
    confirm callback, as it's an implementation detail.

Signed-off-by: Tejun Heo <tj@kernel.org>
Slightly-Grumpily-Acked-by: Kent Overstreet <koverstreet@google.com>
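The intended usage pattern is easiest to see from a caller's point of view. The sketch below is not part of the patch (struct foo, its fields and the foo_*() helpers are hypothetical): hot paths take references with percpu_ref_tryget() and drop them with percpu_ref_put(), while shutdown calls percpu_ref_kill_and_confirm() with a completion-based @confirm_kill callback so it can wait until no CPU can hand out new references.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical object whose lookups are protected by a percpu_ref. */
struct foo {
	struct percpu_ref	ref;
	struct completion	ref_confirmed;
	/* ... payload ... */
};

/* Called once the last reference is dropped; safe to free here. */
static void foo_ref_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	kfree(foo);
}

/*
 * Confirmation callback: from this point on no CPU can succeed at
 * percpu_ref_tryget(&foo->ref).  Must not block.
 */
static void foo_ref_confirm_kill(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	complete(&foo->ref_confirmed);
}

static struct foo *foo_create(void)
{
	struct foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	init_completion(&foo->ref_confirmed);
	if (percpu_ref_init(&foo->ref, foo_ref_release)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}

/* Hot path: only operate on @foo if it hasn't been killed yet. */
static int foo_do_something(struct foo *foo)
{
	if (!percpu_ref_tryget(&foo->ref))
		return -ENODEV;

	/* ... use @foo ... */

	percpu_ref_put(&foo->ref);
	return 0;
}

/*
 * Shutdown: drop the initial ref and wait until tryget is guaranteed
 * to fail everywhere.  @foo itself is freed by foo_ref_release() once
 * the outstanding references drain.
 */
static void foo_shutdown(struct foo *foo)
{
	percpu_ref_kill_and_confirm(&foo->ref, foo_ref_confirm_kill);
	wait_for_completion(&foo->ref_confirmed);
}

After foo_shutdown() returns, new lookups are guaranteed to fail, but outstanding references may still be in flight; only foo_ref_release() marks the point where the object can actually go away.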
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -63,13 +63,30 @@ struct percpu_ref {
 	 */
 	unsigned __percpu	*pcpu_count;
 	percpu_ref_func_t	*release;
+	percpu_ref_func_t	*confirm_kill;
 	struct rcu_head		rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release);
 void percpu_ref_cancel_init(struct percpu_ref *ref);
-void percpu_ref_kill(struct percpu_ref *ref);
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill);
+
+/**
+ * percpu_ref_kill - drop the initial ref
+ * @ref: percpu_ref to kill
+ *
+ * Must be used to drop the initial ref on a percpu refcount; must be called
+ * precisely once before shutdown.
+ *
+ * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
+ * percpu counters and dropping the initial ref.
+ */
+static inline void percpu_ref_kill(struct percpu_ref *ref)
+{
+	return percpu_ref_kill_and_confirm(ref, NULL);
+}
 
 #define PCPU_STATUS_BITS	2
 #define PCPU_STATUS_MASK	((1 << PCPU_STATUS_BITS) - 1)
@@ -100,6 +117,37 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 	rcu_read_unlock();
 }
 
+/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless it has already been killed.  Returns
+ * %true on success; %false on failure.
+ *
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
+ * will fail.  For such guarantee, percpu_ref_kill_and_confirm() should be
+ * used.  After the confirm_kill callback is invoked, it's guaranteed that
+ * no new reference will be given out by percpu_ref_tryget().
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		__this_cpu_inc(*pcpu_count);
+		ret = true;
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
 /**
  * percpu_ref_put - decrement a percpu refcount
  * @ref: percpu_ref to put
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -118,6 +118,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 
 	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
 
+	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
+	if (ref->confirm_kill)
+		ref->confirm_kill(ref);
+
 	/*
 	 * Now we're in single atomic_t mode with a consistent refcount, so it's
 	 * safe to drop our initial ref:
@@ -126,22 +130,29 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 }
 
 /**
- * percpu_ref_kill - safely drop initial ref
+ * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
  * @ref: percpu_ref to kill
+ * @confirm_kill: optional confirmation callback
  *
- * Must be used to drop the initial ref on a percpu refcount; must be called
- * precisely once before shutdown.
+ * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
+ * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
+ * called after @ref is seen as dead from all CPUs - all further
+ * invocations of percpu_ref_tryget() will fail.  See percpu_ref_tryget()
+ * for more details.
  *
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full RCU grace period has passed but this is an
+ * implementation detail and callers must not depend on it.
  */
-void percpu_ref_kill(struct percpu_ref *ref)
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill)
 {
 	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
 	ref->pcpu_count = (unsigned __percpu *)
 		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->confirm_kill = confirm_kill;
 
 	call_rcu(&ref->rcu, percpu_ref_kill_rcu);
 }