Merge branch 'ucount-fixes-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace
Pull ucount cleanups from Eric Biederman: "While working on the ucount fixes for v5.15 a number of cleanups suggested themselves. Little things like not testing for NULL when a pointer can not be NULL and wrapping atomic_add_negative with a more descriptive name, so that people reading the code can more quickly understand what is going on" * 'ucount-fixes-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: ucounts: Use atomic_long_sub_return for clarity ucounts: Add get_ucounts_or_wrap for clarity ucounts: Remove unnecessary test for NULL ucount in get_ucounts ucounts: In set_cred_ucounts assume new->ucounts is non-NULL
This commit is contained in:
commit
33fb42636a
|
@ -676,15 +676,14 @@ int set_cred_ucounts(struct cred *new)
|
|||
* This optimization is needed because alloc_ucounts() uses locks
|
||||
* for table lookups.
|
||||
*/
|
||||
if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
|
||||
if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
|
||||
return 0;
|
||||
|
||||
if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
|
||||
return -EAGAIN;
|
||||
|
||||
new->ucounts = new_ucounts;
|
||||
if (old_ucounts)
|
||||
put_ucounts(old_ucounts);
|
||||
put_ucounts(old_ucounts);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -150,9 +150,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
|
|||
spin_unlock_irq(&ucounts_lock);
|
||||
}
|
||||
|
||||
static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
|
||||
{
|
||||
/* Returns true on a successful get, false if the count wraps. */
|
||||
return !atomic_add_negative(1, &ucounts->count);
|
||||
}
|
||||
|
||||
struct ucounts *get_ucounts(struct ucounts *ucounts)
|
||||
{
|
||||
if (ucounts && atomic_add_negative(1, &ucounts->count)) {
|
||||
if (!get_ucounts_or_wrap(ucounts)) {
|
||||
put_ucounts(ucounts);
|
||||
ucounts = NULL;
|
||||
}
|
||||
|
@ -163,7 +169,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
|
|||
{
|
||||
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
|
||||
struct ucounts *ucounts, *new;
|
||||
long overflow;
|
||||
bool wrapped;
|
||||
|
||||
spin_lock_irq(&ucounts_lock);
|
||||
ucounts = find_ucounts(ns, uid, hashent);
|
||||
|
@ -188,9 +194,9 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
|
|||
return new;
|
||||
}
|
||||
}
|
||||
overflow = atomic_add_negative(1, &ucounts->count);
|
||||
wrapped = !get_ucounts_or_wrap(ucounts);
|
||||
spin_unlock_irq(&ucounts_lock);
|
||||
if (overflow) {
|
||||
if (wrapped) {
|
||||
put_ucounts(ucounts);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -276,7 +282,7 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
|
|||
struct ucounts *iter;
|
||||
long new = -1; /* Silence compiler warning */
|
||||
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
|
||||
long dec = atomic_long_add_return(-v, &iter->ucount[type]);
|
||||
long dec = atomic_long_sub_return(v, &iter->ucount[type]);
|
||||
WARN_ON_ONCE(dec < 0);
|
||||
if (iter == ucounts)
|
||||
new = dec;
|
||||
|
@ -289,7 +295,7 @@ static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
|
|||
{
|
||||
struct ucounts *iter, *next;
|
||||
for (iter = ucounts; iter != last; iter = next) {
|
||||
long dec = atomic_long_add_return(-1, &iter->ucount[type]);
|
||||
long dec = atomic_long_sub_return(1, &iter->ucount[type]);
|
||||
WARN_ON_ONCE(dec < 0);
|
||||
next = iter->ns->ucounts;
|
||||
if (dec == 0)
|
||||
|
@ -326,7 +332,7 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
|
|||
}
|
||||
return ret;
|
||||
dec_unwind:
|
||||
dec = atomic_long_add_return(-1, &iter->ucount[type]);
|
||||
dec = atomic_long_sub_return(1, &iter->ucount[type]);
|
||||
WARN_ON_ONCE(dec < 0);
|
||||
unwind:
|
||||
do_dec_rlimit_put_ucounts(ucounts, iter, type);
|
||||
|
|
Loading…
Reference in New Issue