af_unix: Refactor unix_next_socket().
Currently, unix_next_socket() is overloaded depending on the 2nd argument. If it is NULL, unix_next_socket() returns the first socket in the hash. If not NULL, it returns the next socket in the same hash list or the first socket in the next non-empty hash list.

This patch refactors unix_next_socket() into two functions unix_get_first() and unix_get_next(). unix_get_first() newly acquires a lock and returns the first socket in the list. unix_get_next() returns the next socket in a list or releases a lock and falls back to unix_get_first().

In the following patch, bpf iter holds entire sockets in a list and always releases the lock before .show(). It always calls unix_get_first() to acquire a lock in each iteration. So, this patch makes the change easier to follow.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Link: https://lore.kernel.org/r/20220113002849.4384-2-kuniyu@amazon.co.jp
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
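To make the new control flow easier to picture, here is a minimal, self-contained user-space sketch of the same iteration pattern. It is an illustration only, not kernel code: a per-bucket pthread mutex stands in for unix_table_locks, a plain singly linked list stands in for the socket hash chains, and get_first(), get_next(), table[] and struct node are made-up names. get_first() locks the next non-empty bucket and returns its head; get_next() either advances inside the locked bucket or releases that lock and falls back to get_first(), which is the hand-off the two new helpers perform (minus the per-netns filtering the kernel code does).

#include <pthread.h>
#include <stdio.h>

#define NR_BUCKETS 4

struct node {
	int val;
	struct node *next;
};

static struct node *table[NR_BUCKETS];
static pthread_mutex_t table_locks[NR_BUCKETS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Lock the next non-empty bucket and return its first node; the bucket
 * lock is held on a non-NULL return (the role unix_get_first() plays). */
static struct node *get_first(unsigned long *bucket)
{
	while (*bucket < NR_BUCKETS) {
		pthread_mutex_lock(&table_locks[*bucket]);
		if (table[*bucket])
			return table[*bucket];
		pthread_mutex_unlock(&table_locks[*bucket]);
		(*bucket)++;
	}
	return NULL;
}

/* Advance within the currently locked bucket, or drop its lock and fall
 * back to get_first() for the remaining buckets (the role unix_get_next()
 * plays in the patch). */
static struct node *get_next(struct node *node, unsigned long *bucket)
{
	if (node->next)
		return node->next;

	pthread_mutex_unlock(&table_locks[*bucket]);
	(*bucket)++;
	return get_first(bucket);
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	unsigned long bucket = 0;
	struct node *node;

	a.next = &b;	/* bucket 0: 1 -> 2 */
	table[0] = &a;
	table[2] = &c;	/* bucket 2: 3 */

	/* Walk every node; each step runs with the owning bucket locked,
	 * and the lock is handed off when crossing into the next bucket. */
	for (node = get_first(&bucket); node; node = get_next(node, &bucket))
		printf("%d\n", node->val);

	return 0;
}

With this split, all lock acquisition happens in one place: a consumer that drops the lock between steps, as the commit message says the bpf iterator in the follow-up patch does before .show(), can simply re-enter through unix_get_first() on every iteration.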
parent 2a1aff6035
commit 4408d55a64
@@ -3240,49 +3240,58 @@ static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
 		return sk;
 	}
 
-static struct sock *unix_next_socket(struct seq_file *seq,
-				     struct sock *sk,
-				     loff_t *pos)
+static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
 {
 	unsigned long bucket = get_bucket(*pos);
+	struct sock *sk;
 
-	while (sk > (struct sock *)SEQ_START_TOKEN) {
-		sk = sk_next(sk);
-		if (!sk)
-			goto next_bucket;
-		if (sock_net(sk) == seq_file_net(seq))
-			return sk;
-	}
-
-	do {
+	while (bucket < ARRAY_SIZE(unix_socket_table)) {
 		spin_lock(&unix_table_locks[bucket]);
+
 		sk = unix_from_bucket(seq, pos);
 		if (sk)
 			return sk;
 
-next_bucket:
-		spin_unlock(&unix_table_locks[bucket++]);
-		*pos = set_bucket_offset(bucket, 1);
-	} while (bucket < ARRAY_SIZE(unix_socket_table));
+		spin_unlock(&unix_table_locks[bucket]);
+
+		*pos = set_bucket_offset(++bucket, 1);
+	}
 
 	return NULL;
 }
 
+static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
+				  loff_t *pos)
+{
+	unsigned long bucket = get_bucket(*pos);
+
+	for (sk = sk_next(sk); sk; sk = sk_next(sk))
+		if (sock_net(sk) == seq_file_net(seq))
+			return sk;
+
+	spin_unlock(&unix_table_locks[bucket]);
+
+	*pos = set_bucket_offset(++bucket, 1);
+
+	return unix_get_first(seq, pos);
+}
+
 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
-	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
-		return NULL;
-
-	return unix_next_socket(seq, NULL, pos);
+	return unix_get_first(seq, pos);
 }
 
 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	return unix_next_socket(seq, v, pos);
+
+	if (v == SEQ_START_TOKEN)
+		return unix_get_first(seq, pos);
+
+	return unix_get_next(seq, v, pos);
 }
 
 static void unix_seq_stop(struct seq_file *seq, void *v)