bpf: Generalize caching for sk_storage.

Provide the ability to define local storage caches on a per-object
type basis. The caches and caching indices for different objects
should not be intermixed, as suggested in:

  https://lore.kernel.org/bpf/20200630193441.kdwnkestulg5erii@kafai-mbp.dhcp.thefacebook.com/

  "Caching a sk-storage at idx=0 of a sk should not stop an
  inode-storage to be cached at the same idx of a inode."
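
To illustrate the intent, here is a minimal sketch (not part of this
patch) of how a second object type could define its own independent
cache with the new API; the inode-storage user below is hypothetical:

  /* Hypothetical example: inode-storage defines its own cache, so its
   * index accounting never interacts with sk_cache's.
   */
  DEFINE_BPF_STORAGE_CACHE(inode_cache);

  static u16 example_inode_map_pick_idx(void)
  {
  	/* Drawn from inode_cache's usage counts only: an sk-storage
  	 * map cached at idx 0 does not stop an inode-storage map
  	 * from also being cached at idx 0.
  	 */
  	return bpf_local_storage_cache_idx_get(&inode_cache);
  }

  static void example_inode_map_drop_idx(u16 idx)
  {
  	bpf_local_storage_cache_idx_free(&inode_cache, idx);
  }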

Signed-off-by: KP Singh <kpsingh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20200825182919.1118197-3-kpsingh@chromium.org
---
 include/net/bpf_sk_storage.h | 19 +++++++++++++++++++
 net/core/bpf_sk_storage.c    | 31 +++++++++++++------------------
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -3,6 +3,9 @@
 #ifndef _BPF_SK_STORAGE_H
 #define _BPF_SK_STORAGE_H
 
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
 struct sock;
 
 void bpf_sk_storage_free(struct sock *sk);
@@ -15,6 +18,22 @@ struct sk_buff;
 struct nlattr;
 struct sock;
 
+#define BPF_LOCAL_STORAGE_CACHE_SIZE	16
+
+struct bpf_local_storage_cache {
+	spinlock_t idx_lock;
+	u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name)				\
+static struct bpf_local_storage_cache name = {			\
+	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
+}
+
+u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
+void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+				      u16 idx);
+
 #ifdef CONFIG_BPF_SYSCALL
 int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
 struct bpf_sk_storage_diag *
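
The cache_idx handed out by this API indexes the fixed-size cache array
in each object's struct bpf_local_storage. As a simplified sketch of
the fast path this enables (modeled on the existing sk_storage lookup;
locking and error handling omitted):

  /* Sketch: consult the per-object cache slot assigned to this map
   * before falling back to the hash-bucket search.
   */
  static struct bpf_local_storage_data *
  cached_lookup(struct bpf_local_storage *local_storage,
  	      struct bpf_local_storage_map *smap)
  {
  	struct bpf_local_storage_data *sdata;

  	sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
  	if (sdata && rcu_access_pointer(sdata->smap) == smap)
  		return sdata;	/* cached slot hit */

  	return NULL;	/* caller searches the bucket list */
  }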

diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -14,6 +14,8 @@
 
 #define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
 
+DEFINE_BPF_STORAGE_CACHE(sk_cache);
+
 struct bpf_local_storage_map_bucket {
 	struct hlist_head list;
 	raw_spinlock_t lock;
@@ -78,10 +80,6 @@ struct bpf_local_storage_elem {
 #define SELEM(_SDATA)							\
 	container_of((_SDATA), struct bpf_local_storage_elem, sdata)
 #define SDATA(_SELEM) (&(_SELEM)->sdata)
-#define BPF_LOCAL_STORAGE_CACHE_SIZE	16
-
-static DEFINE_SPINLOCK(cache_idx_lock);
-static u64 cache_idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
 
 struct bpf_local_storage {
 	struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
@@ -521,16 +519,16 @@ static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
 	return 0;
 }
 
-static u16 cache_idx_get(void)
+u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
 {
 	u64 min_usage = U64_MAX;
 	u16 i, res = 0;
 
-	spin_lock(&cache_idx_lock);
+	spin_lock(&cache->idx_lock);
 
 	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
-		if (cache_idx_usage_counts[i] < min_usage) {
-			min_usage = cache_idx_usage_counts[i];
+		if (cache->idx_usage_counts[i] < min_usage) {
+			min_usage = cache->idx_usage_counts[i];
 			res = i;
 
 			/* Found a free cache_idx */
@@ -538,18 +536,19 @@ static u16 cache_idx_get(void)
 			break;
 		}
 	}
-	cache_idx_usage_counts[res]++;
+	cache->idx_usage_counts[res]++;
 
-	spin_unlock(&cache_idx_lock);
+	spin_unlock(&cache->idx_lock);
 
 	return res;
 }
 
-static void cache_idx_free(u16 idx)
+void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+				      u16 idx)
 {
-	spin_lock(&cache_idx_lock);
-	cache_idx_usage_counts[idx]--;
-	spin_unlock(&cache_idx_lock);
+	spin_lock(&cache->idx_lock);
+	cache->idx_usage_counts[idx]--;
+	spin_unlock(&cache->idx_lock);
 }
 
 /* Called by __sk_destruct() & bpf_sk_storage_clone() */
@@ -601,7 +600,7 @@ static void bpf_local_storage_map_free(struct bpf_map *map)
 
 	smap = (struct bpf_local_storage_map *)map;
 
-	cache_idx_free(smap->cache_idx);
+	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
 
 	/* Note that this map might be concurrently cloned from
 	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
@@ -718,7 +717,7 @@ static struct bpf_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
 	smap->elem_size =
 		sizeof(struct bpf_local_storage_elem) + attr->value_size;
 
-	smap->cache_idx = cache_idx_get();
+	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
 
 	return &smap->map;
 }
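
To make the allocation policy concrete, here is a small userspace mock
of the same least-used selection (single-threaded, so the spinlock is
dropped; names are illustrative, not kernel API):

  #include <stdint.h>
  #include <stdio.h>

  #define CACHE_SIZE 16

  static uint64_t usage[CACHE_SIZE];

  /* Mirrors bpf_local_storage_cache_idx_get(): prefer the first slot
   * with a zero count, otherwise the least-used slot, then bump that
   * slot's usage count.
   */
  static uint16_t idx_get(void)
  {
  	uint64_t min_usage = UINT64_MAX;
  	uint16_t i, res = 0;

  	for (i = 0; i < CACHE_SIZE; i++) {
  		if (usage[i] < min_usage) {
  			min_usage = usage[i];
  			res = i;
  			if (!min_usage)
  				break; /* found a free slot */
  		}
  	}
  	usage[res]++;
  	return res;
  }

  int main(void)
  {
  	/* Maps 0..15 get distinct slots; map 16 wraps around and
  	 * shares slot 0 with map 0 (lowest usage count wins).
  	 */
  	for (int n = 0; n < 17; n++)
  		printf("map %2d -> cache_idx %u\n", n,
  		       (unsigned)idx_get());
  	return 0;
  }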