/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * per net namespace data structures for nfsd
 *
 * Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
 */

#ifndef __NFSD_NETNS_H__
#define __NFSD_NETNS_H__

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/percpu_counter.h>
#include <linux/siphash.h>

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS		4
#define CLIENT_HASH_SIZE		(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK		(CLIENT_HASH_SIZE - 1)

#define SESSION_HASH_SIZE	512
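
/*
 * Illustrative sketch (not part of the original header): CLIENT_HASH_MASK is
 * intended for picking a client hash bucket by masking the low bits of a
 * clientid, roughly as below. The helper name is hypothetical; the real
 * hashing lives in the nfsd state-tracking code.
 */
static inline unsigned int nfsd_clientid_hashval_example(u32 id)
{
	return id & CLIENT_HASH_MASK;
}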

struct cld_net;
struct nfsd4_client_tracking_ops;

enum {
	/* cache misses due only to checksum comparison failures */
	NFSD_NET_PAYLOAD_MISSES,
	/* amount of memory (in bytes) currently consumed by the DRC */
	NFSD_NET_DRC_MEM_USAGE,
	NFSD_NET_COUNTERS_NUM
};

/*
 * Represents an nfsd "container". With respect to NFSv4 state tracking, the
 * fields of interest are the *_id_hashtbls and the *_name_trees. These track
 * the nfs4_client objects by either short- or long-form clientid.
 *
 * Each nfsd_net runs an nfs4_laundromat workqueue job when necessary to clean
 * up expired clients and delegations within the container.
 */
struct nfsd_net {
	struct cld_net *cld_net;

	struct cache_detail *svc_expkey_cache;
	struct cache_detail *svc_export_cache;

	struct cache_detail *idtoname_cache;
	struct cache_detail *nametoid_cache;

	struct lock_manager nfsd4_manager;
	bool grace_ended;
	time64_t boot_time;

	struct dentry *nfsd_client_dir;

	/*
	 * reclaim_str_hashtbl[] holds known client info from a previous
	 * reset/reboot, used in reboot/reset lease grace period processing.
	 *
	 * conf_id_hashtbl[] and conf_name_tree hold confirmed
	 * (setclientid_confirmed) client info.
	 *
	 * unconf_id_hashtbl[] and unconf_name_tree hold unconfirmed
	 * (setclientid-only) client info.
	 */
	struct list_head *reclaim_str_hashtbl;
	int reclaim_str_hashtbl_size;
	struct list_head *conf_id_hashtbl;
	struct rb_root conf_name_tree;
	struct list_head *unconf_id_hashtbl;
	struct rb_root unconf_name_tree;
	struct list_head *sessionid_hashtbl;
	/*
	 * client_lru holds the client queue ordered by nfs4_client.cl_time
	 * for lease renewal.
	 *
	 * close_lru holds the (open) stateowner queue ordered by
	 * nfs4_stateowner.so_time for last close replay.
	 *
	 * All of the above fields are protected by the client_mutex.
	 */
	struct list_head client_lru;
	struct list_head close_lru;
	struct list_head del_recall_lru;

	/* protected by blocked_locks_lock */
	struct list_head blocked_locks_lru;

	struct delayed_work laundromat_work;

	/* client_lock protects the client lru list and session hash table */
	spinlock_t client_lock;

	/* protects blocked_locks_lru */
	spinlock_t blocked_locks_lock;

	struct file *rec_file;
	bool in_grace;
	const struct nfsd4_client_tracking_ops *client_tracking_ops;

	time64_t nfsd4_lease;
	time64_t nfsd4_grace;
	bool somebody_reclaimed;

	bool track_reclaim_completes;
	atomic_t nr_reclaim_complete;

	bool nfsd_net_up;
	bool lockd_up;

	seqlock_t writeverf_lock;
	unsigned char writeverf[8];

	/*
	 * Max number of connections this nfsd container will allow. Defaults
	 * to '0', which means the limit is based on the number of threads.
	 */
	unsigned int max_connections;

	u32 clientid_base;
	u32 clientid_counter;
	u32 clverifier_counter;

	struct svc_serv *nfsd_serv;
	/* When a listening socket is added to nfsd, keep_active is set
	 * and this justifies a reference on nfsd_serv. This stops
	 * nfsd_serv from being freed. When the number of threads is
	 * set, keep_active is cleared and the reference is dropped. So
	 * when the last thread exits, the service will be destroyed.
	 */
	int keep_active;

	/*
	 * clientid and stateid data for construction of net unique COPY
	 * stateids.
	 */
	u32 s2s_cp_cl_id;
	struct idr s2s_cp_stateids;
	spinlock_t s2s_cp_lock;

	/*
	 * Version information
	 */
	bool *nfsd_versions;
	bool *nfsd4_minorversions;

	/*
	 * Duplicate reply cache
	 */
	struct nfsd_drc_bucket *drc_hashtbl;

	/* max number of entries allowed in the cache */
	unsigned int max_drc_entries;

	/* number of significant bits in the hash value */
	unsigned int maskbits;
	unsigned int drc_hashsize;

	/*
	 * Stats and other tracking of the duplicate reply cache.
	 * The longest_chain* fields are modified with only the per-bucket
	 * cache lock held, which isn't really safe and should be fixed if we
	 * want these statistics to be completely accurate.
	 */

	/* total number of entries */
	atomic_t num_drc_entries;

	/* Per-netns stats counters */
	struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];

	/* longest hash chain seen */
	unsigned int longest_chain;

	/* size of cache when we saw the longest hash chain */
	unsigned int longest_chain_cachesize;

	struct shrinker nfsd_reply_cache_shrinker;

	/* tracking server-to-server copy mounts */
	spinlock_t nfsd_ssc_lock;
	struct list_head nfsd_ssc_mount_list;
	wait_queue_head_t nfsd_ssc_waitq;

	/* utsname taken from the process that starts the server */
	char nfsd_name[UNX_MAXNODENAME+1];

	struct nfsd_fcache_disposal *fcache_disposal;

	siphash_key_t siphash_key;

	atomic_t nfs4_client_count;
	int nfs4_max_clients;

	atomic_t nfsd_courtesy_clients;
	struct shrinker nfsd_client_shrinker;
	struct delayed_work nfsd_shrinker_work;
};
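
/*
 * Illustrative sketch (not part of the original header): the per-netns
 * counter[] array above is a set of percpu_counters indexed by the
 * NFSD_NET_* enum, so a stat update is expected to look roughly like this
 * hypothetical helper.
 */
static inline void nfsd_net_drc_mem_usage_add_example(struct nfsd_net *nn,
						      unsigned int amount)
{
	percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}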

/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)

extern void nfsd_netns_free_versions(struct nfsd_net *nn);

extern unsigned int nfsd_net_id;
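
/*
 * Illustrative sketch (not part of the original header): nfsd code reaches
 * its per-net data through net_generic() keyed by nfsd_net_id. The helper
 * name below is hypothetical.
 */
static inline struct nfsd_net *nfsd_net_from_example(const struct net *net)
{
	return net_generic(net, nfsd_net_id);
}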

void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
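
/*
 * Illustrative sketch (not part of the original header): writeverf is
 * protected by the writeverf_lock seqlock, so a reader copies it inside the
 * usual seqlock retry loop, roughly as in this hypothetical helper (the
 * real nfsd_copy_write_verifier() may differ in detail).
 */
static inline void nfsd_copy_write_verifier_example(__be32 verf[2],
						    struct nfsd_net *nn)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&nn->writeverf_lock);
		memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
	} while (read_seqretry(&nn->writeverf_lock, seq));
}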

#endif /* __NFSD_NETNS_H__ */