ceph: eliminate session->s_gen_ttl_lock
Turn the s_cap_gen field into an atomic_t, and just rely on the fact that
we hold the s_mutex when changing the s_cap_ttl field.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Luis Henriques <lhenriques@suse.de>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
parent 7e65624d32
commit 52d60f8e18
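The change itself is mechanical: every spin_lock(&s_gen_ttl_lock)/read/spin_unlock sequence around s_cap_gen collapses to a single atomic_read(), the writers become atomic_inc()/atomic_set(), and s_cap_ttl continues to be written only with s_mutex held. As a rough illustration of that locking pattern, here is a minimal userspace sketch using C11 atomics and a pthread mutex; it is not ceph code, and struct session, struct cap, mark_caps_stale() and cap_is_valid() are invented names standing in for the kernel structures:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct session {
	atomic_uint cap_gen;        /* bumped each time caps go stale */
	pthread_mutex_t s_mutex;    /* writers of cap_ttl hold this */
	time_t cap_ttl;             /* when the session's caps expire */
};

struct cap {
	unsigned int cap_gen;       /* generation the cap was issued under */
	struct session *session;
};

/* Writer side: bump the generation and expire the TTL under the mutex. */
static void mark_caps_stale(struct session *s)
{
	pthread_mutex_lock(&s->s_mutex);
	atomic_fetch_add(&s->cap_gen, 1);
	s->cap_ttl = time(NULL) - 1;
	pthread_mutex_unlock(&s->s_mutex);
}

/* Reader side: a plain atomic load replaces the old lock/read/unlock. */
static bool cap_is_valid(const struct cap *cap)
{
	unsigned int gen = atomic_load(&cap->session->cap_gen);
	time_t ttl = cap->session->cap_ttl;

	return cap->cap_gen >= gen && time(NULL) < ttl;
}

int main(void)
{
	struct session s = { .s_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct cap cap;

	atomic_init(&s.cap_gen, 1);
	s.cap_ttl = time(NULL) + 60;

	cap.cap_gen = atomic_load(&s.cap_gen);
	cap.session = &s;

	printf("valid before stale event: %d\n", cap_is_valid(&cap));
	mark_caps_stale(&s);   /* analogous to handling a stale-session message */
	printf("valid after stale event:  %d\n", cap_is_valid(&cap));
	return 0;
}

The design point matches the commit message: the generation counter only needs atomicity, not mutual exclusion, so readers can drop the dedicated spinlock, while s_cap_ttl updates stay safe because the paths that change it already run under s_mutex.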
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -645,9 +645,7 @@ void ceph_add_cap(struct inode *inode,
 	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
 	     session->s_mds, cap_id, ceph_cap_string(issued), seq);
 
-	spin_lock(&session->s_gen_ttl_lock);
-	gen = session->s_cap_gen;
-	spin_unlock(&session->s_gen_ttl_lock);
+	gen = atomic_read(&session->s_cap_gen);
 
 	cap = __get_cap_for_mds(ci, mds);
 	if (!cap) {
@@ -785,10 +783,8 @@ static int __cap_is_valid(struct ceph_cap *cap)
 	unsigned long ttl;
 	u32 gen;
 
-	spin_lock(&cap->session->s_gen_ttl_lock);
-	gen = cap->session->s_cap_gen;
+	gen = atomic_read(&cap->session->s_cap_gen);
 	ttl = cap->session->s_cap_ttl;
-	spin_unlock(&cap->session->s_gen_ttl_lock);
 
 	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
 		dout("__cap_is_valid %p cap %p issued %s "
@@ -1182,7 +1178,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 	 * s_cap_gen while session is in the reconnect state.
 	 */
 	if (queue_release &&
-	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
+	    (!session->s_cap_reconnect ||
+	     cap->cap_gen == atomic_read(&session->s_cap_gen))) {
 		cap->queue_release = 1;
 		if (removed) {
 			__ceph_queue_cap_release(session, cap);
@@ -3288,7 +3285,7 @@ static void handle_cap_grant(struct inode *inode,
 	u64 size = le64_to_cpu(grant->size);
 	u64 max_size = le64_to_cpu(grant->max_size);
 	unsigned char check_caps = 0;
-	bool was_stale = cap->cap_gen < session->s_cap_gen;
+	bool was_stale = cap->cap_gen < atomic_read(&session->s_cap_gen);
 	bool wake = false;
 	bool writeback = false;
 	bool queue_trunc = false;
@@ -3340,7 +3337,7 @@ static void handle_cap_grant(struct inode *inode,
 	}
 
 	/* side effects now are allowed */
-	cap->cap_gen = session->s_cap_gen;
+	cap->cap_gen = atomic_read(&session->s_cap_gen);
 	cap->seq = seq;
 
 	__check_cap_issue(ci, cap, newcaps);
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1548,10 +1548,8 @@ static bool __dentry_lease_is_valid(struct ceph_dentry_info *di)
 		u32 gen;
 		unsigned long ttl;
 
-		spin_lock(&session->s_gen_ttl_lock);
-		gen = session->s_cap_gen;
+		gen = atomic_read(&session->s_cap_gen);
 		ttl = session->s_cap_ttl;
-		spin_unlock(&session->s_gen_ttl_lock);
 
 		if (di->lease_gen == gen &&
 		    time_before(jiffies, ttl) &&
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1124,7 +1124,7 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
 		return;
 	}
 
-	if (di->lease_gen == session->s_cap_gen &&
+	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
 	    time_before(ttl, di->time))
 		return;	/* we already have a newer lease. */
 
@@ -1135,7 +1135,7 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
 
 	if (!di->lease_session)
 		di->lease_session = ceph_get_mds_session(session);
-	di->lease_gen = session->s_cap_gen;
+	di->lease_gen = atomic_read(&session->s_cap_gen);
 	di->lease_seq = le32_to_cpu(lease->seq);
 	di->lease_renew_after = half_ttl;
 	di->lease_renew_from = 0;
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -749,8 +749,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 
 	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
 
-	spin_lock_init(&s->s_gen_ttl_lock);
-	s->s_cap_gen = 1;
+	atomic_set(&s->s_cap_gen, 1);
 	s->s_cap_ttl = jiffies - 1;
 
 	spin_lock_init(&s->s_cap_lock);
@@ -1763,7 +1762,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 		ci->i_requested_max_size = 0;
 		spin_unlock(&ci->i_ceph_lock);
 	} else if (ev == RENEWCAPS) {
-		if (cap->cap_gen < cap->session->s_cap_gen) {
+		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
 			/* mds did not re-issue stale cap */
 			spin_lock(&ci->i_ceph_lock);
 			cap->issued = cap->implemented = CEPH_CAP_PIN;
@@ -3501,10 +3500,8 @@ static void handle_session(struct ceph_mds_session *session,
 	case CEPH_SESSION_STALE:
 		pr_info("mds%d caps went stale, renewing\n",
 			session->s_mds);
-		spin_lock(&session->s_gen_ttl_lock);
-		session->s_cap_gen++;
+		atomic_inc(&session->s_cap_gen);
 		session->s_cap_ttl = jiffies - 1;
-		spin_unlock(&session->s_gen_ttl_lock);
 		send_renew_caps(mdsc, session);
 		break;
 
@@ -3773,7 +3770,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
 	cap->seq = 0;        /* reset cap seq */
 	cap->issue_seq = 0;  /* and issue_seq */
 	cap->mseq = 0;       /* and migrate_seq */
-	cap->cap_gen = cap->session->s_cap_gen;
+	cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
 
 	/* These are lost when the session goes away */
 	if (S_ISDIR(inode->i_mode)) {
@@ -4013,9 +4010,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
 	dout("session %p state %s\n", session,
 	     ceph_session_state_name(session->s_state));
 
-	spin_lock(&session->s_gen_ttl_lock);
-	session->s_cap_gen++;
-	spin_unlock(&session->s_gen_ttl_lock);
+	atomic_inc(&session->s_cap_gen);
 
 	spin_lock(&session->s_cap_lock);
 	/* don't know if session is readonly */
@@ -4346,7 +4341,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 
 	case CEPH_MDS_LEASE_RENEW:
 		if (di->lease_session == session &&
-		    di->lease_gen == session->s_cap_gen &&
+		    di->lease_gen == atomic_read(&session->s_cap_gen) &&
 		    di->lease_renew_from &&
 		    di->lease_renew_after == 0) {
 			unsigned long duration =
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -186,10 +186,8 @@ struct ceph_mds_session {
 
 	struct ceph_auth_handshake s_auth;
 
-	/* protected by s_gen_ttl_lock */
-	spinlock_t        s_gen_ttl_lock;
-	u32               s_cap_gen;  /* inc each time we get mds stale msg */
-	unsigned long     s_cap_ttl;  /* when session caps expire */
+	atomic_t          s_cap_gen;  /* inc each time we get mds stale msg */
+	unsigned long     s_cap_ttl;  /* when session caps expire. protected by s_mutex */
 
 	/* protected by s_cap_lock */
 	spinlock_t        s_cap_lock;