SUNRPC: Use atomic(64)_t for seq_send(64)
The seq_send & seq_send64 fields in struct krb5_ctx are used as
atomically incrementing counters. This is implemented using cmpxchg() &
cmpxchg64() to implement what amount to custom versions of
atomic_fetch_inc() & atomic64_fetch_inc().

Besides the duplication, using cmpxchg64() has another major drawback
in that some 32 bit architectures don't provide it. As such, commit
571ed1fd23 ("SUNRPC: Replace krb5_seq_lock with a lockless scheme")
resulted in build failures for some architectures.

Change seq_send to be an atomic_t and seq_send64 to be an atomic64_t,
then use atomic(64)_* functions to manipulate the values. The
atomic64_t type & associated functions are provided even on
architectures which lack real 64 bit atomic memory access, via
CONFIG_GENERIC_ATOMIC64 which uses spinlocks to serialize access. This
fixes the build failures for architectures lacking cmpxchg64().

A potential alternative that was raised would be to provide cmpxchg64()
on the 32 bit architectures that currently lack it, using spinlocks.
However this would provide a version of cmpxchg64() with semantics a
little different to the implementations on architectures with real 64
bit atomics - the spinlock-based implementation would only work if all
access to the memory used with cmpxchg64() is *always* performed using
cmpxchg64(). That is not currently a requirement for users of
cmpxchg64(), and making it one seems questionable. As such, avoiding
cmpxchg64() outside of architecture-specific code seems best,
particularly in cases where atomic64_t seems like a better fit anyway.

The CONFIG_GENERIC_ATOMIC64 implementation of atomic64_* functions will
use spinlocks & so faces the same issue, but with the key difference
that the memory backing an atomic64_t ought to always be accessed via
the atomic64_* functions anyway, making the issue moot.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Fixes: 571ed1fd23 ("SUNRPC: Replace krb5_seq_lock with a lockless scheme")
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <anna.schumaker@netapp.com>
Cc: J. Bruce Fields <bfields@fieldses.org>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: linux-nfs@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
commit c3be6577d8
parent 86bbd7422a
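[Editorial aside, not part of the patch: to make the CONFIG_GENERIC_ATOMIC64
argument concrete, the generic implementation serializes every atomic64_*
operation with a lock (the kernel's lib/atomic64.c hashes the variable's
address to one of an array of spinlocks). A simplified sketch with a single
pthread mutex standing in for the spinlock; illustrative only, the names
are hypothetical.]

#include <pthread.h>
#include <stdint.h>

/* Stand-in for the generic atomic64_t: a plain 64-bit value whose every
 * access is funneled through a lock. */
struct gen_atomic64 {
	int64_t counter;
	pthread_mutex_t lock;
};

static struct gen_atomic64 seq64 = {
	.counter = 0,
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Equivalent of atomic64_fetch_inc(): returns the pre-increment value. */
static int64_t gen_atomic64_fetch_inc(struct gen_atomic64 *v)
{
	pthread_mutex_lock(&v->lock);
	int64_t old = v->counter++;
	pthread_mutex_unlock(&v->lock);
	return old;
}

[This is safe precisely because the counter is only ever touched through
the gen_atomic64_* helpers. A spinlock-based cmpxchg64() would need the
same all-access-through-the-lock rule, which existing cmpxchg64() callers
do not follow; hence the commit's preference for atomic64_t.]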
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -107,8 +107,8 @@ struct krb5_ctx {
 	u8			Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
 	u8			cksum[GSS_KRB5_MAX_KEYLEN];
 	s32			endtime;
-	u32			seq_send;
-	u64			seq_send64;
+	atomic_t		seq_send;
+	atomic64_t		seq_send64;
 	struct xdr_netobj	mech_used;
 	u8			initiator_sign[GSS_KRB5_MAX_KEYLEN];
 	u8			acceptor_sign[GSS_KRB5_MAX_KEYLEN];
@@ -118,9 +118,6 @@ struct krb5_ctx {
 	u8			acceptor_integ[GSS_KRB5_MAX_KEYLEN];
 };
 
-extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
-extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
-
 /* The length of the Kerberos GSS token header */
 #define GSS_KRB5_TOK_HDR_LEN	(16)
 
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -275,6 +275,7 @@ out_err:
 static int
 gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 {
+	u32 seq_send;
 	int tmp;
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
@@ -316,9 +317,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
+	p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic_set(&ctx->seq_send, seq_send);
 	p = simple_get_netobj(p, end, &ctx->mech_used);
 	if (IS_ERR(p))
 		goto out_err;
@@ -610,6 +612,7 @@ static int
 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 		gfp_t gfp_mask)
 {
+	u64 seq_send64;
 	int keylen;
 
 	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
@@ -620,14 +623,15 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err;
-	p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+	p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
 	if (IS_ERR(p))
 		goto out_err;
+	atomic64_set(&ctx->seq_send64, seq_send64);
 	/* set seq_send for use by "older" enctypes */
-	ctx->seq_send = ctx->seq_send64;
-	if (ctx->seq_send64 != ctx->seq_send) {
-		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-			(unsigned long)ctx->seq_send64, ctx->seq_send);
+	atomic_set(&ctx->seq_send, seq_send64);
+	if (seq_send64 != atomic_read(&ctx->seq_send)) {
+		dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__,
+			seq_send64, atomic_read(&ctx->seq_send));
 		p = ERR_PTR(-EINVAL);
 		goto out_err;
 	}
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -123,30 +123,6 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
 	return krb5_hdr;
 }
 
-u32
-gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u32 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
-u64
-gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
-{
-	u64 old, seq_send = READ_ONCE(ctx->seq_send);
-
-	do {
-		old = seq_send;
-		seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
-	} while (old != seq_send);
-	return seq_send;
-}
-
 static u32
 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 		struct xdr_netobj *token)
@@ -177,7 +153,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	seq_send = gss_seq_send_fetch_and_inc(ctx);
+	seq_send = atomic_fetch_inc(&ctx->seq_send);
 
 	if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
 			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -205,7 +181,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	/* Set up the sequence number. Now 64-bits in clear
 	 * text and w/o direction indicator */
-	seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
+	seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));
 	memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
 
 	if (ctx->initiate) {
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -228,7 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	seq_send = gss_seq_send_fetch_and_inc(kctx);
+	seq_send = atomic_fetch_inc(&kctx->seq_send);
 
 	/* XXX would probably be more efficient to compute checksum
 	 * and encrypt at the same time: */
@@ -475,7 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 	*be16ptr++ = 0;
 
 	be64ptr = (__be64 *)be16ptr;
-	*be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
+	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));
 
 	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
 	if (err)