// SPDX-License-Identifier: BSD-3-Clause
/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

#include <trace/events/rpcgss.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
static void gss_put_auth(struct gss_auth *gss_auth);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	refcount_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_atomic();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
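/*
 * Helpers for parsing the gssd downcall buffer: copy out a fixed-size
 * value (simple_get_bytes) or a length-prefixed opaque blob
 * (simple_get_netobj), checking that every read stays inside [p, end).
 */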
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	ctx = rcu_dereference(gss_cred->gc_ctx);
	if (ctx)
		gss_get_ctx(ctx);
	rcu_read_unlock();
	return ctx;
}
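/*
 * Allocate an empty client context; the caller fills it in from a
 * gssd downcall via gss_fill_context().
 */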
static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		refcount_set(&ctx->count, 1);
	}
	return ctx;
}
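/*
 * Parse a gssd downcall into @ctx: remaining credential lifetime,
 * sequence window, the opaque wire context, the imported security
 * context, and an optional trailing acceptor name.
 */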
#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		trace_rpcgss_import_ctx(ret);
		p = ERR_PTR(ret);
		goto err;
	}

	/* is there any trailing data? */
	if (q == end) {
		p = q;
		goto done;
	}

	/* pull in acceptor name (if there is one) */
	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
	if (IS_ERR(p))
		goto err;
done:
	trace_rpcgss_context(ctx->gc_expiry, now, timeout,
			     ctx->gc_acceptor.len, ctx->gc_acceptor.data);
err:
	return p;
}
/* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
 *	Is user space expecting no more than UPCALL_BUF_LEN bytes?
 *	Note that there are now _two_ NI_MAXHOST sized data items
 *	being passed in this string.
 */
#define UPCALL_BUF_LEN	256

struct gss_upcall_msg {
	refcount_t count;
	kuid_t	uid;
	const char *service_name;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};
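/*
 * Per-net pipe version tracking: get_pipe_version() bumps the pipe
 * users count and reports which upcall format (v0 or v1) is in use,
 * or -EAGAIN if no pipe has been opened yet; put_pipe_version() drops
 * that reference and resets the version when the last user goes away.
 */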
static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}
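/*
 * Drop a reference on an upcall message; on the last put, release the
 * pipe version, the context, and the gss_auth, then free the message.
 */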
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!refcount_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	gss_put_auth(gss_msg->auth);
	kfree_const(gss_msg->service_name);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		if (auth && pos->auth->service != auth->service)
			continue;
		refcount_inc(&pos->count);
		return pos;
	}
	return NULL;
}
/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
	if (old == NULL) {
		refcount_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	refcount_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}
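/*
 * Apply a completed downcall to the credential: install the new
 * context on success, mark the credential negative on -EKEYEXPIRED,
 * and wake up any tasks waiting on this upcall.
 */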
static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}
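/*
 * Legacy (v0) upcall format: the message body is just the requesting
 * uid, translated into the reader's user namespace at read time.
 */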
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
			      const struct cred *cred)
{
	struct user_namespace *userns = cred->user_ns;

	uid_t uid = from_kuid_munged(userns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);

	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
}

static ssize_t
gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
		char __user *buf, size_t buflen)
{
	struct gss_upcall_msg *gss_msg = container_of(msg,
						      struct gss_upcall_msg,
						      msg);
	if (msg->copied == 0)
		gss_encode_v0_msg(gss_msg, file->f_cred);
	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
}
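/*
 * Text-based (v1) upcall format: build a "mech= uid= [target=]
 * [service= srchost=] [enctypes=]" line in gss_msg->databuf for
 * gssd to read from the pipe.
 */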
static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name,
				const struct cred *cred)
{
	struct user_namespace *userns = cred->user_ns;
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	size_t buflen = sizeof(gss_msg->databuf);
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
			from_kuid_munged(userns, gss_msg->uid));
	buflen -= len;
	p += len;
	gss_msg->msg.len = len;

	/*
	 * target= is a full service principal that names the remote
	 * identity that we are authenticating to.
	 */
	if (target_name) {
		len = scnprintf(p, buflen, " target=%s", target_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}

	/*
	 * gssd uses service= and srchost= to select a matching key from
	 * the system's keytab to use as the source principal.
	 *
	 * service= is the service name part of the source principal,
	 * or "*" (meaning choose any).
	 *
	 * srchost= is the hostname part of the source principal. When
	 * not provided, gssd uses the local hostname.
	 */
	if (service_name) {
		char *c = strchr(service_name, '@');

		if (!c)
			len = scnprintf(p, buflen, " service=%s",
					service_name);
		else
			len = scnprintf(p, buflen,
					" service=%.*s srchost=%s",
					(int)(c - service_name),
					service_name, c + 1);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}

	if (mech->gm_upcall_enctypes) {
		len = scnprintf(p, buflen, " enctypes=%s",
				mech->gm_upcall_enctypes);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	trace_rpcgss_upcall_msg(gss_msg->databuf);
	len = scnprintf(p, buflen, "\n");
	if (len == 0)
		goto out_overflow;
	gss_msg->msg.len += len;
	gss_msg->msg.data = gss_msg->databuf;
	return 0;
out_overflow:
	WARN_ON_ONCE(1);
	return -ENOMEM;
}

static ssize_t
gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
		char __user *buf, size_t buflen)
{
	struct gss_upcall_msg *gss_msg = container_of(msg,
						      struct gss_upcall_msg,
						      msg);
	int err;
	if (msg->copied == 0) {
		err = gss_encode_v1_msg(gss_msg,
					gss_msg->service_name,
					gss_msg->auth->target_name,
					file->f_cred);
		if (err)
			return err;
	}
	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
}
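/*
 * Allocate and initialize an upcall message for @uid, pinning the
 * current pipe version and taking a reference on @gss_auth.
 */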
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;
	int err = -ENOMEM;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		goto err;
	vers = get_pipe_version(gss_auth->net);
	err = vers;
	if (err < 0)
		goto err_free_msg;
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	refcount_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	kref_get(&gss_auth->kref);
	if (service_name) {
		gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
		if (!gss_msg->service_name) {
			err = -ENOMEM;
			goto err_put_pipe_version;
		}
	}
	return gss_msg;
err_put_pipe_version:
	put_pipe_version(gss_auth->net);
err_free_msg:
	kfree(gss_msg);
err:
	return ERR_PTR(err);
}
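/*
 * Allocate an upcall for @cred and queue it to gssd, or return a
 * reference to an equivalent upcall that is already in flight.
 */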
static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_cred->fsuid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res;
		refcount_inc(&gss_msg->count);
		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			refcount_dec(&gss_msg->count);
			gss_release_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}
static void warn_gssd(void)
{
	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
}
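/*
 * Queue an upcall for the task's credential and put the task to sleep
 * until gssd's downcall arrives (or an error is reported).
 */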
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
				task, NULL, jiffies + (15 * HZ));
		err = -EAGAIN;
		goto out;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		refcount_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
					     cred->cr_cred->fsuid), err);
	return err;
}
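/*
 * Synchronous variant: queue the upcall and block (killable) until
 * gssd replies with a context or an error, retrying while the pipe
 * version is not yet established.
 */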
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err;

retry:
	err = 0;
	/* if gssd is down, just skip upcalling altogether */
	if (!gssd_running(net)) {
		warn_gssd();
		err = -EACCES;
		goto out;
	}
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, 15 * HZ);
		if (sn->pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
					     cred->cr_cred->fsuid), err);
	return err;
}
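/*
 * gssd writes its reply into the pipe: parse the uid, find the
 * matching upcall, and fill in the GSS context (or record an error
 * on the upcall message).
 */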
#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(current_user_ns(), id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid, NULL);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			gss_msg->msg.errno = -EIO;
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	return err;
}
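/*
 * The first pipe gssd opens fixes the per-net upcall version; later
 * opens of the other version are refused with -EBUSY.
 */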
static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;

}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		refcount_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		refcount_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
	gss_release_msg(gss_msg);
}

static void gss_pipe_dentry_destroy(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *gss_pipe = pdo->pdo_data;
	struct rpc_pipe *pipe = gss_pipe->pipe;

	if (pipe->dentry != NULL) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

static int gss_pipe_dentry_create(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *p = pdo->pdo_data;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	p->pipe->dentry = dentry;
	return 0;
}
static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create = gss_pipe_dentry_create,
	.destroy = gss_pipe_dentry_destroy,
};

static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}

struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};

static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
		return 0;
	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
	if (strcmp(gss_pipe->name, args->name) != 0)
		return 0;
	if (!kref_get_unless_zero(&gss_pipe->kref))
		return 0;
	return 1;
}

static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
	if (!IS_ERR(gss_pipe))
		return &gss_pipe->pdo;
	return NULL;
}
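/*
 * Look up (or create) the shared gss_pipe for this client and upcall
 * name, returning it with a reference held.
 */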
static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
|
|
|
|
const char *name,
|
|
|
|
const struct rpc_pipe_ops *upcall_ops)
|
|
|
|
{
|
|
|
|
struct net *net = rpc_net_ns(clnt);
|
|
|
|
struct rpc_pipe_dir_object *pdo;
|
|
|
|
struct gss_alloc_pdo args = {
|
|
|
|
.clnt = clnt,
|
|
|
|
.name = name,
|
|
|
|
.upcall_ops = upcall_ops,
|
|
|
|
};
|
|
|
|
|
|
|
|
pdo = rpc_find_or_alloc_pipe_dir_object(net,
|
|
|
|
&clnt->cl_pipedir_objects,
|
|
|
|
gss_pipe_match_pdo,
|
|
|
|
gss_pipe_alloc_pdo,
|
|
|
|
&args);
|
|
|
|
if (pdo != NULL)
|
|
|
|
return container_of(pdo, struct gss_pipe, pdo);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2013-08-27 04:44:42 +08:00
|
|
|
static void __gss_pipe_free(struct gss_pipe *p)
|
2012-01-11 23:18:09 +08:00
|
|
|
{
|
2013-08-27 04:44:42 +08:00
|
|
|
struct rpc_clnt *clnt = p->clnt;
|
|
|
|
struct net *net = rpc_net_ns(clnt);
|
2012-01-11 23:18:09 +08:00
|
|
|
|
2013-08-27 04:44:42 +08:00
|
|
|
rpc_remove_pipe_dir_object(net,
|
|
|
|
&clnt->cl_pipedir_objects,
|
|
|
|
&p->pdo);
|
|
|
|
rpc_destroy_pipe_data(p->pipe);
|
|
|
|
kfree(p);
|
2012-01-11 23:18:09 +08:00
|
|
|
}
|
|
|
|
|
2013-08-28 04:52:16 +08:00
|
|
|
static void __gss_pipe_release(struct kref *kref)
|
|
|
|
{
|
|
|
|
struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
|
|
|
|
|
|
|
|
__gss_pipe_free(p);
|
|
|
|
}
|
|
|
|
|
2013-08-27 04:44:42 +08:00
|
|
|
static void gss_pipe_free(struct gss_pipe *p)
|
2012-01-11 23:18:09 +08:00
|
|
|
{
|
2013-08-27 04:44:42 +08:00
|
|
|
if (p != NULL)
|
2013-08-28 04:52:16 +08:00
|
|
|
kref_put(&p->kref, __gss_pipe_release);
|
2012-01-11 23:18:09 +08:00
|
|
|
}
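/*
 * Illustrative, self-contained sketch of the refcounting pattern used by
 * gss_pipe_get()/gss_pipe_free() above: the count starts at 1 when the
 * object is created, lookups only take additional references through
 * kref_get_unless_zero() so an object already heading for destruction is
 * never revived, and the final kref_put() runs the release callback.
 * The demo_* names are hypothetical and not part of this file.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref kref;
};

static void demo_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

        kfree(obj);
}

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->kref);          /* reference count starts at 1 */
        return obj;
}

/* Lookup path: succeeds only while at least one reference is still held. */
static int demo_tryget(struct demo_obj *obj)
{
        return kref_get_unless_zero(&obj->kref);
}

static void demo_put(struct demo_obj *obj)
{
        if (obj)
                kref_put(&obj->kref, demo_release);     /* frees on the last put */
}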
|
|
|
|
|
2007-02-10 07:38:13 +08:00
|
|
|
/*
|
|
|
|
* NOTE: we have the opportunity to use different
|
2005-04-17 06:20:36 +08:00
|
|
|
* parameters based on the input flavor (which must be a pseudoflavor)
|
|
|
|
*/
|
2013-08-29 03:26:25 +08:00
|
|
|
static struct gss_auth *
|
2018-07-06 00:48:50 +08:00
|
|
|
gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-08-27 07:23:04 +08:00
|
|
|
rpc_authflavor_t flavor = args->pseudoflavor;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct gss_auth *gss_auth;
|
2013-08-27 04:44:42 +08:00
|
|
|
struct gss_pipe *gss_pipe;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct rpc_auth * auth;
|
2005-06-23 01:16:23 +08:00
|
|
|
int err = -ENOMEM; /* XXX? */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (!try_module_get(THIS_MODULE))
|
2005-06-23 01:16:23 +08:00
|
|
|
return ERR_PTR(err);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
|
|
|
|
goto out_dec;
|
2013-08-29 03:26:25 +08:00
|
|
|
INIT_HLIST_NODE(&gss_auth->hash);
|
2013-08-24 02:02:24 +08:00
|
|
|
gss_auth->target_name = NULL;
|
2013-08-27 07:23:04 +08:00
|
|
|
if (args->target_name) {
|
|
|
|
gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
|
2013-08-24 02:02:24 +08:00
|
|
|
if (gss_auth->target_name == NULL)
|
|
|
|
goto err_free;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
gss_auth->client = clnt;
|
2013-08-24 02:42:29 +08:00
|
|
|
gss_auth->net = get_net(rpc_net_ns(clnt));
|
2005-06-23 01:16:23 +08:00
|
|
|
err = -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
|
2019-02-12 00:25:04 +08:00
|
|
|
if (!gss_auth->mech)
|
2013-08-24 02:42:29 +08:00
|
|
|
goto err_put_net;
|
2005-04-17 06:20:36 +08:00
|
|
|
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
|
2005-06-23 01:16:23 +08:00
|
|
|
if (gss_auth->service == 0)
|
|
|
|
goto err_put_mech;
|
2014-02-11 05:28:52 +08:00
|
|
|
if (!gssd_running(gss_auth->net))
|
|
|
|
goto err_put_mech;
|
2005-04-17 06:20:36 +08:00
|
|
|
auth = &gss_auth->rpc_auth;
|
|
|
|
auth->au_cslack = GSS_CRED_SLACK >> 2;
|
2020-03-26 22:24:51 +08:00
|
|
|
auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
|
2019-02-12 00:25:31 +08:00
|
|
|
auth->au_verfsize = GSS_VERF_SLACK >> 2;
|
2019-02-12 00:25:36 +08:00
|
|
|
auth->au_ralign = GSS_VERF_SLACK >> 2;
|
sunrpc: move NO_CRKEY_TIMEOUT to the auth->au_flags
A generic_cred can be used to look up a unx_cred or a gss_cred, so it's
not really safe to use the generic_cred->acred->ac_flags to store
the NO_CRKEY_TIMEOUT flag. A lookup for a unx_cred triggered while the
KEY_EXPIRE_SOON flag is already set will cause both NO_CRKEY_TIMEOUT and
KEY_EXPIRE_SOON to be set in the ac_flags, leaving the user associated
with the auth_cred to be in a state where they're perpetually doing 4K
NFS_FILE_SYNC writes.
This can be reproduced as follows:
1. Mount two NFS filesystems, one with sec=krb5 and one with sec=sys.
They do not need to be the same export, nor do they even need to be from
the same NFS server. Also, v3 is fine.
$ sudo mount -o v3,sec=krb5 server1:/export /mnt/krb5
$ sudo mount -o v3,sec=sys server2:/export /mnt/sys
2. As the normal user, before accessing the kerberized mount, kinit with
a short lifetime (but not so short that renewing the ticket would leave
you within the 4-minute window again by the time the original ticket
expires), e.g.
$ kinit -l 10m -r 60m
3. Do some I/O to the kerberized mount and verify that the writes are
wsize, UNSTABLE:
$ dd if=/dev/zero of=/mnt/krb5/file bs=1M count=1
4. Wait until you're within 4 minutes of key expiry, then do some more
I/O to the kerberized mount to ensure that RPC_CRED_KEY_EXPIRE_SOON gets
set. Verify that the writes are 4K, FILE_SYNC:
$ dd if=/dev/zero of=/mnt/krb5/file bs=1M count=1
5. Now do some I/O to the sec=sys mount. This will cause
RPC_CRED_NO_CRKEY_TIMEOUT to be set:
$ dd if=/dev/zero of=/mnt/sys/file bs=1M count=1
6. Writes for that user will now be permanently 4K, FILE_SYNC for that
user, regardless of which mount is being written to, until you reboot
the client. Renewing the kerberos ticket (assuming it hasn't already
expired) will have no effect. Grabbing a new kerberos ticket at this
point will have no effect either.
Move the flag to the auth->au_flags field (which is currently unused)
and rename it slightly to reflect that it's no longer associated with
the auth_cred->ac_flags. Add the rpc_auth to the arg list of
rpcauth_cred_key_to_expire and check the au_flags there too. Finally,
add the inode to the arg list of nfs_ctx_key_to_expire so we can
determine the rpc_auth to pass to rpcauth_cred_key_to_expire.
Signed-off-by: Scott Mayhew <smayhew@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2016-06-08 03:14:48 +08:00
|
|
|
auth->au_flags = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
auth->au_ops = &authgss_ops;
|
|
|
|
auth->au_flavor = flavor;
|
2016-06-30 01:55:06 +08:00
|
|
|
if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
|
|
|
|
auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
|
2018-10-14 22:40:29 +08:00
|
|
|
refcount_set(&auth->au_count, 1);
|
2007-06-28 02:29:12 +08:00
|
|
|
kref_init(&gss_auth->kref);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-27 04:44:42 +08:00
|
|
|
err = rpcauth_init_credcache(auth);
|
|
|
|
if (err)
|
|
|
|
goto err_put_mech;
|
2008-12-24 05:16:37 +08:00
|
|
|
/*
|
|
|
|
* Note: if we created the old pipe first, then someone who
|
|
|
|
* examined the directory at the right moment might conclude
|
|
|
|
* that we supported only the old pipe. So we instead create
|
|
|
|
* the new pipe first.
|
|
|
|
*/
|
2013-08-28 04:52:16 +08:00
|
|
|
gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
|
2013-08-27 04:44:42 +08:00
|
|
|
if (IS_ERR(gss_pipe)) {
|
|
|
|
err = PTR_ERR(gss_pipe);
|
|
|
|
goto err_destroy_credcache;
|
2005-06-23 01:16:23 +08:00
|
|
|
}
|
2013-08-27 04:44:42 +08:00
|
|
|
gss_auth->gss_pipe[1] = gss_pipe;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-28 04:52:16 +08:00
|
|
|
gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
|
2013-08-27 04:44:42 +08:00
|
|
|
&gss_upcall_ops_v0);
|
|
|
|
if (IS_ERR(gss_pipe)) {
|
|
|
|
err = PTR_ERR(gss_pipe);
|
2011-12-26 20:44:06 +08:00
|
|
|
goto err_destroy_pipe_1;
|
|
|
|
}
|
2013-08-27 04:44:42 +08:00
|
|
|
gss_auth->gss_pipe[0] = gss_pipe;
|
2007-06-10 03:42:01 +08:00
|
|
|
|
2013-08-29 03:26:25 +08:00
|
|
|
return gss_auth;
|
2011-12-26 20:44:06 +08:00
|
|
|
err_destroy_pipe_1:
|
2013-08-28 04:52:16 +08:00
|
|
|
gss_pipe_free(gss_auth->gss_pipe[1]);
|
2013-08-27 04:44:42 +08:00
|
|
|
err_destroy_credcache:
|
|
|
|
rpcauth_destroy_credcache(auth);
|
2005-04-17 06:20:36 +08:00
|
|
|
err_put_mech:
|
|
|
|
gss_mech_put(gss_auth->mech);
|
2013-08-24 02:42:29 +08:00
|
|
|
err_put_net:
|
|
|
|
put_net(gss_auth->net);
|
2005-04-17 06:20:36 +08:00
|
|
|
err_free:
|
2013-08-24 02:02:24 +08:00
|
|
|
kfree(gss_auth->target_name);
|
2005-04-17 06:20:36 +08:00
|
|
|
kfree(gss_auth);
|
|
|
|
out_dec:
|
|
|
|
module_put(THIS_MODULE);
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_createauth(flavor, err);
|
2005-06-23 01:16:23 +08:00
|
|
|
return ERR_PTR(err);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
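/*
 * Illustrative arithmetic behind the au_cslack/au_rslack/au_verfsize
 * assignments above: those fields are stored in 32-bit XDR words, so a
 * byte estimate is shifted right by two.  For krb5p replies the estimate
 * must at least cover a 16-byte GSS token header and a checksum of up to
 * 20 bytes for the hmac-sha1-96-aes enctypes; the real
 * GSS_KRB5_MAX_SLACK_NEEDED is larger still (padding, confounder, and so
 * on), so the numbers below are a hypothetical lower bound, not kernel
 * values.
 */
#include <stdio.h>

#define DEMO_TOK_HDR_LEN        16      /* wrap/MIC token header bytes */
#define DEMO_MAX_CKSUM_LEN      20      /* largest checksum in the RFC 3961 table */

int main(void)
{
        unsigned int slack_bytes = DEMO_TOK_HDR_LEN + DEMO_MAX_CKSUM_LEN;
        unsigned int slack_words = slack_bytes >> 2;    /* same ">> 2" as above */

        printf("at least %u bytes of reply slack = %u XDR words\n",
               slack_bytes, slack_words);
        return 0;
}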
|
|
|
|
|
2007-06-28 02:29:12 +08:00
|
|
|
static void
|
|
|
|
gss_free(struct gss_auth *gss_auth)
|
|
|
|
{
|
2013-08-27 04:44:42 +08:00
|
|
|
gss_pipe_free(gss_auth->gss_pipe[0]);
|
|
|
|
gss_pipe_free(gss_auth->gss_pipe[1]);
|
2007-06-28 02:29:12 +08:00
|
|
|
gss_mech_put(gss_auth->mech);
|
2013-08-24 02:42:29 +08:00
|
|
|
put_net(gss_auth->net);
|
2013-08-24 02:02:24 +08:00
|
|
|
kfree(gss_auth->target_name);
|
2007-06-28 02:29:12 +08:00
|
|
|
|
|
|
|
kfree(gss_auth);
|
|
|
|
module_put(THIS_MODULE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gss_free_callback(struct kref *kref)
|
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
|
|
|
|
|
|
|
|
gss_free(gss_auth);
|
|
|
|
}
|
|
|
|
|
2014-02-17 01:14:13 +08:00
|
|
|
static void
|
|
|
|
gss_put_auth(struct gss_auth *gss_auth)
|
|
|
|
{
|
|
|
|
kref_put(&gss_auth->kref, gss_free_callback);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void
|
|
|
|
gss_destroy(struct rpc_auth *auth)
|
|
|
|
{
|
2013-08-27 04:44:42 +08:00
|
|
|
struct gss_auth *gss_auth = container_of(auth,
|
|
|
|
struct gss_auth, rpc_auth);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-29 03:26:25 +08:00
|
|
|
if (hash_hashed(&gss_auth->hash)) {
|
|
|
|
spin_lock(&gss_auth_hash_lock);
|
|
|
|
hash_del(&gss_auth->hash);
|
|
|
|
spin_unlock(&gss_auth_hash_lock);
|
|
|
|
}
|
|
|
|
|
2013-08-27 04:44:42 +08:00
|
|
|
gss_pipe_free(gss_auth->gss_pipe[0]);
|
|
|
|
gss_auth->gss_pipe[0] = NULL;
|
|
|
|
gss_pipe_free(gss_auth->gss_pipe[1]);
|
|
|
|
gss_auth->gss_pipe[1] = NULL;
|
2007-06-10 03:41:42 +08:00
|
|
|
rpcauth_destroy_credcache(auth);
|
|
|
|
|
2014-02-17 01:14:13 +08:00
|
|
|
gss_put_auth(gss_auth);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2013-09-18 23:16:03 +08:00
|
|
|
/*
|
|
|
|
* Auths may be shared between rpc clients that were cloned from a
|
|
|
|
* common client with the same xprt, if they also share the flavor and
|
|
|
|
* target_name.
|
|
|
|
*
|
|
|
|
* The auth is looked up from the oldest parent sharing the same
|
|
|
|
* cl_xprt, and the auth itself references only that common parent
|
|
|
|
* (which is guaranteed to last as long as any of its descendants).
|
|
|
|
*/
|
2013-08-29 03:26:25 +08:00
|
|
|
static struct gss_auth *
|
2018-07-06 00:48:50 +08:00
|
|
|
gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
|
2013-08-29 03:26:25 +08:00
|
|
|
struct rpc_clnt *clnt,
|
|
|
|
struct gss_auth *new)
|
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth;
|
|
|
|
unsigned long hashval = (unsigned long)clnt;
|
|
|
|
|
|
|
|
spin_lock(&gss_auth_hash_lock);
|
|
|
|
hash_for_each_possible(gss_auth_hash_table,
|
|
|
|
gss_auth,
|
|
|
|
hash,
|
|
|
|
hashval) {
|
2013-09-18 23:16:03 +08:00
|
|
|
if (gss_auth->client != clnt)
|
|
|
|
continue;
|
2013-08-29 03:26:25 +08:00
|
|
|
if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
|
|
|
|
continue;
|
|
|
|
if (gss_auth->target_name != args->target_name) {
|
|
|
|
if (gss_auth->target_name == NULL)
|
|
|
|
continue;
|
|
|
|
if (args->target_name == NULL)
|
|
|
|
continue;
|
|
|
|
if (strcmp(gss_auth->target_name, args->target_name))
|
|
|
|
continue;
|
|
|
|
}
|
2018-10-14 22:40:29 +08:00
|
|
|
if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
|
2013-08-29 03:26:25 +08:00
|
|
|
continue;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (new)
|
|
|
|
hash_add(gss_auth_hash_table, &new->hash, hashval);
|
|
|
|
gss_auth = new;
|
|
|
|
out:
|
|
|
|
spin_unlock(&gss_auth_hash_lock);
|
|
|
|
return gss_auth;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct gss_auth *
|
2018-07-06 00:48:50 +08:00
|
|
|
gss_create_hashed(const struct rpc_auth_create_args *args,
|
|
|
|
struct rpc_clnt *clnt)
|
2013-08-29 03:26:25 +08:00
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth;
|
|
|
|
struct gss_auth *new;
|
|
|
|
|
|
|
|
gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
|
|
|
|
if (gss_auth != NULL)
|
|
|
|
goto out;
|
|
|
|
new = gss_create_new(args, clnt);
|
|
|
|
if (IS_ERR(new))
|
|
|
|
return new;
|
|
|
|
gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
|
|
|
|
if (gss_auth != new)
|
|
|
|
gss_destroy(&new->rpc_auth);
|
|
|
|
out:
|
|
|
|
return gss_auth;
|
|
|
|
}
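/*
 * Simplified, hypothetical sketch (the demo_* names are not kernel APIs)
 * of the optimistic find-or-create scheme used by gss_create_hashed()
 * and gss_auth_find_or_add_hashed() above: search under the lock,
 * allocate a candidate outside the lock on a miss, then search again
 * while offering the candidate for insertion.  If a concurrent caller
 * inserted a match first, the loser frees its copy and takes a reference
 * on the winner's.
 */
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
        unsigned long key;
        struct list_head link;
        refcount_t refcount;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Find a live match under the lock; if none and @new is given, insert it. */
static struct demo_obj *demo_find_or_add(unsigned long key, struct demo_obj *new)
{
        struct demo_obj *obj;

        spin_lock(&demo_lock);
        list_for_each_entry(obj, &demo_list, link) {
                if (obj->key != key)
                        continue;
                if (!refcount_inc_not_zero(&obj->refcount))
                        continue;               /* matched, but already dying */
                goto out;
        }
        if (new)
                list_add(&new->link, &demo_list);
        obj = new;
out:
        spin_unlock(&demo_lock);
        return obj;
}

static struct demo_obj *demo_get(unsigned long key)
{
        struct demo_obj *obj, *new;

        obj = demo_find_or_add(key, NULL);              /* fast path */
        if (obj)
                return obj;
        new = kzalloc(sizeof(*new), GFP_KERNEL);        /* allocate outside the lock */
        if (!new)
                return NULL;
        new->key = key;
        refcount_set(&new->refcount, 1);
        obj = demo_find_or_add(key, new);               /* insert unless beaten to it */
        if (obj != new)
                kfree(new);                             /* lost the race: use the winner's */
        return obj;
}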
|
|
|
|
|
|
|
|
static struct rpc_auth *
|
2018-07-06 00:48:50 +08:00
|
|
|
gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
2013-08-29 03:26:25 +08:00
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth;
|
2016-01-31 03:17:26 +08:00
|
|
|
struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
|
2013-08-29 03:26:25 +08:00
|
|
|
|
|
|
|
while (clnt != clnt->cl_parent) {
|
|
|
|
struct rpc_clnt *parent = clnt->cl_parent;
|
|
|
|
/* Find the original parent for this transport */
|
2016-01-31 03:17:26 +08:00
|
|
|
if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
|
2013-08-29 03:26:25 +08:00
|
|
|
break;
|
|
|
|
clnt = parent;
|
|
|
|
}
|
|
|
|
|
|
|
|
gss_auth = gss_create_hashed(args, clnt);
|
|
|
|
if (IS_ERR(gss_auth))
|
|
|
|
return ERR_CAST(gss_auth);
|
|
|
|
return &gss_auth->rpc_auth;
|
|
|
|
}
|
|
|
|
|
2018-11-13 04:30:52 +08:00
|
|
|
static struct gss_cred *
|
|
|
|
gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
|
|
|
|
{
|
|
|
|
struct gss_cred *new;
|
|
|
|
|
|
|
|
/* Make a copy of the cred so that we can reference count it */
|
2019-03-02 23:14:02 +08:00
|
|
|
new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
|
2018-11-13 04:30:52 +08:00
|
|
|
if (new) {
|
|
|
|
struct auth_cred acred = {
|
2018-12-03 08:30:30 +08:00
|
|
|
.cred = gss_cred->gc_base.cr_cred,
|
2018-11-13 04:30:52 +08:00
|
|
|
};
|
|
|
|
struct gss_cl_ctx *ctx =
|
|
|
|
rcu_dereference_protected(gss_cred->gc_ctx, 1);
|
|
|
|
|
|
|
|
rpcauth_init_cred(&new->gc_base, &acred,
|
|
|
|
&gss_auth->rpc_auth,
|
|
|
|
&gss_nullops);
|
|
|
|
new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
|
|
|
|
new->gc_service = gss_cred->gc_service;
|
|
|
|
new->gc_principal = gss_cred->gc_principal;
|
|
|
|
kref_get(&gss_auth->kref);
|
|
|
|
rcu_assign_pointer(new->gc_ctx, ctx);
|
|
|
|
gss_get_ctx(ctx);
|
|
|
|
}
|
|
|
|
return new;
|
|
|
|
}
|
|
|
|
|
2007-06-27 05:04:57 +08:00
|
|
|
/*
|
2018-11-13 04:30:52 +08:00
|
|
|
* gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
|
2007-06-27 05:04:57 +08:00
|
|
|
* to the server with the GSS control procedure field set to
|
|
|
|
* RPC_GSS_PROC_DESTROY. This should normally cause the server to release
|
|
|
|
* all RPCSEC_GSS state associated with that context.
|
|
|
|
*/
|
2018-11-13 04:30:52 +08:00
|
|
|
static void
|
|
|
|
gss_send_destroy_context(struct rpc_cred *cred)
|
2007-06-27 05:04:57 +08:00
|
|
|
{
|
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
|
|
|
|
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
|
2014-07-16 18:52:19 +08:00
|
|
|
struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
|
2018-11-13 04:30:52 +08:00
|
|
|
struct gss_cred *new;
|
2007-06-27 05:04:57 +08:00
|
|
|
struct rpc_task *task;
|
|
|
|
|
2018-11-13 04:30:52 +08:00
|
|
|
new = gss_dup_cred(gss_auth, gss_cred);
|
|
|
|
if (new) {
|
|
|
|
ctx->gc_proc = RPC_GSS_PROC_DESTROY;
|
2007-06-27 05:04:57 +08:00
|
|
|
|
2018-11-13 04:30:52 +08:00
|
|
|
task = rpc_call_null(gss_auth->client, &new->gc_base,
|
|
|
|
RPC_TASK_ASYNC|RPC_TASK_SOFT);
|
|
|
|
if (!IS_ERR(task))
|
|
|
|
rpc_put_task(task);
|
2007-06-27 05:04:57 +08:00
|
|
|
|
2018-11-13 04:30:52 +08:00
|
|
|
put_rpccred(&new->gc_base);
|
|
|
|
}
|
2007-06-27 05:04:57 +08:00
|
|
|
}
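/*
 * For reference, the RPCSEC_GSS control procedures defined by RFC 2203
 * that ctx->gc_proc carries.  This standalone enum (DEMO_* names) only
 * illustrates the on-the-wire values; it is not a redefinition of the
 * kernel's constants.  gss_send_destroy_context() above switches the
 * duplicated context to the DESTROY procedure and sends a NULL RPC so
 * the server can drop its half of the security context.
 */
enum demo_rpc_gss_proc {
        DEMO_RPC_GSS_PROC_DATA          = 0,    /* ordinary, protected requests */
        DEMO_RPC_GSS_PROC_INIT          = 1,    /* begin context establishment */
        DEMO_RPC_GSS_PROC_CONTINUE_INIT = 2,    /* multi-leg establishment */
        DEMO_RPC_GSS_PROC_DESTROY       = 3,    /* tear down the context */
};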
|
|
|
|
|
|
|
|
/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
|
2005-04-17 06:20:36 +08:00
|
|
|
* to create a new cred or context, so they check that things have been
|
|
|
|
* allocated before freeing them. */
|
|
|
|
static void
|
2007-06-27 07:18:38 +08:00
|
|
|
gss_do_free_ctx(struct gss_cl_ctx *ctx)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-08-02 08:14:55 +08:00
|
|
|
gss_delete_sec_context(&ctx->gc_gss_ctx);
|
2005-04-17 06:20:36 +08:00
|
|
|
kfree(ctx->gc_wire_ctx.data);
|
2014-06-22 08:52:15 +08:00
|
|
|
kfree(ctx->gc_acceptor.data);
|
2005-04-17 06:20:36 +08:00
|
|
|
kfree(ctx);
|
|
|
|
}
|
|
|
|
|
2007-06-27 07:18:38 +08:00
|
|
|
static void
|
|
|
|
gss_free_ctx_callback(struct rcu_head *head)
|
|
|
|
{
|
|
|
|
struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
|
|
|
|
gss_do_free_ctx(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gss_free_ctx(struct gss_cl_ctx *ctx)
|
|
|
|
{
|
|
|
|
call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void
|
2007-06-25 03:55:26 +08:00
|
|
|
gss_free_cred(struct gss_cred *gss_cred)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2007-06-25 03:55:26 +08:00
|
|
|
kfree(gss_cred);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-06-25 03:55:26 +08:00
|
|
|
static void
|
|
|
|
gss_free_cred_callback(struct rcu_head *head)
|
|
|
|
{
|
|
|
|
struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
|
|
|
|
gss_free_cred(gss_cred);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-06-25 03:55:26 +08:00
|
|
|
static void
|
2008-12-24 04:21:57 +08:00
|
|
|
gss_destroy_nullcred(struct rpc_cred *cred)
|
2007-06-25 03:55:26 +08:00
|
|
|
{
|
2007-06-27 07:18:38 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
|
2007-06-28 02:29:12 +08:00
|
|
|
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
|
2014-07-16 18:52:19 +08:00
|
|
|
struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
|
2007-06-27 07:18:38 +08:00
|
|
|
|
2011-08-02 00:19:00 +08:00
|
|
|
RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
|
2018-12-03 08:30:30 +08:00
|
|
|
put_cred(cred->cr_cred);
|
2007-06-25 03:55:26 +08:00
|
|
|
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
|
2007-06-27 07:18:38 +08:00
|
|
|
if (ctx)
|
|
|
|
gss_put_ctx(ctx);
|
2014-02-17 01:14:13 +08:00
|
|
|
gss_put_auth(gss_auth);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-12-24 04:21:57 +08:00
|
|
|
static void
|
|
|
|
gss_destroy_cred(struct rpc_cred *cred)
|
|
|
|
{
|
|
|
|
|
2018-11-13 04:30:52 +08:00
|
|
|
if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
|
|
|
|
gss_send_destroy_context(cred);
|
2008-12-24 04:21:57 +08:00
|
|
|
gss_destroy_nullcred(cred);
|
|
|
|
}
|
|
|
|
|
2016-09-29 23:44:40 +08:00
|
|
|
static int
|
|
|
|
gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
|
|
|
|
{
|
2018-12-03 08:30:30 +08:00
|
|
|
return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
|
2016-09-29 23:44:40 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Lookup RPCSEC_GSS cred for the current process
|
|
|
|
*/
|
|
|
|
static struct rpc_cred *
|
2006-02-02 01:18:36 +08:00
|
|
|
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2016-04-22 08:51:54 +08:00
|
|
|
return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct rpc_cred *
|
2016-04-22 08:51:54 +08:00
|
|
|
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
|
|
|
|
struct gss_cred *cred = NULL;
|
|
|
|
int err = -ENOMEM;
|
|
|
|
|
2016-04-22 08:51:54 +08:00
|
|
|
if (!(cred = kzalloc(sizeof(*cred), gfp)))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out_err;
|
|
|
|
|
2007-06-24 07:55:31 +08:00
|
|
|
rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Note: in order to force a call to call_refresh(), we deliberately
|
|
|
|
* fail to flag the credential as RPCAUTH_CRED_UPTODATE.
|
|
|
|
*/
|
2007-06-25 22:15:15 +08:00
|
|
|
cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
|
2005-04-17 06:20:36 +08:00
|
|
|
cred->gc_service = gss_auth->service;
|
2018-12-03 08:30:30 +08:00
|
|
|
cred->gc_principal = acred->principal;
|
2007-06-28 02:29:12 +08:00
|
|
|
kref_get(&gss_auth->kref);
|
2005-04-17 06:20:36 +08:00
|
|
|
return &cred->gc_base;
|
|
|
|
|
|
|
|
out_err:
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2006-02-02 01:19:27 +08:00
|
|
|
static int
|
|
|
|
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
|
|
|
|
{
|
|
|
|
struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
|
|
|
|
struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = gss_create_upcall(gss_auth, gss_cred);
|
|
|
|
} while (err == -EAGAIN);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-06-22 08:52:16 +08:00
|
|
|
static char *
|
|
|
|
gss_stringify_acceptor(struct rpc_cred *cred)
|
|
|
|
{
|
2014-07-16 18:52:19 +08:00
|
|
|
char *string = NULL;
|
2014-06-22 08:52:16 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
|
2014-07-16 18:52:19 +08:00
|
|
|
struct gss_cl_ctx *ctx;
|
2014-11-13 20:30:46 +08:00
|
|
|
unsigned int len;
|
2014-07-16 18:52:19 +08:00
|
|
|
struct xdr_netobj *acceptor;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
ctx = rcu_dereference(gss_cred->gc_ctx);
|
|
|
|
if (!ctx)
|
|
|
|
goto out;
|
|
|
|
|
2014-11-13 20:30:46 +08:00
|
|
|
len = ctx->gc_acceptor.len;
|
|
|
|
rcu_read_unlock();
|
2014-06-22 08:52:16 +08:00
|
|
|
|
|
|
|
/* no point if there's no string */
|
2014-11-13 20:30:46 +08:00
|
|
|
if (!len)
|
|
|
|
return NULL;
|
|
|
|
realloc:
|
|
|
|
string = kmalloc(len + 1, GFP_KERNEL);
|
2014-06-22 08:52:16 +08:00
|
|
|
if (!string)
|
2014-11-13 20:30:46 +08:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
ctx = rcu_dereference(gss_cred->gc_ctx);
|
|
|
|
|
|
|
|
/* did the ctx disappear or was it replaced by one with no acceptor? */
|
|
|
|
if (!ctx || !ctx->gc_acceptor.len) {
|
|
|
|
kfree(string);
|
|
|
|
string = NULL;
|
2014-07-16 18:52:19 +08:00
|
|
|
goto out;
|
2014-11-13 20:30:46 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
acceptor = &ctx->gc_acceptor;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Did we find a new acceptor that's longer than the original? Allocate
|
|
|
|
* a longer buffer and try again.
|
|
|
|
*/
|
|
|
|
if (len < acceptor->len) {
|
|
|
|
len = acceptor->len;
|
|
|
|
rcu_read_unlock();
|
|
|
|
kfree(string);
|
|
|
|
goto realloc;
|
|
|
|
}
|
2014-06-22 08:52:16 +08:00
|
|
|
|
|
|
|
memcpy(string, acceptor->data, acceptor->len);
|
|
|
|
string[acceptor->len] = '\0';
|
2014-07-16 18:52:19 +08:00
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
2014-06-22 08:52:16 +08:00
|
|
|
return string;
|
|
|
|
}
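/*
 * Self-contained sketch of the pattern gss_stringify_acceptor() uses
 * above: sample the object's size under rcu_read_lock(), drop the RCU
 * read lock before a GFP_KERNEL allocation (which may sleep), then
 * re-dereference and retry if the object was replaced by a larger one.
 * struct demo_blob and demo_ptr are hypothetical stand-ins.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_blob {
        unsigned int len;
        char data[];
};

static struct demo_blob __rcu *demo_ptr;

static char *demo_copy_blob(void)
{
        struct demo_blob *blob;
        unsigned int len;
        char *copy;

        rcu_read_lock();
        blob = rcu_dereference(demo_ptr);
        if (!blob) {
                rcu_read_unlock();
                return NULL;
        }
        len = blob->len;                        /* sample the size... */
        rcu_read_unlock();                      /* ...then allow sleeping */
realloc:
        copy = kmalloc(len + 1, GFP_KERNEL);
        if (!copy)
                return NULL;

        rcu_read_lock();
        blob = rcu_dereference(demo_ptr);       /* revalidate after sleeping */
        if (!blob) {
                rcu_read_unlock();
                kfree(copy);
                return NULL;
        }
        if (len < blob->len) {                  /* it grew: allocate again */
                len = blob->len;
                rcu_read_unlock();
                kfree(copy);
                goto realloc;
        }
        memcpy(copy, blob->data, blob->len);
        copy[blob->len] = '\0';
        rcu_read_unlock();
        return copy;
}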
|
|
|
|
|
2013-08-14 23:59:15 +08:00
|
|
|
/*
|
|
|
|
* Returns -EACCES if GSS context is NULL or will expire within the
|
|
|
|
* timeout (gss_key_expire_timeo, in seconds)
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
gss_key_timeout(struct rpc_cred *rc)
|
|
|
|
{
|
|
|
|
struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
|
2014-07-16 18:52:19 +08:00
|
|
|
struct gss_cl_ctx *ctx;
|
2015-10-09 22:13:45 +08:00
|
|
|
unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
|
|
|
|
int ret = 0;
|
2013-08-14 23:59:15 +08:00
|
|
|
|
2014-07-16 18:52:19 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
ctx = rcu_dereference(gss_cred->gc_ctx);
|
2015-10-09 22:13:45 +08:00
|
|
|
if (!ctx || time_after(timeout, ctx->gc_expiry))
|
|
|
|
ret = -EACCES;
|
2014-07-16 18:52:19 +08:00
|
|
|
rcu_read_unlock();
|
2015-10-09 22:13:45 +08:00
|
|
|
|
|
|
|
return ret;
|
2013-08-14 23:59:15 +08:00
|
|
|
}
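/*
 * Minimal sketch of the jiffies arithmetic in gss_key_timeout() above:
 * the context is treated as "about to expire" when its gc_expiry falls
 * inside the next gss_key_expire_timeo seconds.  The helper below is a
 * hypothetical standalone illustration of that comparison.
 */
#include <linux/jiffies.h>
#include <linux/types.h>

static bool demo_expires_soon(unsigned long expiry, unsigned long timeo_secs)
{
        unsigned long window_end = jiffies + timeo_secs * HZ;

        /* true when @expiry (in jiffies) lands before the window closes */
        return time_after(window_end, expiry);
}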
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int
|
2006-02-02 01:18:36 +08:00
|
|
|
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
|
2014-07-16 18:52:19 +08:00
|
|
|
struct gss_cl_ctx *ctx;
|
2013-08-14 23:59:15 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-18 05:03:58 +08:00
|
|
|
if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
|
2006-02-02 01:18:36 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Don't match with creds that have expired. */
|
2014-07-16 18:52:19 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
ctx = rcu_dereference(gss_cred->gc_ctx);
|
|
|
|
if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
|
|
|
|
rcu_read_unlock();
|
2008-04-18 05:03:58 +08:00
|
|
|
return 0;
|
2014-07-16 18:52:19 +08:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2008-04-18 05:03:58 +08:00
|
|
|
if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2006-02-02 01:18:36 +08:00
|
|
|
out:
|
2012-01-04 02:22:46 +08:00
|
|
|
if (acred->principal != NULL) {
|
|
|
|
if (gss_cred->gc_principal == NULL)
|
|
|
|
return 0;
|
2013-08-14 23:59:15 +08:00
|
|
|
ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
|
2018-12-03 08:30:30 +08:00
|
|
|
} else {
|
|
|
|
if (gss_cred->gc_principal != NULL)
|
|
|
|
return 0;
|
2018-12-03 08:30:31 +08:00
|
|
|
ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
|
2013-08-14 23:59:15 +08:00
|
|
|
}
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-02-12 00:24:48 +08:00
|
|
|
* Marshal credentials.
|
|
|
|
*
|
|
|
|
* The expensive part is computing the verifier. We can't cache a
|
|
|
|
* pre-computed version of the verifier because the seqno, which
|
|
|
|
* is different every time, is included in the MIC.
|
|
|
|
*/
|
|
|
|
static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-08-01 02:29:08 +08:00
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_cred *cred = req->rq_cred;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
|
|
|
gc_base);
|
|
|
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
2019-02-12 00:24:48 +08:00
|
|
|
__be32 *p, *cred_len;
|
2005-04-17 06:20:36 +08:00
|
|
|
u32 maj_stat = 0;
|
|
|
|
struct xdr_netobj mic;
|
|
|
|
struct kvec iov;
|
|
|
|
struct xdr_buf verf_buf;
|
2019-02-12 00:25:04 +08:00
|
|
|
int status;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
/* Credential */
|
|
|
|
|
|
|
|
p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
|
|
|
|
ctx->gc_wire_ctx.len);
|
|
|
|
if (!p)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto marshal_failed;
|
2019-02-12 00:24:48 +08:00
|
|
|
*p++ = rpc_auth_gss;
|
2005-04-17 06:20:36 +08:00
|
|
|
cred_len = p++;
|
|
|
|
|
|
|
|
spin_lock(&ctx->gc_seq_lock);
|
2019-01-03 06:53:13 +08:00
|
|
|
req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
|
2005-04-17 06:20:36 +08:00
|
|
|
spin_unlock(&ctx->gc_seq_lock);
|
2019-01-03 06:53:13 +08:00
|
|
|
if (req->rq_seqno == MAXSEQ)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto expired;
|
|
|
|
trace_rpcgss_seqno(task);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
*p++ = cpu_to_be32(RPC_GSS_VERSION);
|
|
|
|
*p++ = cpu_to_be32(ctx->gc_proc);
|
|
|
|
*p++ = cpu_to_be32(req->rq_seqno);
|
|
|
|
*p++ = cpu_to_be32(gss_cred->gc_service);
|
2005-04-17 06:20:36 +08:00
|
|
|
p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
|
2019-02-12 00:24:48 +08:00
|
|
|
*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
|
|
|
|
|
|
|
|
/* Verifier */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* We compute the checksum for the verifier over the xdr-encoded bytes
|
|
|
|
* starting with the xid and ending at the end of the credential: */
|
2019-02-12 00:24:37 +08:00
|
|
|
iov.iov_base = req->rq_snd_buf.head[0].iov_base;
|
2005-04-17 06:20:36 +08:00
|
|
|
iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
|
|
|
|
xdr_buf_from_iov(&iov, &verf_buf);
|
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
p = xdr_reserve_space(xdr, sizeof(*p));
|
|
|
|
if (!p)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto marshal_failed;
|
2019-02-12 00:24:48 +08:00
|
|
|
*p++ = rpc_auth_gss;
|
2005-04-17 06:20:36 +08:00
|
|
|
mic.data = (u8 *)(p + 1);
|
2005-10-14 04:55:18 +08:00
|
|
|
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
|
2019-02-12 00:24:48 +08:00
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto expired;
|
2019-02-12 00:24:48 +08:00
|
|
|
else if (maj_stat != 0)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto bad_mic;
|
2019-02-12 00:24:48 +08:00
|
|
|
if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto marshal_failed;
|
|
|
|
status = 0;
|
|
|
|
out:
|
2019-02-12 00:24:48 +08:00
|
|
|
gss_put_ctx(ctx);
|
2019-02-12 00:25:04 +08:00
|
|
|
return status;
|
|
|
|
expired:
|
2019-01-03 06:53:13 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2019-02-12 00:25:04 +08:00
|
|
|
status = -EKEYEXPIRED;
|
|
|
|
goto out;
|
|
|
|
marshal_failed:
|
|
|
|
status = -EMSGSIZE;
|
|
|
|
goto out;
|
|
|
|
bad_mic:
|
|
|
|
trace_rpcgss_get_mic(task, maj_stat);
|
|
|
|
status = -EIO;
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
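/*
 * Standalone sketch of the credential layout gss_marshal() builds above:
 * auth flavor, body length, then { version, control procedure, sequence
 * number, service, opaque context handle } -- seven XDR words plus the
 * handle.  The demo_* names and the caller-supplied handle are
 * hypothetical; only the field order and the flavor/version numbers
 * (AUTH_GSS = 6, RPC_GSS_VERSION = 1) follow RFC 2203.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Write one 32-bit value in XDR (big-endian) byte order. */
static unsigned char *demo_put_be32(unsigned char *p, uint32_t val)
{
        p[0] = val >> 24; p[1] = val >> 16; p[2] = val >> 8; p[3] = val;
        return p + 4;
}

static size_t demo_encode_gss_cred(unsigned char *buf, uint32_t proc,
                                   uint32_t seq, uint32_t service,
                                   const unsigned char *handle, uint32_t handle_len)
{
        unsigned char *p = buf;
        unsigned char *len_field;
        uint32_t pad = (4 - (handle_len & 3)) & 3;

        p = demo_put_be32(p, 6);                /* AUTH_GSS flavor number */
        len_field = p;                          /* body length, filled in below */
        p += 4;
        p = demo_put_be32(p, 1);                /* RPC_GSS_VERSION */
        p = demo_put_be32(p, proc);             /* control procedure */
        p = demo_put_be32(p, seq);              /* sequence number */
        p = demo_put_be32(p, service);          /* none/integrity/privacy */
        p = demo_put_be32(p, handle_len);       /* opaque context handle */
        memcpy(p, handle, handle_len);
        memset(p + handle_len, 0, pad);         /* XDR pads opaques to 4 bytes */
        p += handle_len + pad;
        demo_put_be32(len_field, (uint32_t)(p - (len_field + 4)));
        return (size_t)(p - buf);
}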
|
|
|
|
|
2008-04-18 05:03:58 +08:00
|
|
|
static int gss_renew_cred(struct rpc_task *task)
|
|
|
|
{
|
2010-08-01 02:29:08 +08:00
|
|
|
struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
|
2008-04-18 05:03:58 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(oldcred,
|
|
|
|
struct gss_cred,
|
|
|
|
gc_base);
|
|
|
|
struct rpc_auth *auth = oldcred->cr_auth;
|
|
|
|
struct auth_cred acred = {
|
2018-12-03 08:30:30 +08:00
|
|
|
.cred = oldcred->cr_cred,
|
2012-01-04 02:22:46 +08:00
|
|
|
.principal = gss_cred->gc_principal,
|
2008-04-18 05:03:58 +08:00
|
|
|
};
|
|
|
|
struct rpc_cred *new;
|
|
|
|
|
|
|
|
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
|
|
|
|
if (IS_ERR(new))
|
|
|
|
return PTR_ERR(new);
|
2010-08-01 02:29:08 +08:00
|
|
|
task->tk_rqstp->rq_cred = new;
|
2008-04-18 05:03:58 +08:00
|
|
|
put_rpccred(oldcred);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-05-14 00:55:38 +08:00
|
|
|
static int gss_cred_is_negative_entry(struct rpc_cred *cred)
|
|
|
|
{
|
|
|
|
if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
|
|
|
|
unsigned long now = jiffies;
|
|
|
|
unsigned long begin, expire;
|
2018-07-25 03:29:15 +08:00
|
|
|
struct gss_cred *gss_cred;
|
2010-05-14 00:55:38 +08:00
|
|
|
|
|
|
|
gss_cred = container_of(cred, struct gss_cred, gc_base);
|
|
|
|
begin = gss_cred->gc_upcall_timestamp;
|
|
|
|
expire = begin + gss_expired_cred_retry_delay * HZ;
|
|
|
|
|
|
|
|
if (time_in_range_open(now, begin, expire))
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Refresh credentials. XXX - finish
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
gss_refresh(struct rpc_task *task)
|
|
|
|
{
|
2010-08-01 02:29:08 +08:00
|
|
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
2008-04-18 05:03:58 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
2010-05-14 00:55:38 +08:00
|
|
|
if (gss_cred_is_negative_entry(cred))
|
|
|
|
return -EKEYEXPIRED;
|
|
|
|
|
2008-04-18 05:03:58 +08:00
|
|
|
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
|
|
|
|
!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
|
|
|
|
ret = gss_renew_cred(task);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2010-08-01 02:29:08 +08:00
|
|
|
cred = task->tk_rqstp->rq_cred;
|
2008-04-18 05:03:58 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-18 05:03:58 +08:00
|
|
|
if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
|
|
|
|
ret = gss_refresh_upcall(task);
|
|
|
|
out:
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-06-27 05:04:57 +08:00
|
|
|
/* Dummy refresh routine: used only when destroying the context */
|
|
|
|
static int
|
|
|
|
gss_refresh_null(struct rpc_task *task)
|
|
|
|
{
|
2013-11-21 02:00:17 +08:00
|
|
|
return 0;
|
2007-06-27 05:04:57 +08:00
|
|
|
}
|
|
|
|
|
2019-02-12 00:24:58 +08:00
|
|
|
static int
|
|
|
|
gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-08-01 02:29:08 +08:00
|
|
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
2019-02-12 00:24:58 +08:00
|
|
|
__be32 *p, *seq = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct kvec iov;
|
|
|
|
struct xdr_buf verf_buf;
|
|
|
|
struct xdr_netobj mic;
|
2019-02-12 00:24:58 +08:00
|
|
|
u32 len, maj_stat;
|
|
|
|
int status;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:58 +08:00
|
|
|
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
|
|
|
|
if (!p)
|
|
|
|
goto validate_failed;
|
|
|
|
if (*p++ != rpc_auth_gss)
|
|
|
|
goto validate_failed;
|
|
|
|
len = be32_to_cpup(p);
|
|
|
|
if (len > RPC_MAX_AUTH_SIZE)
|
|
|
|
goto validate_failed;
|
|
|
|
p = xdr_inline_decode(xdr, len);
|
|
|
|
if (!p)
|
|
|
|
goto validate_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-10-19 04:30:09 +08:00
|
|
|
seq = kmalloc(4, GFP_NOFS);
|
|
|
|
if (!seq)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto validate_failed;
|
|
|
|
*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
|
2016-10-19 04:30:09 +08:00
|
|
|
iov.iov_base = seq;
|
|
|
|
iov.iov_len = 4;
|
2005-04-17 06:20:36 +08:00
|
|
|
xdr_buf_from_iov(&iov, &verf_buf);
|
|
|
|
mic.data = (u8 *)p;
|
|
|
|
mic.len = len;
|
2005-10-14 04:55:18 +08:00
|
|
|
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2007-06-25 22:15:15 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2019-02-12 00:24:58 +08:00
|
|
|
if (maj_stat)
|
|
|
|
goto bad_mic;
|
|
|
|
|
2005-10-14 04:54:53 +08:00
|
|
|
/* We leave it to unwrap to calculate au_rslack. For now we just
|
|
|
|
* calculate the length of the verifier: */
|
2007-06-28 02:29:04 +08:00
|
|
|
cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
|
2019-02-12 00:24:58 +08:00
|
|
|
status = 0;
|
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
gss_put_ctx(ctx);
|
2016-10-19 04:30:09 +08:00
|
|
|
kfree(seq);
|
2019-02-12 00:24:58 +08:00
|
|
|
return status;
|
|
|
|
|
|
|
|
validate_failed:
|
|
|
|
status = -EIO;
|
|
|
|
goto out;
|
|
|
|
bad_mic:
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_verify_mic(task, maj_stat);
|
2019-02-12 00:24:58 +08:00
|
|
|
status = -EACCES;
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
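/*
 * Worked example of the au_verfsize computation in gss_validate() above:
 * XDR_QUADLEN() rounds a byte count up to 32-bit words, and the "+ 2"
 * accounts for the verifier's flavor word and opaque-length word.  The
 * MIC length used here is hypothetical.
 */
#include <stdio.h>

static unsigned int demo_xdr_quadlen(unsigned int bytes)
{
        return (bytes + 3) >> 2;        /* round up to 4-byte XDR words */
}

int main(void)
{
        unsigned int mic_len = 28;      /* hypothetical MIC size in bytes */

        printf("verifier occupies %u XDR words\n", demo_xdr_quadlen(mic_len) + 2);
        return 0;
}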
|
|
|
|
|
2020-03-11 23:21:17 +08:00
|
|
|
static noinline_for_stack int
|
|
|
|
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|
|
|
struct rpc_task *task, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2019-02-12 00:24:48 +08:00
|
|
|
struct rpc_rqst *rqstp = task->tk_rqstp;
|
|
|
|
struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct xdr_netobj mic;
|
2019-02-12 00:24:48 +08:00
|
|
|
__be32 *p, *integ_len;
|
|
|
|
u32 offset, maj_stat;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
p = xdr_reserve_space(xdr, 2 * sizeof(*p));
|
|
|
|
if (!p)
|
|
|
|
goto wrap_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
integ_len = p++;
|
2019-02-12 00:24:48 +08:00
|
|
|
*p = cpu_to_be32(rqstp->rq_seqno);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
if (rpcauth_wrap_req_encode(task, xdr))
|
|
|
|
goto wrap_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (xdr_buf_subsegment(snd_buf, &integ_buf,
|
|
|
|
offset, snd_buf->len - offset))
|
2019-02-12 00:24:48 +08:00
|
|
|
goto wrap_failed;
|
|
|
|
*integ_len = cpu_to_be32(integ_buf.len);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
p = xdr_reserve_space(xdr, 0);
|
|
|
|
if (!p)
|
|
|
|
goto wrap_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
mic.data = (u8 *)(p + 1);
|
2005-10-14 04:55:18 +08:00
|
|
|
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2007-06-25 22:15:15 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
else if (maj_stat)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto bad_mic;
|
2019-02-12 00:24:48 +08:00
|
|
|
/* Check that the trailing MIC fit in the buffer, after the fact */
|
|
|
|
if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
|
|
|
|
goto wrap_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2019-02-12 00:24:48 +08:00
|
|
|
wrap_failed:
|
|
|
|
return -EMSGSIZE;
|
2019-02-12 00:25:04 +08:00
|
|
|
bad_mic:
|
|
|
|
trace_rpcgss_get_mic(task, maj_stat);
|
|
|
|
return -EIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
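/*
 * Illustrative byte accounting for the integrity-protected databody that
 * gss_wrap_req_integ() emits above (RFC 2203 rpc_gss_integ_data): a
 * length word, the sequence number, the encoded RPC arguments, then an
 * opaque MIC computed over { seq_num, arguments }.  The argument and MIC
 * sizes below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t args_len  = 120;                       /* hypothetical argument bytes */
        uint32_t mic_len   = 28;                        /* hypothetical MIC bytes */
        uint32_t integ_len = 4 + args_len;              /* seq_num word + arguments */
        uint32_t total     = 4 + integ_len              /* length word + integrity body */
                           + 4 + ((mic_len + 3) & ~3u); /* opaque MIC, XDR-padded */

        printf("integ_len=%u, databody total=%u bytes\n", integ_len, total);
        return 0;
}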
|
|
|
|
|
2005-10-14 04:54:58 +08:00
|
|
|
static void
|
|
|
|
priv_release_snd_buf(struct rpc_rqst *rqstp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i=0; i < rqstp->rq_enc_pages_num; i++)
|
|
|
|
__free_page(rqstp->rq_enc_pages[i]);
|
|
|
|
kfree(rqstp->rq_enc_pages);
|
2018-12-01 04:39:57 +08:00
|
|
|
rqstp->rq_release_snd_buf = NULL;
|
2005-10-14 04:54:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
alloc_enc_pages(struct rpc_rqst *rqstp)
|
|
|
|
{
|
|
|
|
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
|
|
|
|
int first, last, i;
|
|
|
|
|
2018-12-01 04:39:57 +08:00
|
|
|
if (rqstp->rq_release_snd_buf)
|
|
|
|
rqstp->rq_release_snd_buf(rqstp);
|
|
|
|
|
2005-10-14 04:54:58 +08:00
|
|
|
if (snd_buf->page_len == 0) {
|
|
|
|
rqstp->rq_enc_pages_num = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CACHE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 20:29:47 +08:00
|
|
|
first = snd_buf->page_base >> PAGE_SHIFT;
|
|
|
|
last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
|
2005-10-14 04:54:58 +08:00
|
|
|
rqstp->rq_enc_pages_num = last - first + 1 + 1;
|
|
|
|
rqstp->rq_enc_pages
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 04:55:00 +08:00
|
|
|
= kmalloc_array(rqstp->rq_enc_pages_num,
|
|
|
|
sizeof(struct page *),
|
2005-10-14 04:54:58 +08:00
|
|
|
GFP_NOFS);
|
|
|
|
if (!rqstp->rq_enc_pages)
|
|
|
|
goto out;
|
|
|
|
for (i=0; i < rqstp->rq_enc_pages_num; i++) {
|
|
|
|
rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
|
|
|
|
if (rqstp->rq_enc_pages[i] == NULL)
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
rqstp->rq_release_snd_buf = priv_release_snd_buf;
|
|
|
|
return 0;
|
|
|
|
out_free:
|
2010-03-20 03:36:22 +08:00
|
|
|
rqstp->rq_enc_pages_num = i;
|
|
|
|
priv_release_snd_buf(rqstp);
|
2005-10-14 04:54:58 +08:00
|
|
|
out:
|
|
|
|
return -EAGAIN;
|
|
|
|
}
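/*
 * Worked example of the page count alloc_enc_pages() derives above: the
 * number of pages spanned by [page_base, page_base + page_len) plus one
 * spare page that the privacy path uses to relocate the tail.  The
 * page_base/page_len values are hypothetical; 4 KiB pages are assumed.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12      /* 4 KiB pages */

int main(void)
{
        unsigned int page_base = 100;   /* offset of the payload in its first page */
        unsigned int page_len  = 9000;  /* payload bytes carried in pages */

        unsigned int first  = page_base >> DEMO_PAGE_SHIFT;
        unsigned int last   = (page_base + page_len - 1) >> DEMO_PAGE_SHIFT;
        unsigned int npages = last - first + 1 + 1;     /* +1 page for the tail */

        printf("need %u encryption pages\n", npages);
        return 0;
}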
|
|
|
|
|
2020-03-11 23:21:17 +08:00
|
|
|
static noinline_for_stack int
|
|
|
|
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|
|
|
struct rpc_task *task, struct xdr_stream *xdr)
|
2005-10-14 04:54:58 +08:00
|
|
|
{
|
2019-02-12 00:24:48 +08:00
|
|
|
struct rpc_rqst *rqstp = task->tk_rqstp;
|
2005-10-14 04:54:58 +08:00
|
|
|
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
|
2019-02-12 00:24:48 +08:00
|
|
|
u32 pad, offset, maj_stat;
|
2005-10-14 04:54:58 +08:00
|
|
|
int status;
|
2019-02-12 00:24:48 +08:00
|
|
|
__be32 *p, *opaque_len;
|
2005-10-14 04:54:58 +08:00
|
|
|
struct page **inpages;
|
|
|
|
int first;
|
|
|
|
struct kvec *iov;
|
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
status = -EIO;
|
|
|
|
p = xdr_reserve_space(xdr, 2 * sizeof(*p));
|
|
|
|
if (!p)
|
|
|
|
goto wrap_failed;
|
2005-10-14 04:54:58 +08:00
|
|
|
opaque_len = p++;
|
2019-02-12 00:24:48 +08:00
|
|
|
*p = cpu_to_be32(rqstp->rq_seqno);
|
2005-10-14 04:54:58 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
if (rpcauth_wrap_req_encode(task, xdr))
|
|
|
|
goto wrap_failed;
|
2005-10-14 04:54:58 +08:00
|
|
|
|
|
|
|
status = alloc_enc_pages(rqstp);
|
2019-02-12 00:24:48 +08:00
|
|
|
if (unlikely(status))
|
|
|
|
goto wrap_failed;
|
2016-04-01 20:29:47 +08:00
|
|
|
first = snd_buf->page_base >> PAGE_SHIFT;
|
2005-10-14 04:54:58 +08:00
|
|
|
inpages = snd_buf->pages + first;
|
|
|
|
snd_buf->pages = rqstp->rq_enc_pages;
|
2016-04-01 20:29:47 +08:00
|
|
|
snd_buf->page_base -= first << PAGE_SHIFT;
|
2010-03-18 01:02:47 +08:00
|
|
|
/*
|
2019-02-12 00:24:48 +08:00
|
|
|
* Move the tail into its own page, in case gss_wrap needs
|
|
|
|
* more space in the head when wrapping.
|
2010-03-18 01:02:47 +08:00
|
|
|
*
|
2019-02-12 00:24:48 +08:00
|
|
|
* Still... Why can't gss_wrap just slide the tail down?
|
2010-03-18 01:02:47 +08:00
|
|
|
*/
|
2005-10-14 04:54:58 +08:00
|
|
|
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
|
2019-02-12 00:24:48 +08:00
|
|
|
char *tmp;
|
|
|
|
|
2005-10-14 04:54:58 +08:00
|
|
|
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
|
|
|
|
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
|
|
|
|
snd_buf->tail[0].iov_base = tmp;
|
|
|
|
}
|
2019-02-12 00:24:48 +08:00
|
|
|
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
|
2005-10-14 04:55:18 +08:00
|
|
|
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
|
2010-03-18 01:02:47 +08:00
|
|
|
/* slack space should prevent this ever happening: */
|
2019-02-12 00:24:48 +08:00
|
|
|
if (unlikely(snd_buf->len > snd_buf->buflen))
|
|
|
|
goto wrap_failed;
|
2005-10-14 04:54:58 +08:00
|
|
|
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
|
|
|
|
* done anyway, so it's safe to put the request on the wire: */
|
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2007-06-25 22:15:15 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2005-10-14 04:54:58 +08:00
|
|
|
else if (maj_stat)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto bad_wrap;
|
2005-10-14 04:54:58 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
*opaque_len = cpu_to_be32(snd_buf->len - offset);
|
|
|
|
/* guess whether the pad goes into the head or the tail: */
|
2005-10-14 04:54:58 +08:00
|
|
|
if (snd_buf->page_len || snd_buf->tail[0].iov_len)
|
|
|
|
iov = snd_buf->tail;
|
|
|
|
else
|
|
|
|
iov = snd_buf->head;
|
|
|
|
p = iov->iov_base + iov->iov_len;
|
2020-03-03 04:01:08 +08:00
|
|
|
pad = xdr_pad_size(snd_buf->len - offset);
|
2005-10-14 04:54:58 +08:00
|
|
|
memset(p, 0, pad);
|
|
|
|
iov->iov_len += pad;
|
|
|
|
snd_buf->len += pad;
|
|
|
|
|
|
|
|
return 0;
|
2019-02-12 00:24:48 +08:00
|
|
|
wrap_failed:
|
|
|
|
return status;
|
2019-02-12 00:25:04 +08:00
|
|
|
bad_wrap:
|
|
|
|
trace_rpcgss_wrap(task, maj_stat);
|
|
|
|
return -EIO;
|
2005-10-14 04:54:58 +08:00
|
|
|
}
|
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-08-01 02:29:08 +08:00
|
|
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
|
|
|
gc_base);
|
|
|
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
2019-02-12 00:24:48 +08:00
|
|
|
int status;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-02-12 00:24:48 +08:00
|
|
|
status = -EIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
|
|
|
|
/* The spec seems a little ambiguous here, but I think that not
|
|
|
|
* wrapping context destruction requests makes the most sense.
|
|
|
|
*/
|
2019-02-12 00:24:48 +08:00
|
|
|
status = rpcauth_wrap_req_encode(task, xdr);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
switch (gss_cred->gc_service) {
|
2011-07-01 17:43:12 +08:00
|
|
|
case RPC_GSS_SVC_NONE:
|
2019-02-12 00:24:48 +08:00
|
|
|
status = rpcauth_wrap_req_encode(task, xdr);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
|
|
|
case RPC_GSS_SVC_INTEGRITY:
|
2019-02-12 00:24:48 +08:00
|
|
|
status = gss_wrap_req_integ(cred, ctx, task, xdr);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
|
|
|
case RPC_GSS_SVC_PRIVACY:
|
2019-02-12 00:24:48 +08:00
|
|
|
status = gss_wrap_req_priv(cred, ctx, task, xdr);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
2019-02-12 00:25:04 +08:00
|
|
|
default:
|
|
|
|
status = -EIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
gss_put_ctx(ctx);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2019-02-12 00:24:58 +08:00
|
|
|
static int
|
|
|
|
gss_unwrap_resp_auth(struct rpc_cred *cred)
|
|
|
|
{
|
2019-02-12 00:25:36 +08:00
|
|
|
struct rpc_auth *auth = cred->cr_auth;
|
|
|
|
|
|
|
|
auth->au_rslack = auth->au_verfsize;
|
|
|
|
auth->au_ralign = auth->au_verfsize;
|
2019-02-12 00:24:58 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
sunrpc: Fix gss_unwrap_resp_integ() again
xdr_buf_read_mic() tries to find unused contiguous space in a
received xdr_buf in order to linearize the checksum for the call
to gss_verify_mic. However, the corner cases in this code are
numerous and we seem to keep missing them. I've just hit yet
another buffer overrun related to it.
This overrun is at the end of xdr_buf_read_mic():
1284 if (buf->tail[0].iov_len != 0)
1285 mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
1286 else
1287 mic->data = buf->head[0].iov_base + buf->head[0].iov_len;
1288 __read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len);
1289 return 0;
This logic assumes the transport has set the length of the tail
based on the size of the received message. base + len is then
supposed to be off the end of the message but still within the
actual buffer.
In fact, the length of the tail is set by the upper layer when the
Call is encoded so that the end of the tail is actually the end of
the allocated buffer itself. This causes the logic above to set
mic->data to point past the end of the receive buffer.
The "mic->data = head" arm of this if statement is no less fragile.
As near as I can tell, this has been a problem forever. I'm not sure
that minimizing au_rslack recently changed this pathology much.
So instead, let's use a more straightforward approach: kmalloc a
separate buffer to linearize the checksum. This is similar to
how gss_validate() currently works.
Coming back to this code, I had some trouble understanding what
was going on. So I've cleaned up the variable naming and added
a few comments that point back to the XDR definition in RFC 2203
to help guide future spelunkers, including myself.
As an added clean up, the functionality that was in
xdr_buf_read_mic() is folded directly into gss_unwrap_resp_integ(),
as that is its only caller.
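For readability, the resulting flow is sketched below in condensed form
(this is a summary of the function as it stands after this patch, not the
literal code; the checksum bounds check, the rslack/ralign updates, and
the tracepoints are omitted):

	u32 len, offset, seqno, maj_stat;
	struct xdr_buf gss_data;
	struct xdr_netobj mic = { .data = NULL };

	/* opaque databody_integ<>: length, then seq_num + arg */
	if (xdr_stream_decode_u32(xdr, &len) || (len & 3))
		goto unwrap_failed;
	offset = rcv_buf->len - xdr_stream_remaining(xdr);
	if (xdr_stream_decode_u32(xdr, &seqno) || seqno != rqstp->rq_seqno)
		goto unwrap_failed;
	if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
		goto unwrap_failed;

	/* opaque checksum<>: linearize it into a kmalloc'd scratch buffer */
	offset += len;
	if (xdr_decode_word(rcv_buf, offset, &mic.len))
		goto unwrap_failed;
	offset += sizeof(__be32);
	mic.data = kmalloc(mic.len, GFP_NOFS);
	if (!mic.data ||
	    read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
		goto unwrap_failed;

	/* verify the MIC over the databody, then free the scratch buffer */
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);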
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2020-03-11 23:21:07 +08:00
|
|
|
/*
|
|
|
|
* RFC 2203, Section 5.3.2.2
|
|
|
|
*
|
|
|
|
* struct rpc_gss_integ_data {
|
|
|
|
* opaque databody_integ<>;
|
|
|
|
* opaque checksum<>;
|
|
|
|
* };
|
|
|
|
*
|
|
|
|
* struct rpc_gss_data_t {
|
|
|
|
* unsigned int seq_num;
|
|
|
|
* proc_req_arg_t arg;
|
|
|
|
* };
|
|
|
|
*/
|
2020-03-11 23:21:17 +08:00
|
|
|
static noinline_for_stack int
|
2019-02-12 00:25:04 +08:00
|
|
|
gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
|
|
|
|
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
|
|
|
|
struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2020-03-11 23:21:07 +08:00
|
|
|
struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
|
2019-02-12 00:25:36 +08:00
|
|
|
struct rpc_auth *auth = cred->cr_auth;
|
2020-03-11 23:21:07 +08:00
|
|
|
u32 len, offset, seqno, maj_stat;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct xdr_netobj mic;
|
2020-03-11 23:21:07 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-03-11 23:21:07 +08:00
|
|
|
ret = -EIO;
|
|
|
|
mic.data = NULL;
|
|
|
|
|
|
|
|
/* opaque databody_integ<>; */
|
|
|
|
if (xdr_stream_decode_u32(xdr, &len))
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2020-03-11 23:21:07 +08:00
|
|
|
if (len & 3)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2020-03-11 23:21:07 +08:00
|
|
|
offset = rcv_buf->len - xdr_stream_remaining(xdr);
|
|
|
|
if (xdr_stream_decode_u32(xdr, &seqno))
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2020-03-11 23:21:07 +08:00
|
|
|
if (seqno != rqstp->rq_seqno)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto bad_seqno;
|
2020-03-11 23:21:07 +08:00
|
|
|
if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
|
|
|
|
goto unwrap_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-03-11 23:21:07 +08:00
|
|
|
/*
|
|
|
|
* The xdr_stream now points to the beginning of the
|
|
|
|
* upper layer payload, to be passed below to
|
|
|
|
* rpcauth_unwrap_resp_decode(). The checksum, which
|
|
|
|
* follows the upper layer payload in @rcv_buf, is
|
|
|
|
* located and parsed without updating the xdr_stream.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-03-11 23:21:07 +08:00
|
|
|
/* opaque checksum<>; */
|
|
|
|
offset += len;
|
|
|
|
if (xdr_decode_word(rcv_buf, offset, &len))
|
|
|
|
goto unwrap_failed;
|
|
|
|
offset += sizeof(__be32);
|
|
|
|
if (offset + len > rcv_buf->len)
|
|
|
|
goto unwrap_failed;
|
|
|
|
mic.len = len;
|
|
|
|
mic.data = kmalloc(len, GFP_NOFS);
|
|
|
|
if (!mic.data)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2020-03-11 23:21:07 +08:00
|
|
|
if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2020-03-11 23:21:07 +08:00
|
|
|
|
|
|
|
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2007-06-25 22:15:15 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (maj_stat != GSS_S_COMPLETE)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto bad_mic;
|
|
|
|
|
2019-02-12 00:25:36 +08:00
|
|
|
auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
|
|
|
|
auth->au_ralign = auth->au_verfsize + 2;
|
2020-03-11 23:21:07 +08:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(mic.data);
|
|
|
|
return ret;
|
|
|
|
|
2019-02-12 00:24:58 +08:00
|
|
|
unwrap_failed:
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_unwrap_failed(task);
|
2020-03-11 23:21:07 +08:00
|
|
|
goto out;
|
2019-02-12 00:25:04 +08:00
|
|
|
bad_seqno:
|
2020-03-11 23:21:07 +08:00
|
|
|
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
|
|
|
|
goto out;
|
2019-02-12 00:24:58 +08:00
|
|
|
bad_mic:
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_verify_mic(task, maj_stat);
|
2020-03-11 23:21:07 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2020-03-11 23:21:17 +08:00
|
|
|
static noinline_for_stack int
|
2019-02-12 00:25:04 +08:00
|
|
|
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
|
|
|
|
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
|
|
|
|
struct xdr_stream *xdr)
|
2019-02-12 00:24:58 +08:00
|
|
|
{
|
|
|
|
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
|
|
|
|
struct kvec *head = rqstp->rq_rcv_buf.head;
|
2019-02-12 00:25:36 +08:00
|
|
|
struct rpc_auth *auth = cred->cr_auth;
|
2019-02-12 00:24:58 +08:00
|
|
|
unsigned int savedlen = rcv_buf->len;
|
|
|
|
u32 offset, opaque_len, maj_stat;
|
|
|
|
__be32 *p;
|
|
|
|
|
|
|
|
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
|
|
|
|
if (unlikely(!p))
|
|
|
|
goto unwrap_failed;
|
|
|
|
opaque_len = be32_to_cpup(p++);
|
|
|
|
offset = (u8 *)(p) - (u8 *)head->iov_base;
|
2005-10-14 04:54:58 +08:00
|
|
|
if (offset + opaque_len > rcv_buf->len)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto unwrap_failed;
|
2005-10-14 04:54:58 +08:00
|
|
|
rcv_buf->len = offset + opaque_len;
|
|
|
|
|
2005-10-14 04:55:18 +08:00
|
|
|
maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
|
2005-10-14 04:54:58 +08:00
|
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
2007-06-25 22:15:15 +08:00
|
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
2005-10-14 04:54:58 +08:00
|
|
|
if (maj_stat != GSS_S_COMPLETE)
|
2019-02-12 00:24:58 +08:00
|
|
|
goto bad_unwrap;
|
|
|
|
/* gss_unwrap decrypted the sequence number */
|
|
|
|
if (be32_to_cpup(p++) != rqstp->rq_seqno)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto bad_seqno;
|
2005-10-14 04:54:58 +08:00
|
|
|
|
2019-02-12 00:24:58 +08:00
|
|
|
/* gss_unwrap redacts the opaque blob from the head iovec.
|
|
|
|
* rcv_buf has changed, thus the stream needs to be reset.
|
|
|
|
*/
|
|
|
|
xdr_init_decode(xdr, rcv_buf, p, rqstp);
|
2010-12-14 22:59:29 +08:00
|
|
|
|
2019-02-12 00:25:36 +08:00
|
|
|
auth->au_rslack = auth->au_verfsize + 2 +
|
|
|
|
XDR_QUADLEN(savedlen - rcv_buf->len);
|
|
|
|
auth->au_ralign = auth->au_verfsize + 2 +
|
|
|
|
XDR_QUADLEN(savedlen - rcv_buf->len);
|
2019-02-12 00:24:58 +08:00
|
|
|
return 0;
|
|
|
|
unwrap_failed:
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_unwrap_failed(task);
|
|
|
|
return -EIO;
|
|
|
|
bad_seqno:
|
|
|
|
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
|
2019-02-12 00:24:58 +08:00
|
|
|
return -EIO;
|
|
|
|
bad_unwrap:
|
2019-02-12 00:25:04 +08:00
|
|
|
trace_rpcgss_unwrap(task, maj_stat);
|
2019-02-12 00:24:58 +08:00
|
|
|
return -EIO;
|
2010-12-14 22:59:29 +08:00
|
|
|
}
|
2005-10-14 04:54:58 +08:00
|
|
|
|
2018-08-15 01:50:21 +08:00
|
|
|
static bool
|
|
|
|
gss_seq_is_newer(u32 new, u32 old)
|
|
|
|
{
|
|
|
|
return (s32)(new - old) > 0;
|
|
|
|
}
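/*
 * Illustration (not in the original source): the signed subtraction makes
 * the comparison robust to 32-bit sequence-number wrap-around, e.g.
 * gss_seq_is_newer(0x00000002, 0xfffffffe) computes (s32)4 > 0 -> true,
 * while gss_seq_is_newer(5, 10) computes (s32)-5 > 0 -> false.
 */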
|
|
|
|
|
|
|
|
static bool
|
|
|
|
gss_xmit_need_reencode(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_cred *cred = req->rq_cred;
|
|
|
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
2019-02-12 00:25:04 +08:00
|
|
|
u32 win, seq_xmit = 0;
|
2018-08-15 01:50:21 +08:00
|
|
|
bool ret = true;
|
|
|
|
|
|
|
|
if (!ctx)
|
2019-02-12 00:25:04 +08:00
|
|
|
goto out;
|
2018-08-15 01:50:21 +08:00
|
|
|
|
|
|
|
if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
|
2019-02-12 00:25:04 +08:00
|
|
|
goto out_ctx;
|
2018-08-15 01:50:21 +08:00
|
|
|
|
|
|
|
seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
|
|
|
|
while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
|
|
|
|
u32 tmp = seq_xmit;
|
|
|
|
|
|
|
|
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
|
|
|
|
if (seq_xmit == tmp) {
|
|
|
|
ret = false;
|
2019-02-12 00:25:04 +08:00
|
|
|
goto out_ctx;
|
2018-08-15 01:50:21 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
win = ctx->gc_win;
|
|
|
|
if (win > 0)
|
|
|
|
ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
|
2019-02-12 00:25:04 +08:00
|
|
|
|
|
|
|
out_ctx:
|
2018-08-15 01:50:21 +08:00
|
|
|
gss_put_ctx(ctx);
|
2019-02-12 00:25:04 +08:00
|
|
|
out:
|
|
|
|
trace_rpcgss_need_reencode(task, seq_xmit, ret);
|
2018-08-15 01:50:21 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int
|
2019-02-12 00:24:58 +08:00
|
|
|
gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2019-02-12 00:24:58 +08:00
|
|
|
struct rpc_rqst *rqstp = task->tk_rqstp;
|
|
|
|
struct rpc_cred *cred = rqstp->rq_cred;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
|
|
|
gc_base);
|
|
|
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
2019-02-12 00:24:58 +08:00
|
|
|
int status = -EIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (ctx->gc_proc != RPC_GSS_PROC_DATA)
|
|
|
|
goto out_decode;
|
|
|
|
switch (gss_cred->gc_service) {
|
2011-07-01 17:43:12 +08:00
|
|
|
case RPC_GSS_SVC_NONE:
|
2019-02-12 00:24:58 +08:00
|
|
|
status = gss_unwrap_resp_auth(cred);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
|
|
|
case RPC_GSS_SVC_INTEGRITY:
|
2019-02-12 00:25:04 +08:00
|
|
|
status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
|
|
|
case RPC_GSS_SVC_PRIVACY:
|
2019-02-12 00:25:04 +08:00
|
|
|
status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
|
2011-07-01 17:43:12 +08:00
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2019-02-12 00:24:58 +08:00
|
|
|
if (status)
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
out_decode:
|
2019-02-12 00:24:58 +08:00
|
|
|
status = rpcauth_unwrap_resp_decode(task, xdr);
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
|
|
|
gss_put_ctx(ctx);
|
|
|
|
return status;
|
|
|
|
}
|
2007-02-10 07:38:13 +08:00
|
|
|
|
2007-06-24 08:17:58 +08:00
|
|
|
static const struct rpc_authops authgss_ops = {
|
2005-04-17 06:20:36 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.au_flavor = RPC_AUTH_GSS,
|
|
|
|
.au_name = "RPCSEC_GSS",
|
|
|
|
.create = gss_create,
|
|
|
|
.destroy = gss_destroy,
|
2016-09-29 23:44:40 +08:00
|
|
|
.hash_cred = gss_hash_cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
.lookup_cred = gss_lookup_cred,
|
2012-01-11 23:18:17 +08:00
|
|
|
.crcreate = gss_create_cred,
|
2013-03-17 03:54:43 +08:00
|
|
|
.info2flavor = gss_mech_info2flavor,
|
2013-03-17 03:55:10 +08:00
|
|
|
.flavor2info = gss_mech_flavor2info,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
2007-06-24 08:17:58 +08:00
|
|
|
static const struct rpc_credops gss_credops = {
|
2014-06-22 08:52:16 +08:00
|
|
|
.cr_name = "AUTH_GSS",
|
|
|
|
.crdestroy = gss_destroy_cred,
|
|
|
|
.cr_init = gss_cred_init,
|
|
|
|
.crmatch = gss_match,
|
|
|
|
.crmarshal = gss_marshal,
|
|
|
|
.crrefresh = gss_refresh,
|
|
|
|
.crvalidate = gss_validate,
|
|
|
|
.crwrap_req = gss_wrap_req,
|
|
|
|
.crunwrap_resp = gss_unwrap_resp,
|
|
|
|
.crkey_timeout = gss_key_timeout,
|
|
|
|
.crstringify_acceptor = gss_stringify_acceptor,
|
2018-08-15 01:50:21 +08:00
|
|
|
.crneed_reencode = gss_xmit_need_reencode,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
2007-06-27 05:04:57 +08:00
|
|
|
static const struct rpc_credops gss_nullops = {
|
2014-06-22 08:52:16 +08:00
|
|
|
.cr_name = "AUTH_GSS",
|
|
|
|
.crdestroy = gss_destroy_nullcred,
|
|
|
|
.crmatch = gss_match,
|
|
|
|
.crmarshal = gss_marshal,
|
|
|
|
.crrefresh = gss_refresh_null,
|
|
|
|
.crvalidate = gss_validate,
|
|
|
|
.crwrap_req = gss_wrap_req,
|
|
|
|
.crunwrap_resp = gss_unwrap_resp,
|
|
|
|
.crstringify_acceptor = gss_stringify_acceptor,
|
2007-06-27 05:04:57 +08:00
|
|
|
};
|
|
|
|
|
2009-08-10 03:14:15 +08:00
|
|
|
static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
|
2019-04-25 05:46:45 +08:00
|
|
|
.upcall = gss_v0_upcall,
|
2008-12-24 05:16:37 +08:00
|
|
|
.downcall = gss_pipe_downcall,
|
|
|
|
.destroy_msg = gss_pipe_destroy_msg,
|
|
|
|
.open_pipe = gss_pipe_open_v0,
|
|
|
|
.release_pipe = gss_pipe_release,
|
|
|
|
};
|
|
|
|
|
2009-08-10 03:14:15 +08:00
|
|
|
static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
|
2019-04-25 05:46:45 +08:00
|
|
|
.upcall = gss_v1_upcall,
|
2005-04-17 06:20:36 +08:00
|
|
|
.downcall = gss_pipe_downcall,
|
|
|
|
.destroy_msg = gss_pipe_destroy_msg,
|
2008-12-24 05:16:37 +08:00
|
|
|
.open_pipe = gss_pipe_open_v1,
|
2005-04-17 06:20:36 +08:00
|
|
|
.release_pipe = gss_pipe_release,
|
|
|
|
};
|
|
|
|
|
2012-01-20 01:42:37 +08:00
|
|
|
static __net_init int rpcsec_gss_init_net(struct net *net)
|
|
|
|
{
|
|
|
|
return gss_svc_init_net(net);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __net_exit void rpcsec_gss_exit_net(struct net *net)
|
|
|
|
{
|
|
|
|
gss_svc_shutdown_net(net);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pernet_operations rpcsec_gss_net_ops = {
|
|
|
|
.init = rpcsec_gss_init_net,
|
|
|
|
.exit = rpcsec_gss_exit_net,
|
|
|
|
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Initialize RPCSEC_GSS module
|
|
|
|
*/
|
|
|
|
static int __init init_rpcsec_gss(void)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
err = rpcauth_register(&authgss_ops);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
err = gss_svc_init();
|
|
|
|
if (err)
|
|
|
|
goto out_unregister;
|
2012-01-20 01:42:37 +08:00
|
|
|
err = register_pernet_subsys(&rpcsec_gss_net_ops);
|
|
|
|
if (err)
|
|
|
|
goto out_svc_exit;
|
2008-12-24 05:10:52 +08:00
|
|
|
rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2012-01-20 01:42:37 +08:00
|
|
|
out_svc_exit:
|
|
|
|
gss_svc_shutdown();
|
2005-04-17 06:20:36 +08:00
|
|
|
out_unregister:
|
|
|
|
rpcauth_unregister(&authgss_ops);
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit exit_rpcsec_gss(void)
|
|
|
|
{
|
2012-01-20 01:42:37 +08:00
|
|
|
unregister_pernet_subsys(&rpcsec_gss_net_ops);
|
2005-04-17 06:20:36 +08:00
|
|
|
gss_svc_shutdown();
|
|
|
|
rpcauth_unregister(&authgss_ops);
|
2009-06-08 11:11:48 +08:00
|
|
|
rcu_barrier(); /* Wait for completion of call_rcu()'s */
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
SUNRPC: Missing module alias for auth_rpcgss.ko
Commit f344f6df "SUNRPC: Auto-load RPC authentication kernel
modules", Mon Mar 20 13:44:08 2006, adds a request_module() call
in rpcauth_create() to auto-load RPC security modules when a ULP
tries to create a credential of that flavor.
In rpcauth_create(), the name of the module to load is built like
this:
request_module("rpc-auth-%u", flavor);
This means that for, say, RPC_AUTH_GSS, request_module() is looking
for a module or alias called "rpc-auth-6".
The GSS module is named "auth_rpcgss", and commit f344f6df does not
add any new module aliases. There is also no such alias provided in
/etc/modprobe.d on my system (Fedora 16). Without this alias, the
GSS module is not loaded on demand.
This is used by rpcauth_create(). The pseudoflavor_to_flavor() call
can return RPC_AUTH_GSS, which is passed to request_module().
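The mechanism, sketched for clarity (not the literal rpcauth_create()
code; the hard-coded 6 is the RPC_AUTH_GSS flavor number mentioned above):

	/* rpcauth_create() formats the numeric flavor into the request ... */
	request_module("rpc-auth-%u", 6);	/* asks for "rpc-auth-6" */
	/* ... which is only resolved once the alias below is declared. */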
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-03-17 03:54:16 +08:00
|
|
|
MODULE_ALIAS("rpc-auth-6");
|
2005-04-17 06:20:36 +08:00
|
|
|
MODULE_LICENSE("GPL");
|
2010-05-14 00:55:38 +08:00
|
|
|
module_param_named(expired_cred_retry_delay,
|
|
|
|
gss_expired_cred_retry_delay,
|
|
|
|
uint, 0644);
|
|
|
|
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
|
|
|
|
"the RPC engine retries an expired credential");
|
|
|
|
|
2013-08-14 23:59:15 +08:00
|
|
|
module_param_named(key_expire_timeo,
|
|
|
|
gss_key_expire_timeo,
|
|
|
|
uint, 0644);
|
|
|
|
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
|
|
|
|
"credential keys lifetime where the NFS layer cleans up "
|
|
|
|
"prior to key expiration");
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
module_init(init_rpcsec_gss)
|
|
|
|
module_exit(exit_rpcsec_gss)
|