2009-10-07 02:31:13 +08:00
|
|
|
#ifndef __FS_CEPH_MESSENGER_H
|
|
|
|
#define __FS_CEPH_MESSENGER_H
|
|
|
|
|
2013-08-08 05:30:24 +08:00
|
|
|
#include <linux/blk_types.h>
|
2009-12-08 07:55:05 +08:00
|
|
|
#include <linux/kref.h>
|
2009-10-07 02:31:13 +08:00
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/radix-tree.h>
|
|
|
|
#include <linux/uio.h>
|
|
|
|
#include <linux/workqueue.h>
|
2015-06-25 22:47:45 +08:00
|
|
|
#include <net/net_namespace.h>
|
2009-10-07 02:31:13 +08:00
|
|
|
|
2012-10-03 01:01:25 +08:00
|
|
|
#include <linux/ceph/types.h>
|
|
|
|
#include <linux/ceph/buffer.h>
|
2009-10-07 02:31:13 +08:00
|
|
|
|
|
|
|
struct ceph_msg;
|
|
|
|
struct ceph_connection;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ceph defines these callbacks for handling connection events.
|
|
|
|
*/
|
|
|
|
/*
 * Per-client-type callback table; the owner of a ceph_connection
 * (mon/osd/mds client) supplies these to handle connection events.
 */
struct ceph_connection_operations {
	/* take/drop a reference on the connection's owner */
	struct ceph_connection *(*get)(struct ceph_connection *);
	void (*put)(struct ceph_connection *);

	/* handle an incoming message. */
	void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);

	/* authorize an outgoing connection */
	struct ceph_auth_handshake *(*get_authorizer) (
				struct ceph_connection *con,
				int *proto, int force_new);
	int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
	int (*invalidate_authorizer)(struct ceph_connection *con);

	/* there was some error on the socket (disconnect, whatever) */
	void (*fault) (struct ceph_connection *con);

	/* a remote host has terminated a message exchange session, and messages
	 * we sent (or they tried to send us) may be lost. */
	void (*peer_reset) (struct ceph_connection *con);

	/* allocate a message for the given header; set *skip to drop it */
	struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
					struct ceph_msg_header *hdr,
					int *skip);

	/* optional message signing hooks (cephx) */
	int (*sign_message) (struct ceph_msg *msg);
	int (*check_message_signature) (struct ceph_msg *msg);
};
|
|
|
|
|
|
|
|
/*
 * use format string %s%d — expands to TWO printf arguments:
 * the entity type name and its (little-endian) number.
 */
#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
|
2009-10-07 02:31:13 +08:00
|
|
|
|
|
|
|
/*
 * State shared by all connections of one client instance.
 */
struct ceph_messenger {
	struct ceph_entity_inst inst;    /* my name+address */
	struct ceph_entity_addr my_enc_addr; /* my address in encoded (wire)
						form */

	atomic_t stopping;               /* set once shutdown has begun */
	possible_net_t net;              /* network namespace for sockets */

	/*
	 * the global_seq counts connections i (attempt to) initiate
	 * in order to disambiguate certain connect race conditions.
	 */
	u32 global_seq;
	spinlock_t global_seq_lock;      /* protects global_seq */
};
|
|
|
|
|
2013-03-02 08:00:16 +08:00
|
|
|
/* kinds of data payload a message data item (ceph_msg_data) may carry */
enum ceph_msg_data_type {
	CEPH_MSG_DATA_NONE,	/* message contains no data payload */
	CEPH_MSG_DATA_PAGES,	/* data source/destination is a page array */
	CEPH_MSG_DATA_PAGELIST,	/* data source/destination is a pagelist */
#ifdef CONFIG_BLOCK
	CEPH_MSG_DATA_BIO,	/* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
};
|
|
|
|
|
|
|
|
static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
|
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case CEPH_MSG_DATA_NONE:
|
|
|
|
case CEPH_MSG_DATA_PAGES:
|
|
|
|
case CEPH_MSG_DATA_PAGELIST:
|
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
case CEPH_MSG_DATA_BIO:
|
|
|
|
#endif /* CONFIG_BLOCK */
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-15 03:09:06 +08:00
|
|
|
/*
 * One data item of a message's payload; which union member is live
 * is selected by @type.
 */
struct ceph_msg_data {
	struct list_head links;	/* ceph_msg->data */
	enum ceph_msg_data_type type;
	union {
#ifdef CONFIG_BLOCK
		struct {			/* CEPH_MSG_DATA_BIO */
			struct bio *bio;
			size_t bio_length;
		};
#endif /* CONFIG_BLOCK */
		struct {			/* CEPH_MSG_DATA_PAGES */
			struct page **pages;	/* NOT OWNER. */
			size_t length;		/* total # bytes */
			unsigned int alignment;	/* first page */
		};
		struct ceph_pagelist *pagelist;	/* CEPH_MSG_DATA_PAGELIST */
	};
};
|
|
|
|
|
2013-03-07 13:39:39 +08:00
|
|
|
/*
 * Iteration state over a message's list of data items; the union
 * member in use corresponds to the current item's type.
 */
struct ceph_msg_data_cursor {
	size_t total_resid;		/* across all data items */
	struct list_head *data_head;	/* = &ceph_msg->data */

	struct ceph_msg_data *data;	/* current data item */
	size_t resid;			/* bytes not yet consumed */
	bool last_piece;		/* current is last piece */
	bool need_crc;			/* crc update needed */
	union {
#ifdef CONFIG_BLOCK
		struct {				/* bio */
			struct bio *bio;		/* bio from list */
			struct bvec_iter bvec_iter;
		};
#endif /* CONFIG_BLOCK */
		struct {				/* pages */
			unsigned int page_offset;	/* offset in page */
			unsigned short page_index;	/* index in array */
			unsigned short page_count;	/* pages in array */
		};
		struct {				/* pagelist */
			struct page *page;		/* page from list */
			size_t offset;			/* bytes from list */
		};
	};
};
|
|
|
|
|
2009-10-07 02:31:13 +08:00
|
|
|
/*
 * a single message.  it contains a header (src, dest, message type, etc.),
 * footer (crc values, mainly), a "front" message body, and possibly a
 * data payload (stored in some number of pages).
 */
struct ceph_msg {
	struct ceph_msg_header hdr;	/* header */
	union {
		struct ceph_msg_footer footer;		/* footer */
		struct ceph_msg_footer_old old_footer;	/* old format footer */
	};
	struct kvec front;		/* unaligned blobs of message */
	struct ceph_buffer *middle;

	size_t data_length;		/* total bytes across data items */
	struct list_head data;		/* list of ceph_msg_data items */
	struct ceph_msg_data_cursor cursor; /* position within data payload */

	struct ceph_connection *con;
	struct list_head list_head;	/* links for connection lists */

	struct kref kref;		/* refcount; ceph_msg_get/ceph_msg_put */
	bool more_to_follow;
	bool needs_out_seq;
	int front_alloc_len;		/* allocated size of front buffer */
	unsigned long ack_stamp;	/* tx: when we were acked */

	struct ceph_msgpool *pool;	/* msgpool this msg came from, if any */
};
|
|
|
|
|
|
|
|
/* ceph connection fault delay defaults, for exponential backoff */
#define BASE_DELAY_INTERVAL	(HZ/2)		/* initial retry delay */
#define MAX_DELAY_INTERVAL	(5 * 60 * HZ)	/* backoff cap */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A single connection with another host.
|
|
|
|
*
|
|
|
|
* We maintain a queue of outgoing messages, and some session state to
|
|
|
|
* ensure that we can preserve the lossless, ordered delivery of
|
|
|
|
* messages in the case of a TCP disconnect.
|
|
|
|
*/
|
|
|
|
struct ceph_connection {
	void *private;		/* opaque context for the connection owner */

	const struct ceph_connection_operations *ops; /* owner's callbacks */

	struct ceph_messenger *msgr;	/* messenger this connection belongs to */

	atomic_t sock_state;		/* socket state machine */
	struct socket *sock;
	struct ceph_entity_addr peer_addr;	/* peer address */
	struct ceph_entity_addr peer_addr_for_me; /* our address as the peer
						     sees it */

	unsigned long flags;		/* connection flag bits */
	unsigned long state;		/* connection state machine */
	const char *error_msg;		/* error message, if any */

	struct ceph_entity_name peer_name;	/* peer name */

	u64 peer_features;		/* feature bits advertised by peer */
	u32 connect_seq;	/* identify the most recent connection
				   attempt for this connection, client */
	u32 peer_global_seq;	/* peer's global seq for this connection */

	int auth_retry;		/* true if we need a newer authorizer */
	void *auth_reply_buf;	/* where to put the authorizer reply */
	int auth_reply_buf_len;

	struct mutex mutex;	/* serializes access to connection state */

	/* out queue */
	struct list_head out_queue;
	struct list_head out_sent;	/* sending or sent but unacked */
	u64 out_seq;			/* last message queued for send */

	u64 in_seq, in_seq_acked;	/* last message received, acked */

	/* connection negotiation temps */
	char in_banner[CEPH_BANNER_MAX_LEN];
	struct ceph_msg_connect out_connect;
	struct ceph_msg_connect_reply in_reply;
	struct ceph_entity_addr actual_peer_addr;

	/* message out temps */
	struct ceph_msg_header out_hdr;	/* copy of out_msg's header, so the
					   envelope survives msg revocation */
	struct ceph_msg *out_msg;	/* sending message (== tail of
					   out_sent) */
	bool out_msg_done;

	struct kvec out_kvec[8],	/* sending header/footer data */
		*out_kvec_cur;
	int out_kvec_left;		/* kvec's left in out_kvec */
	int out_skip;			/* skip this many bytes */
	int out_kvec_bytes;		/* total bytes left */
	int out_more;			/* there is more data after the kvecs */
	__le64 out_temp_ack;		/* for writing an ack */
	struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
						     stamp */

	/* message in temps */
	struct ceph_msg_header in_hdr;
	struct ceph_msg *in_msg;
	u32 in_front_crc, in_middle_crc, in_data_crc;  /* calculated crc */

	char in_tag;		/* protocol control byte */
	int in_base_pos;	/* bytes read */
	__le64 in_temp_ack;	/* for reading an ack */

	struct timespec last_keepalive_ack; /* keepalive2 ack stamp */

	struct delayed_work work;	/* send|recv work */
	unsigned long delay;		/* current delay interval */
};
|
|
|
|
|
|
|
|
|
2010-04-07 06:14:15 +08:00
|
|
|
/* address helpers */
extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
extern int ceph_parse_ips(const char *c, const char *end,
			  struct ceph_entity_addr *addr,
			  int max_count, int *count);

/* messenger subsystem init/teardown */
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
extern void ceph_msgr_flush(void);

/* per-instance messenger setup/teardown */
extern void ceph_messenger_init(struct ceph_messenger *msgr,
				struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);

/* connection lifecycle */
extern void ceph_con_init(struct ceph_connection *con, void *private,
			  const struct ceph_connection_operations *ops,
			  struct ceph_messenger *msgr);
extern void ceph_con_open(struct ceph_connection *con,
			  __u8 entity_type, __u64 entity_num,
			  struct ceph_entity_addr *addr);
extern bool ceph_con_opened(struct ceph_connection *con);
extern void ceph_con_close(struct ceph_connection *con);
extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);

/* revoke a queued outgoing / expected incoming message */
extern void ceph_msg_revoke(struct ceph_msg *msg);
extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);

/* keepalive handling */
extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
				       unsigned long interval);

/* attach data items to a message */
extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
				    size_t length, size_t alignment);
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
				       struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
				  size_t length);
#endif /* CONFIG_BLOCK */

/* message allocation and refcounting */
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
				     bool can_fail);

extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg);
extern void ceph_msg_put(struct ceph_msg *msg);

extern void ceph_msg_dump(struct ceph_msg *msg);

#endif /* __FS_CEPH_MESSENGER_H */
|