#ifndef _FS_CEPH_OSD_CLIENT_H
#define _FS_CEPH_OSD_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/mempool.h>
#include <linux/rbtree.h>

#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;

/*
 * completion callback for async writepages
 */
typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
				     struct ceph_msg *);

/*
 * Notifies the initiator of the span during which a request is
 * "unsafe": it is called with unsafe == true just before the request
 * is first sent to the osd (under the osd client's request mutex),
 * and with unsafe == false once the osd reports that all changes
 * described by the request are durable.
 */
typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);

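/*
 * Illustrative sketch only (the callback, list and lock below are
 * hypothetical caller state, not part of this API): a filesystem can
 * use the unsafe callback to track requests whose changes are not yet
 * durable.
 *
 *	static void my_unsafe_cb(struct ceph_osd_request *req, bool unsafe)
 *	{
 *		struct my_inode_info *mi = req->r_priv;
 *
 *		spin_lock(&mi->unsafe_lock);
 *		if (unsafe)
 *			list_add_tail(&req->r_unsafe_item, &mi->unsafe_list);
 *		else
 *			list_del_init(&req->r_unsafe_item);
 *		spin_unlock(&mi->unsafe_lock);
 *	}
 */
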
/* a given osd we're communicating with */
struct ceph_osd {
	atomic_t o_ref;
	struct ceph_osd_client *o_osdc;
	int o_osd;
	int o_incarnation;
	struct rb_node o_node;
	struct ceph_connection o_con;
	struct list_head o_requests;
	struct list_head o_linger_requests;
	struct list_head o_osd_lru;
	struct ceph_auth_handshake o_auth;
	unsigned long lru_ttl;
	int o_marked_for_keepalive;
	struct list_head o_keepalive_item;
};

#define CEPH_OSD_SLAB_OPS	2
#define CEPH_OSD_MAX_OPS	16

enum ceph_osd_data_type {
	CEPH_OSD_DATA_TYPE_NONE = 0,
	CEPH_OSD_DATA_TYPE_PAGES,
	CEPH_OSD_DATA_TYPE_PAGELIST,
#ifdef CONFIG_BLOCK
	CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
};

struct ceph_osd_data {
	enum ceph_osd_data_type	type;
	union {
		struct {
			struct page	**pages;
			u64		length;
			u32		alignment;
			bool		pages_from_pool;
			bool		own_pages;
		};
		struct ceph_pagelist	*pagelist;
#ifdef CONFIG_BLOCK
		struct {
			struct bio	*bio;		/* list of bios */
			size_t		bio_length;	/* total in list */
		};
#endif /* CONFIG_BLOCK */
	};
};

struct ceph_osd_req_op {
	u16 op;			/* CEPH_OSD_OP_* */
	u32 flags;		/* CEPH_OSD_OP_FLAG_* */
	u32 indata_len;		/* request */
	u32 outdata_len;	/* reply */
	s32 rval;

	union {
		struct ceph_osd_data raw_data_in;
		struct {
			u64 offset, length;
			u64 truncate_size;
			u32 truncate_seq;
			struct ceph_osd_data osd_data;
		} extent;
		struct {
			u32 name_len;
			u32 value_len;
			__u8 cmp_op;	/* CEPH_OSD_CMPXATTR_OP_* */
			__u8 cmp_mode;	/* CEPH_OSD_CMPXATTR_MODE_* */
			struct ceph_osd_data osd_data;
		} xattr;
		struct {
			const char *class_name;
			const char *method_name;
			struct ceph_osd_data request_info;
			struct ceph_osd_data request_data;
			struct ceph_osd_data response_data;
			__u8 class_len;
			__u8 method_len;
			__u8 argc;
		} cls;
		struct {
			u64 cookie;
			u64 ver;
			u32 prot_ver;
			u32 timeout;
			__u8 flag;
		} watch;
		struct {
			u64 expected_object_size;
			u64 expected_write_size;
		} alloc_hint;
	};
};

/* an in-flight request */
struct ceph_osd_request {
	u64             r_tid;              /* unique for this client */
	struct rb_node  r_node;
	struct list_head r_req_lru_item;
	struct list_head r_osd_item;
	struct list_head r_linger_item;
	struct list_head r_linger_osd_item;
	struct ceph_osd *r_osd;
	struct ceph_pg   r_pgid;
	int              r_pg_osds[CEPH_PG_MAX_SIZE];
	int              r_num_pg_osds;

	struct ceph_msg  *r_request, *r_reply;
	int               r_flags;     /* any additional flags for the osd */
	u32               r_sent;      /* >0 if r_request is sending/sent */

	/* request osd ops array  */
	unsigned int      r_num_ops;

	/* these are updated on each send */
	__le32           *r_request_osdmap_epoch;
	__le32           *r_request_flags;
	__le64           *r_request_pool;
	void             *r_request_pgid;
	__le32           *r_request_attempts;
	/* blocked by osdmap PAUSE/FULL flags; resent once they clear */
	bool              r_paused;
	struct ceph_eversion *r_request_reassert_version;

	int               r_result;
	int               r_got_reply;
	int               r_linger;

	struct ceph_osd_client *r_osdc;
	struct kref       r_kref;
	bool              r_mempool;
	struct completion r_completion, r_safe_completion;
	ceph_osdc_callback_t r_callback;
	ceph_osdc_unsafe_callback_t r_unsafe_callback;
	struct ceph_eversion r_reassert_version;
	struct list_head  r_unsafe_item;

	struct inode *r_inode;                /* for use by callbacks */
	void *r_priv;                         /* ditto */

	struct ceph_object_locator r_base_oloc;
	struct ceph_object_id r_base_oid;
	/*
	 * Request target after following any osd redirect reply
	 * (currently only pool redirects are supported); otherwise
	 * identical to the base oloc/oid above.
	 */
	struct ceph_object_locator r_target_oloc;
	struct ceph_object_id r_target_oid;

	u64               r_snapid;
	unsigned long     r_stamp;            /* send OR check time */

	struct ceph_snap_context *r_snapc;    /* snap context for writes */

	struct ceph_osd_req_op r_ops[];
};

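/*
 * Illustrative sketch (hypothetical callback and context names): for
 * asynchronous completion, a caller fills in r_callback/r_priv before
 * starting the request instead of waiting on it.
 *
 *	static void my_done_cb(struct ceph_osd_request *req,
 *			       struct ceph_msg *msg)
 *	{
 *		struct my_ctx *ctx = req->r_priv;
 *
 *		// req->r_result holds the outcome of the request
 *		complete_my_io(ctx, req->r_result);
 *	}
 *
 *	req->r_callback = my_done_cb;
 *	req->r_priv = ctx;
 *	ret = ceph_osdc_start_request(osdc, req, false);
 */
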
struct ceph_request_redirect {
	struct ceph_object_locator oloc;
};

struct ceph_osd_event {
	u64 cookie;
	int one_shot;
	struct ceph_osd_client *osdc;
	void (*cb)(u64, u64, u8, void *);
	void *data;
	struct rb_node node;
	struct list_head osd_node;
	struct kref kref;
};

struct ceph_osd_event_work {
	struct work_struct work;
	struct ceph_osd_event *event;
	u64 ver;
	u64 notify_id;
	u8 opcode;
};

struct ceph_osd_client {
	struct ceph_client     *client;

	struct ceph_osdmap     *osdmap;       /* current map */
	struct rw_semaphore    map_sem;
	struct completion      map_waiters;
	u64                    last_requested_map;

	struct mutex           request_mutex;
	struct rb_root         osds;          /* osds */
	struct list_head       osd_lru;       /* idle osds */
	u64                    timeout_tid;   /* tid of timeout triggering rq */
	u64                    last_tid;      /* tid of last request */
	struct rb_root         requests;      /* pending requests */
	struct list_head       req_lru;       /* in-flight lru */
	struct list_head       req_unsent;    /* unsent/need-resend queue */
	struct list_head       req_notarget;  /* map to no osd */
	struct list_head       req_linger;    /* lingering requests */
	int                    num_requests;
	struct delayed_work    timeout_work;
	struct delayed_work    osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry          *debugfs_file;
#endif

	mempool_t              *req_mempool;

	struct ceph_msgpool    msgpool_op;
	struct ceph_msgpool    msgpool_op_reply;

	spinlock_t             event_lock;
	struct rb_root         event_tree;
	u64                    event_count;

	struct workqueue_struct *notify_wq;
};

extern int ceph_osdc_setup(void);
extern void ceph_osdc_cleanup(void);

extern int ceph_osdc_init(struct ceph_osd_client *osdc,
			  struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);

extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
				   struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
				 struct ceph_msg *msg);

extern void osd_req_op_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode, u32 flags);

extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
					unsigned int which,
					struct page **pages, u64 length,
					u32 alignment, bool pages_from_pool,
					bool own_pages);

/*
 * The osd_req_op_*_init() helpers initialize the op at position
 * "which" within an already-allocated osd request; they do not
 * allocate anything themselves.
 */
extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
					unsigned int which, u16 opcode,
					u64 offset, u64 length,
					u64 truncate_size, u32 truncate_seq);
extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
					unsigned int which, u64 length);
extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				       unsigned int which, u64 offset_inc);

extern struct ceph_osd_data *osd_req_op_extent_osd_data(
					struct ceph_osd_request *osd_req,
					unsigned int which);
extern struct ceph_osd_data *osd_req_op_cls_response_data(
					struct ceph_osd_request *osd_req,
					unsigned int which);

extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
					unsigned int which,
					struct page **pages, u64 length,
					u32 alignment, bool pages_from_pool,
					bool own_pages);
extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
					unsigned int which,
					struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
					unsigned int which,
					struct bio *bio, size_t bio_length);
#endif /* CONFIG_BLOCK */

extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
					unsigned int which,
					struct ceph_pagelist *pagelist);
extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
					unsigned int which,
					struct page **pages, u64 length,
					u32 alignment, bool pages_from_pool,
					bool own_pages);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
					unsigned int which,
					struct page **pages, u64 length,
					u32 alignment, bool pages_from_pool,
					bool own_pages);

extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
					unsigned int which, u16 opcode,
					const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
				 u16 opcode, const char *name, const void *value,
				 size_t size, u8 cmp_op, u8 cmp_mode);

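/*
 * Illustrative sketch (not taken from any particular caller): invoking
 * an object class method and collecting its output.  "req", "pagelist",
 * "pages" and "resp_len" are assumed to have been set up by the caller;
 * CEPH_OSD_OP_CALL comes from <linux/ceph/rados.h>.
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "get_info");
 *	// optional input for the method
 *	osd_req_op_cls_request_data_pagelist(req, 0, pagelist);
 *	// buffer that will receive the method's output
 *	osd_req_op_cls_response_data_pages(req, 0, pages, resp_len, 0,
 *					   false, false);
 */
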
extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
					unsigned int which, u16 opcode,
					u64 cookie, u64 version, int flag);
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
					unsigned int which,
					u64 expected_object_size,
					u64 expected_write_size);

extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags);

extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
				    struct ceph_snap_context *snapc,
				    u64 snap_id,
				    struct timespec *mtime);

/*
 * Create a request for I/O on part of a file (described by vino,
 * layout and offset/length).  The data buffer (pages, pagelist or
 * bio) is not attached here; the caller sets up the op's osd data
 * afterwards.
 */
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
				      struct ceph_file_layout *layout,
				      struct ceph_vino vino,
				      u64 offset, u64 *len,
				      unsigned int which, int num_ops,
				      int opcode, int flags,
				      struct ceph_snap_context *snapc,
				      u32 truncate_seq, u64 truncate_size,
				      bool use_mempool);

extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req);

extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);

extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
				   struct ceph_osd_request *req,
				   bool nofail);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req);
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);

extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);

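/*
 * Illustrative sketch of a simple synchronous object read (error
 * handling trimmed; "osdc", "layout", "vino", "pages", the offsets
 * and the truncate values are assumed to come from the caller, and
 * CEPH_OSD_OP_READ/CEPH_OSD_FLAG_READ from <linux/ceph/rados.h>):
 *
 *	u64 len = (u64)nr_pages << PAGE_SHIFT;
 *	struct ceph_osd_request *req;
 *	int ret;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	// attach the destination pages to op 0's extent data
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *
 *	// encode the request message, then submit and wait
 *	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */
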
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			       struct ceph_vino vino,
			       struct ceph_file_layout *layout,
			       u64 off, u64 *plen,
			       u32 truncate_seq, u64 truncate_size,
			       struct page **pages, int nr_pages,
			       int page_align);

extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
				struct ceph_vino vino,
				struct ceph_file_layout *layout,
				struct ceph_snap_context *sc,
				u64 off, u64 len,
				u32 truncate_seq, u64 truncate_size,
				struct timespec *mtime,
				struct page **pages, int nr_pages);

/* watch/notify events */
extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
				  void (*event_cb)(u64, u64, u8, void *),
				  void *data, struct ceph_osd_event **pevent);
extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
extern void ceph_osdc_put_event(struct ceph_osd_event *event);

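/*
 * Illustrative sketch (hypothetical callback and data names) of
 * registering for watch/notify callbacks.  The event's cookie is
 * typically what gets passed to osd_req_op_watch_init() when arming
 * the watch on an object.
 *
 *	static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 *	{
 *		// dispatched from the osd client's notify workqueue
 *	}
 *
 *	struct ceph_osd_event *event;
 *	int ret;
 *
 *	ret = ceph_osdc_create_event(osdc, my_notify_cb, mydata, &event);
 *	...
 *	ceph_osdc_cancel_event(event);
 */
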
#endif