commit 653f42f6b6

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  ceph: add missing spin_unlock at ceph_mdsc_build_path()
  ceph: fix SEEK_CUR, SEEK_SET regression
  crush: fix mapping calculation when force argument doesn't exist
  ceph: use i_ceph_lock instead of i_lock
  rbd: remove buggy rollback functionality
  rbd: return an error when an invalid header is read
  ceph: fix rasize reporting by ceph_show_options
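Most of the fs/ceph hunks below are one mechanical conversion: the per-inode ceph state that used to be guarded by the VFS inode->i_lock moves under a new ceph-private spinlock, ci->i_ceph_lock (added to struct ceph_inode_info in fs/ceph/super.h and initialized in ceph_alloc_inode()). A minimal before/after sketch of the pattern, modeled on the ceph_caps_issued() helper touched in super.h; the _old/_new names are mine, for illustration only:

	/* Before: the shared VFS lock protected ceph's per-inode cap state. */
	static inline int ceph_caps_issued_old(struct ceph_inode_info *ci)
	{
		int issued;

		spin_lock(&ci->vfs_inode.i_lock);
		issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->vfs_inode.i_lock);
		return issued;
	}

	/* After: the same state is guarded by ceph's own lock, so ceph no
	 * longer competes with (or depends on) VFS use of inode->i_lock. */
	static inline int ceph_caps_issued_new(struct ceph_inode_info *ci)
	{
		int issued;

		spin_lock(&ci->i_ceph_lock);
		issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		return issued;
	}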
Documentation/ABI/testing/sysfs-bus-rbd

@@ -57,13 +57,6 @@ create_snap
 
 	$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-	Rolls back data to the specified snapshot. This goes over the entire
-	list of rados blocks and sends a rollback command to each.
-
-	$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
 	A directory per each snapshot
drivers/block/rbd.c

@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);	/* clients */
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	int ret = -ENOMEM;
 
+	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+		return -ENXIO;
+	}
+
 	init_rwsem(&header->snap_rwsem);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
 	return ret;
 }
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-				     u64 snapid,
-				     const char *obj)
-{
-	struct ceph_osd_req_op *ops;
-	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-	if (ret < 0)
-		return ret;
-
-	ops[0].snap.snapid = snapid;
-
-	ret = rbd_req_sync_op(dev, NULL,
-			      CEPH_NOSNAP,
-			      0,
-			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-			      ops,
-			      1, obj, 0, 0, NULL, NULL, NULL);
-
-	rbd_destroy_ops(ops);
-
-	return ret;
-}
-
 /*
  * Request sync osd read
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 		goto out_dh;
 
 	rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-	if (rc < 0)
+	if (rc < 0) {
+		if (rc == -ENXIO) {
+			pr_warning("unrecognized header format"
+				   " for image %s", rbd_dev->obj);
+		}
 		goto out_dh;
+	}
 
 	if (snap_count != header->total_snaps) {
 		snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
 	&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
 	&dev_attr_current_snap.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_create_snap.attr,
-	&dev_attr_rollback_snap.attr,
 	NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
 	return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t count)
-{
-	struct rbd_device *rbd_dev = dev_to_rbd(dev);
-	int ret;
-	u64 snapid;
-	u64 cur_ofs;
-	char *seg_name = NULL;
-	char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!snap_name)
-		return ret;
-
-	/* parse snaps add command */
-	snprintf(snap_name, count, "%s", buf);
-	seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-	if (!seg_name)
-		goto done;
-
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-	if (ret < 0)
-		goto done_unlock;
-
-	dout("snapid=%lld\n", snapid);
-
-	cur_ofs = 0;
-	while (cur_ofs < rbd_dev->header.image_size) {
-		cur_ofs += rbd_get_segment(&rbd_dev->header,
-					   rbd_dev->obj,
-					   cur_ofs, (u64)-1,
-					   seg_name, NULL);
-		dout("seg_name=%s\n", seg_name);
-
-		ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-		if (ret < 0)
-			pr_warning("could not roll back obj %s err=%d\n",
-				   seg_name, ret);
-	}
-
-	ret = __rbd_update_snaps(rbd_dev);
-	if (ret < 0)
-		goto done_unlock;
-
-	ret = count;
-
-done_unlock:
-	mutex_unlock(&ctl_mutex);
-done:
-	kfree(seg_name);
-	kfree(snap_name);
-
-	return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
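For context on the two rbd header hunks above: rbd_header_from_disk() now rejects a header object that does not begin with the RBD_HEADER_TEXT magic, and rbd_read_header() maps that distinct -ENXIO onto a user-visible warning instead of silently using garbage. A standalone sketch of the same validate-before-use idea (the helper and the macro value here are illustrative, not the driver's code):

	#include <errno.h>
	#include <string.h>

	#define EXAMPLE_MAGIC "<<< Rados Block Device Image >>>\n"	/* illustrative value */

	/* Refuse to interpret a header blob unless it starts with the magic;
	 * a distinct errno lets the caller warn about this case specifically. */
	static int example_check_header(const void *buf, size_t len)
	{
		if (len < sizeof(EXAMPLE_MAGIC) ||
		    memcmp(buf, EXAMPLE_MAGIC, sizeof(EXAMPLE_MAGIC)) != 0)
			return -ENXIO;
		return 0;
	}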
fs/ceph/addr.c

@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 	/* dirty the head */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_head_snapc == NULL)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 	++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
 	     snapc, snapc->seq, snapc->num_snaps);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	/* now adjust page */
 	spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return snapc;
 }
fs/ceph/caps.c  (187 lines changed)

@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 int ceph_get_cap_mds(struct inode *inode)
 {
+	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds;
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	mds = __ceph_get_cap_mds(ceph_inode(inode));
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return mds;
 }
- * Called under i_lock.
+ * Called under i_ceph_lock.
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
 		} else {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			new_cap = get_cap(mdsc, caps_reservation);
@@ -625,7 +626,7 @@ retry:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	wake_up_all(&ci->i_cap_wq);
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
- * called under i_lock
+ * called under i_ceph_lock
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
-	__releases(cap->ci->vfs_inode->i_lock)
+	__releases(cap->ci->i_ceph_lock)
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
-	__releases(ci->vfs_inode->i_lock)
-	__acquires(ci->vfs_inode->i_lock)
+	__releases(ci->i_ceph_lock)
+	__acquires(ci->i_ceph_lock)
@@ -1261,7 +1262,7 @@ retry:
 		if (!session) {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
@@ -1275,7 +1276,7 @@ retry:
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			goto retry;
@@ -1285,7 +1286,7 @@ retry:
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -1302,7 +1303,7 @@ retry:
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		goto retry;
@@ -1322,11 +1323,9 @@ out:
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-	struct inode *inode = &ci->vfs_inode;
-
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__ceph_flush_snaps(ci, NULL, 0);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
- * Called under i_lock.
+ * Called under i_ceph_lock.
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	invalidate_mapping_pages(&inode->i_data, 0, -1);
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 retry_locked:
@@ -1634,7 +1633,7 @@ ack:
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
@@ -1648,7 +1647,7 @@ ack:
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
 				down_read(&mdsc->snap_rwsem);
@@ -1664,10 +1663,10 @@ ack:
-		/* __send_cap drops i_lock */
+		/* __send_cap drops i_ceph_lock */
-		goto retry; /* retake i_lock and restart our cap scan. */
+		goto retry; /* retake i_ceph_lock and restart our cap scan. */
@@ -1681,7 +1680,7 @@ ack:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -1716,7 +1715,7 @@ retry:
 		if (!session) {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
@@ -1727,18 +1726,18 @@ retry:
-		/* __send_cap drops i_lock */
+		/* __send_cap drops i_ceph_lock */
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__cap_delay_requeue(mdsc, ci);
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (__ceph_caps_dirty(ci))
 			__cap_delay_requeue_front(mdsc, ci);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
 			if (delayed) {
-				spin_lock(&inode->i_lock);
+				spin_lock(&ci->i_ceph_lock);
 				__cap_delay_requeue(mdsc, ci);
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
 		} else {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
 		if (delayed) {
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			__cap_delay_requeue(mdsc, ci);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2140,9 +2139,9 @@ retry:
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__take_cap_refs(ci, caps);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_wrbuffer_ref -= nr;
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
-	__releases(inode->i_lock)
+	__releases(ci->i_ceph_lock)
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
-	__releases(inode->i_lock)
+	__releases(ci->i_ceph_lock)
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
-	__releases(inode->i_lock)
+	__releases(ci->i_ceph_lock)
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
 	/* make sure we re-request max_size, if necessary */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_requested_max_size = 0;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	struct inode *inode;
+	struct ceph_inode_info *ci;
 	struct ceph_cap *cap;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	/* lookup ino */
 	inode = ceph_find_inode(sb, vino);
+	ci = ceph_inode(inode);
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	/* the rest require a cap */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (!cap) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		goto flush_cap_releases;
 	}
-	/* note that each of these drops i_lock for us */
+	/* note that each of these drops i_ceph_lock for us */
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 	default:
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
- * this is racy (can't take i_lock and d_lock together), but it
+ * this is racy (can't take i_ceph_lock and d_lock together), but it
fs/ceph/dir.c

@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	/* can we use the dcache? */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if ((filp->f_pos == 2 || fi->dentry) &&
 	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
 	    ceph_snap(inode) != CEPH_SNAPDIR &&
 	    ceph_dir_test_complete(inode) &&
 	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		err = __dcache_readdir(filp, dirent, filldir);
 		if (err != -EAGAIN)
 			return err;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
@@ -428,12 +428,12 @@ more:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_release_count == fi->dir_release_count) {
 		ceph_dir_set_complete(inode);
 		ci->i_max_offset = filp->f_pos;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
-		spin_lock(&dir->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
 		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-			spin_unlock(&dir->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			dout(" dir %p complete, -ENOENT\n", dir);
 			return NULL;
 		}
-		spin_unlock(&dir->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (inode->i_nlink == 1) {
 		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
 		ci->i_ceph_flags |= CEPH_I_NODELAY;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
-	spin_lock(&dir->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_shared_gen == di->lease_shared_gen)
 		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-	spin_unlock(&dir->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
fs/ceph/file.c

@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
 	/* trivially open snapdir */
 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return ceph_init_file(inode, file, fmode);
 	}
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (__ceph_is_any_real_caps(ci) &&
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return ceph_init_file(inode, file, fmode);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -743,9 +743,9 @@ retry_snap:
 		int dirty;
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		ceph_put_cap_refs(ci, got);
@@ -764,9 +764,9 @@ retry_snap:
 	if (ret >= 0) {
 		int dirty;
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		if (dirty)
 			__mark_inode_dirty(inode, dirty);
 	}
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
 	mutex_lock(&inode->i_mutex);
 	__ceph_do_pending_vmtruncate(inode);
-	if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+	if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
 		if (ret < 0) {
 			offset = ret;
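The ceph_llseek() hunk above is the SEEK_CUR/SEEK_SET regression fix from the shortlog: the old test "origin != SEEK_CUR || origin != SEEK_SET" is true for every origin (one of the two inequalities always holds), so every llseek, including a plain SEEK_SET, forced a synchronous getattr to the MDS. Inverting the logic makes only the size-relative origins pay that cost. A standalone userspace illustration of the predicate (a test program of mine, not kernel code):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };

		for (unsigned i = 0; i < sizeof(origins) / sizeof(origins[0]); i++) {
			int origin = origins[i];
			/* old check: always 1, so ceph_llseek() always called getattr */
			int old = (origin != SEEK_CUR || origin != SEEK_SET);
			/* new check: only origins that need the current file size
			 * (the kernel also accepts SEEK_DATA/SEEK_HOLE here) */
			int new = (origin == SEEK_END);
			printf("origin=%d old=%d new=%d\n", origin, old, new);
		}
		return 0;
	}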
fs/ceph/inode.c

@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 	dout("alloc_inode %p\n", &ci->vfs_inode);
 
+	spin_lock_init(&ci->i_ceph_lock);
+
 	ci->i_version = 0;
 	ci->i_time_warp_seq = 0;
 	ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
 			BUG_ON(symlen != inode->i_size);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
 			sym[symlen] = 0;
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			if (!ci->i_symlink)
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
 no_change:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -750,13 +752,13 @@ no_change:
 		} else {
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
 			if (cap_fmode >= 0)
 				__ceph_get_fmode(ci, cap_fmode);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 		}
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 	struct dentry *dir = dn->d_parent;
 	struct inode *inode = dir->d_inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_dentry_info *di;
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (!ceph_dir_test_complete(inode)) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return;
 	}
 	di->offset = ceph_inode(inode)->i_max_offset++;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return ret;
@@ -1376,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
 		/* nevermind! */
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		goto out;
 	}
 	orig_gen = ci->i_rdcache_gen;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	truncate_inode_pages(&inode->i_data, 0);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (orig_gen == ci->i_rdcache_gen &&
@@ -1401,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1460,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_truncate_pending == 0) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return;
 	}
@@ -1474,7 +1477,7 @@ retry:
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		filemap_write_and_wait_range(&inode->i_data, 0,
 					     inode->i_sb->s_maxbytes);
@@ -1484,15 +1487,15 @@ retry:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	truncate_inode_pages(inode->i_mapping, to);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_truncate_pending--;
 	if (ci->i_truncate_pending == 0)
 		wake = 1;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1547,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	issued = __ceph_caps_issued(ci, NULL);
@@ -1695,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
 	release &= issued;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1717,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	ceph_mdsc_put_request(req);
fs/ceph/ioctl.c

@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
 	if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		ci->i_nr_by_mode[fi->fmode]--;
 		fi->fmode |= CEPH_FILE_MODE_LAZY;
 		ci->i_nr_by_mode[fi->fmode]++;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		dout("ioctl_layzio: file %p marked lazy\n", file);
 
 		ceph_check_caps(ci, 0, NULL);
fs/ceph/mds_client.c

@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		cap = NULL;
 		if (!cap) {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			goto random;
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return mds;
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__ceph_remove_cap(cap);
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	while (drop--)
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 	if (arg) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		ci->i_wanted_max_size = 0;
 		ci->i_requested_max_size = 0;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	mine = cap->issued | cap->implemented;
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
 		/* try to drop referring dentries */
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		d_prune_aliases(inode);
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (ci->i_cap_flush_seq <= want_flush_seq) {
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -1495,6 +1495,7 @@ retry:
 		} else if (stop_on_nosnap && inode &&
 			   ceph_snap(inode) == CEPH_NOSNAP) {
+			spin_unlock(&temp->d_lock);
 			break;
 		} else {
 			pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ceph_dir_clear_complete(inode);
 	ci->i_release_count++;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap->seq = 0;        /* reset cap seq */
 	cap->issue_seq = 0;  /* and issue_seq */
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		reclen = sizeof(rec.v1);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
fs/ceph/snap.c

@@ -20,7 +20,7 @@
  *
  *   mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *             mdsc->snap_flush_lock
  *             mdsc->cap_delay_lock
  *
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	used = __ceph_caps_used(ci);
 	dirty = __ceph_caps_dirty(ci);
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		kfree(capsnap);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
 		spin_unlock(&mdsc->snap_flush_lock);
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__ceph_flush_snaps(ci, &session, 0);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		iput(inode);
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (!ci->i_snap_realm)
 			goto skip_inode;
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 		spin_unlock(&realm->inodes_with_caps_lock);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 skip_inode:
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		iput(inode);
fs/ceph/super.c

@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
 	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
 		seq_printf(m, ",rsize=%d", fsopt->rsize);
 	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-		seq_printf(m, ",rasize=%d", fsopt->rsize);
+		seq_printf(m, ",rasize=%d", fsopt->rasize);
 	if (fsopt->congestion_kb != default_congestion_kb())
 		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
 	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
 struct ceph_inode_info {
 	struct ceph_vino i_vino;   /* ceph ino + snap */
 
+	spinlock_t i_ceph_lock;
+
 	u64 i_version;
 	u32 i_time_warp_seq;
@@ -271,7 +273,7 @@ struct ceph_inode_info {
 
 	struct ceph_inode_xattrs_info i_xattrs;
 
-	/* capabilities.  protected _both_ by i_lock and cap->session's
+	/* capabilities.  protected _both_ by i_ceph_lock and cap->session's
 	 * s_mutex. */
 	struct rb_root i_caps;           /* cap list */
 	struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_ceph_flags &= ~mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_ceph_flags |= mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	bool r;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	r = (ci->i_ceph_flags & mask) == mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return r;
 }
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
 	int issued;
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	issued = __ceph_caps_issued(ci, NULL);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return issued;
 }
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
 					int touch)
 {
 	int r;
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	r = __ceph_caps_issued_mask(ci, mask, touch);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return r;
 }
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-	struct inode *inode = &cap->ci->vfs_inode;
-	spin_lock(&inode->i_lock);
+	spin_lock(&cap->ci->i_ceph_lock);
 	__ceph_remove_cap(cap);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
 			 struct ceph_cap *cap);
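These header hunks add the lock itself (spinlock_t i_ceph_lock in struct ceph_inode_info) and repoint the inline flag and cap helpers at it. A standalone userspace sketch of the resulting helpers is below; the demo_* names are invented and a pthread mutex stands in for the kernel spinlock.

/* Flag set/clear/test serialized on a dedicated per-object lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_ci {
	pthread_mutex_t i_ceph_lock;
	unsigned i_ceph_flags;
};

static void demo_i_set(struct demo_ci *ci, unsigned mask)
{
	pthread_mutex_lock(&ci->i_ceph_lock);
	ci->i_ceph_flags |= mask;
	pthread_mutex_unlock(&ci->i_ceph_lock);
}

static void demo_i_clear(struct demo_ci *ci, unsigned mask)
{
	pthread_mutex_lock(&ci->i_ceph_lock);
	ci->i_ceph_flags &= ~mask;
	pthread_mutex_unlock(&ci->i_ceph_lock);
}

static bool demo_i_test(struct demo_ci *ci, unsigned mask)
{
	bool r;

	pthread_mutex_lock(&ci->i_ceph_lock);
	r = (ci->i_ceph_flags & mask) == mask;
	pthread_mutex_unlock(&ci->i_ceph_lock);
	return r;
}

int main(void)
{
	struct demo_ci ci = { .i_ceph_lock = PTHREAD_MUTEX_INITIALIZER };

	demo_i_set(&ci, 0x3);
	demo_i_clear(&ci, 0x2);
	printf("bit0 set: %d\n", demo_i_test(&ci, 0x1));
	return 0;
}

Keeping ceph-private state under its own lock means these helpers no longer depend on how the VFS uses inode->i_lock, which is presumably the point of the conversion.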
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 }
 
 static int __build_xattrs(struct inode *inode)
-	__releases(inode->i_lock)
-	__acquires(inode->i_lock)
+	__releases(ci->i_ceph_lock)
+	__acquires(ci->i_ceph_lock)
 {
 	u32 namelen;
 	u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
 		end = p + ci->i_xattrs.blob->vec.iov_len;
 		ceph_decode_32_safe(&p, end, numattr, bad);
 		xattr_version = ci->i_xattrs.version;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 
 		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
 				 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
 				goto bad_lock;
 		}
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (ci->i_xattrs.version != xattr_version) {
 			/* lost a race, retry */
 			for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
 
 	return err;
 bad_lock:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 bad:
 	if (xattrs) {
 		for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
 	if (vxattrs)
 		vxattr = ceph_match_vxattr(vxattrs, name);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
 	     ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
 	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
 		goto get_xattr;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		/* get xattrs from mds (if we don't already have them) */
 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
 		if (err)
 			return err;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	if (vxattr && vxattr->readonly) {
 		err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
 	memcpy(value, xattr->val, xattr->val_len);
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return err;
 }
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
 	u32 len;
 	int i;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
 	     ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
 	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
 		goto list_xattr;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
 		if (err)
 			return err;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	err = __build_xattrs(inode);
 	if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
 	}
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return err;
 }
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
 	if (!xattr)
 		goto out;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 retry:
 	issued = __ceph_caps_issued(ci, NULL);
 	if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
 	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
 		struct ceph_buffer *blob = NULL;
 
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		dout(" preaallocating new blob size=%d\n", required_blob_size);
 		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
 		if (!blob)
 			goto out;
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (ci->i_xattrs.prealloc_blob)
 			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 		ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
 	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
 	ci->i_xattrs.dirty = true;
 	inode->i_ctime = CURRENT_TIME;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (dirty)
 		__mark_inode_dirty(inode, dirty);
 	return err;
 
 do_sync:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
 	kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
 		return -EOPNOTSUPP;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__build_xattrs(inode);
 	issued = __ceph_caps_issued(ci, NULL);
 	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
 	ci->i_xattrs.dirty = true;
 	inode->i_ctime = CURRENT_TIME;
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (dirty)
 		__mark_inode_dirty(inode, dirty);
 	return err;
 do_sync:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	err = ceph_send_removexattr(dentry, name);
 	return err;
 }
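The xattr hunks are the same mechanical lock swap, but they also carry a pattern worth noting: __build_xattrs() and ceph_setxattr() drop the spinlock around allocations that may sleep, re-take it, and then re-check a version counter (or re-test the precondition) in case another thread raced in while the lock was dropped. A self-contained userspace sketch of that drop-allocate-relock-recheck loop follows; the demo_* names are hypothetical and malloc() stands in for a sleeping kernel allocation.

/* Drop the lock to allocate, re-take it, re-check, retry on a lost race. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ci {
	pthread_mutex_t i_ceph_lock;
	unsigned long xattr_version;
	char *blob;
	size_t blob_len;
};

static int grow_blob(struct demo_ci *ci, size_t need)
{
	pthread_mutex_lock(&ci->i_ceph_lock);
retry:
	if (ci->blob_len < need) {
		unsigned long ver = ci->xattr_version;
		char *nblob;

		pthread_mutex_unlock(&ci->i_ceph_lock);
		nblob = malloc(need);		/* may "sleep"; lock is dropped */
		if (!nblob)
			return -1;
		pthread_mutex_lock(&ci->i_ceph_lock);
		if (ci->xattr_version != ver) {	/* lost a race, retry */
			free(nblob);
			goto retry;
		}
		free(ci->blob);
		ci->blob = nblob;
		ci->blob_len = need;
	}
	pthread_mutex_unlock(&ci->i_ceph_lock);
	return 0;
}

int main(void)
{
	struct demo_ci ci = { .i_ceph_lock = PTHREAD_MUTEX_INITIALIZER };

	if (grow_blob(&ci, 128) == 0)
		printf("blob grown to %zu bytes\n", ci.blob_len);
	free(ci.blob);
	return 0;
}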
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
 	int i, j;
 	int numrep;
 	int firstn;
-	int rc = -1;
 
 	BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
 	 * that this may or may not correspond to the specific types
 	 * referenced by the crush rule.
 	 */
-	if (force >= 0) {
-		if (force >= map->max_devices ||
-		    map->device_parents[force] == 0) {
-			/*dprintk("CRUSH: forcefed device dne\n");*/
-			rc = -1;  /* force fed device dne */
-			goto out;
-		}
-		if (!is_out(map, weight, force, x)) {
-			while (1) {
-				force_context[++force_pos] = force;
-				if (force >= 0)
-					force = map->device_parents[force];
-				else
-					force = map->bucket_parents[-1-force];
-				if (force == 0)
-					break;
-			}
+	if (force >= 0 &&
+	    force < map->max_devices &&
+	    map->device_parents[force] != 0 &&
+	    !is_out(map, weight, force, x)) {
+		while (1) {
+			force_context[++force_pos] = force;
+			if (force >= 0)
+				force = map->device_parents[force];
+			else
+				force = map->bucket_parents[-1-force];
+			if (force == 0)
+				break;
 		}
 	}
 
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
 			BUG_ON(1);
 		}
 	}
-	rc = result_len;
-
-out:
-	return rc;
+	return result_len;
 }
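The crush_do_rule() hunks are the "force argument doesn't exist" fix from the merge summary: the old code returned -1 (via rc and the out: label) when the forced device was out of range or unknown, aborting the whole mapping, while the new code folds the range, existence, and is_out checks into a single guard so that an unusable force hint is simply ignored; with the rc/out: plumbing gone, the function returns result_len directly. Below is a much-simplified illustration of that behavioural change, with hypothetical names and data, not the CRUSH algorithm itself.

/* Invalid placement hints are ignored instead of failing the mapping. */
#include <stdio.h>

#define MAX_DEVICES 4

static const int device_parents[MAX_DEVICES] = { 0, 1, 1, 0 }; /* 0 = unknown */

static int is_out(int device)
{
	(void)device;
	return 0;			/* pretend every known device is in */
}

static int do_rule(int force, int *result)
{
	int result_len = 0;

	/* old behaviour: an out-of-range or unknown force returned -1 here */
	if (force >= 0 &&
	    force < MAX_DEVICES &&
	    device_parents[force] != 0 &&
	    !is_out(force)) {
		result[result_len++] = force;	/* honour the hint */
	}

	if (result_len == 0)
		result[result_len++] = 1;	/* fall back to normal placement */

	return result_len;			/* no rc/goto out any more */
}

int main(void)
{
	int out[1];
	int n;

	n = do_rule(2, out);
	printf("force=2  -> %d item(s), first=%d\n", n, out[0]);
	n = do_rule(99, out);
	printf("force=99 -> %d item(s), first=%d\n", n, out[0]);
	return 0;
}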