fuse: do not take fc->lock in fuse_request_send_background()
Currently we take fc->lock there only to check the fc->connected flag. But this flag changes only on connection abort, which is a very rare operation. So allow fc->connected to be checked under just fc->bg_lock, and take that lock (in addition to fc->lock) when resetting fc->connected. Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com> Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
This commit is contained in:
parent
ae2dffa394
commit
63825b4e1d
|
@ -581,42 +581,38 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
|
||||||
* Called under fc->lock
|
|
||||||
*
|
|
||||||
* fc->connected must have been checked previously
|
|
||||||
*/
|
|
||||||
void fuse_request_send_background_nocheck(struct fuse_conn *fc,
|
|
||||||
struct fuse_req *req)
|
|
||||||
{
|
{
|
||||||
BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
|
bool queued = false;
|
||||||
|
|
||||||
|
WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
|
||||||
if (!test_bit(FR_WAITING, &req->flags)) {
|
if (!test_bit(FR_WAITING, &req->flags)) {
|
||||||
__set_bit(FR_WAITING, &req->flags);
|
__set_bit(FR_WAITING, &req->flags);
|
||||||
atomic_inc(&fc->num_waiting);
|
atomic_inc(&fc->num_waiting);
|
||||||
}
|
}
|
||||||
__set_bit(FR_ISREPLY, &req->flags);
|
__set_bit(FR_ISREPLY, &req->flags);
|
||||||
spin_lock(&fc->bg_lock);
|
spin_lock(&fc->bg_lock);
|
||||||
fc->num_background++;
|
if (likely(fc->connected)) {
|
||||||
if (fc->num_background == fc->max_background)
|
fc->num_background++;
|
||||||
fc->blocked = 1;
|
if (fc->num_background == fc->max_background)
|
||||||
if (fc->num_background == fc->congestion_threshold && fc->sb) {
|
fc->blocked = 1;
|
||||||
set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
|
if (fc->num_background == fc->congestion_threshold && fc->sb) {
|
||||||
set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
|
set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
|
||||||
|
set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
|
||||||
|
}
|
||||||
|
list_add_tail(&req->list, &fc->bg_queue);
|
||||||
|
flush_bg_queue(fc);
|
||||||
|
queued = true;
|
||||||
}
|
}
|
||||||
list_add_tail(&req->list, &fc->bg_queue);
|
|
||||||
flush_bg_queue(fc);
|
|
||||||
spin_unlock(&fc->bg_lock);
|
spin_unlock(&fc->bg_lock);
|
||||||
|
|
||||||
|
return queued;
|
||||||
}
|
}
|
||||||
|
|
||||||
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
|
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
|
||||||
{
|
{
|
||||||
BUG_ON(!req->end);
|
WARN_ON(!req->end);
|
||||||
spin_lock(&fc->lock);
|
if (!fuse_request_queue_background(fc, req)) {
|
||||||
if (fc->connected) {
|
|
||||||
fuse_request_send_background_nocheck(fc, req);
|
|
||||||
spin_unlock(&fc->lock);
|
|
||||||
} else {
|
|
||||||
spin_unlock(&fc->lock);
|
|
||||||
req->out.h.error = -ENOTCONN;
|
req->out.h.error = -ENOTCONN;
|
||||||
req->end(fc, req);
|
req->end(fc, req);
|
||||||
fuse_put_request(fc, req);
|
fuse_put_request(fc, req);
|
||||||
|
@ -2119,7 +2115,11 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
|
||||||
struct fuse_req *req, *next;
|
struct fuse_req *req, *next;
|
||||||
LIST_HEAD(to_end);
|
LIST_HEAD(to_end);
|
||||||
|
|
||||||
|
/* Background queuing checks fc->connected under bg_lock */
|
||||||
|
spin_lock(&fc->bg_lock);
|
||||||
fc->connected = 0;
|
fc->connected = 0;
|
||||||
|
spin_unlock(&fc->bg_lock);
|
||||||
|
|
||||||
fc->aborted = is_abort;
|
fc->aborted = is_abort;
|
||||||
fuse_set_initialized(fc);
|
fuse_set_initialized(fc);
|
||||||
list_for_each_entry(fud, &fc->devices, entry) {
|
list_for_each_entry(fud, &fc->devices, entry) {
|
||||||
|
|
|
@ -1487,6 +1487,7 @@ __acquires(fc->lock)
|
||||||
struct fuse_inode *fi = get_fuse_inode(req->inode);
|
struct fuse_inode *fi = get_fuse_inode(req->inode);
|
||||||
struct fuse_write_in *inarg = &req->misc.write.in;
|
struct fuse_write_in *inarg = &req->misc.write.in;
|
||||||
__u64 data_size = req->num_pages * PAGE_SIZE;
|
__u64 data_size = req->num_pages * PAGE_SIZE;
|
||||||
|
bool queued;
|
||||||
|
|
||||||
if (!fc->connected)
|
if (!fc->connected)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
|
@ -1502,7 +1503,8 @@ __acquires(fc->lock)
|
||||||
|
|
||||||
req->in.args[1].size = inarg->size;
|
req->in.args[1].size = inarg->size;
|
||||||
fi->writectr++;
|
fi->writectr++;
|
||||||
fuse_request_send_background_nocheck(fc, req);
|
queued = fuse_request_queue_background(fc, req);
|
||||||
|
WARN_ON(!queued);
|
||||||
return;
|
return;
|
||||||
|
|
||||||
out_free:
|
out_free:
|
||||||
|
|
|
@ -863,9 +863,7 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
|
||||||
* Send a request in the background
|
* Send a request in the background
|
||||||
*/
|
*/
|
||||||
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
|
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
|
||||||
|
bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
|
||||||
void fuse_request_send_background_nocheck(struct fuse_conn *fc,
|
|
||||||
struct fuse_req *req);
|
|
||||||
|
|
||||||
/* Abort all requests */
|
/* Abort all requests */
|
||||||
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
|
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
|
||||||
|
|
Loading…
Reference in New Issue