Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core

Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
commit 4c63f5646e
Jens Axboe <jaxboe@fusionio.com>, 2011-03-10 08:58:35 +01:00
137 changed files with 606 additions and 1533 deletions

View File

@@ -963,11 +963,6 @@ elevator_dispatch_fn*  fills the dispatch queue with ready requests.
 elevator_add_req_fn*      called to add a new request into the scheduler
-elevator_queue_empty_fn   returns true if the merge queue is empty.
-                          Drivers shouldn't use this, but rather check
-                          if elv_next_request is NULL (without losing the
-                          request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn    These return the request before or after the
                           one specified in disk sort order. Used by the
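
Illustration (not part of the patch): as the documentation text above says, a driver should simply pull requests and treat NULL as "nothing to do" instead of relying on the removed elevator_queue_empty_fn hook. A hypothetical request_fn along those lines, using the era's blk_fetch_request()/__blk_end_request_all() calls (the driver name and completion handling are made up for the sketch):

    static void mydev_request_fn(struct request_queue *q)
    {
        struct request *rq;

        /* NULL here is the "queue is empty" signal; no emptiness hook needed */
        while ((rq = blk_fetch_request(q)) != NULL) {
            /* ... hand rq to the hardware ... */
            __blk_end_request_all(rq, 0);
        }
    }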

View File

@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
+#include <linux/list_sort.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -198,135 +199,43 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
  */
-void blk_plug_device(struct request_queue *q)
+static inline void queue_sync_plugs(struct request_queue *q)
 {
-    WARN_ON(!irqs_disabled());
-
     /*
-     * don't plug a stopped queue, it must be paired with blk_start_queue()
-     * which will restart the queueing
+     * If the current process is plugged and has barriers submitted,
+     * we will livelock if we don't unplug first.
      */
-    if (blk_queue_stopped(q))
-        return;
-
-    if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-        mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-        trace_block_plug(q);
-    }
+    blk_flush_plug(current);
 }
-EXPORT_SYMBOL(blk_plug_device);
 
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q:    The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
+static void blk_delay_work(struct work_struct *work)
 {
-    unsigned long flags;
+    struct request_queue *q;
 
-    spin_lock_irqsave(q->queue_lock, flags);
-    blk_plug_device(q);
-    spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-    WARN_ON(!irqs_disabled());
-
-    if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-        return 0;
-
-    del_timer(&q->unplug_timer);
-    return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-    if (unlikely(blk_queue_stopped(q)))
-        return;
-    if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-        return;
-
-    q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-    if (blk_queue_plugged(q)) {
-        spin_lock_irq(q->queue_lock);
-        __generic_unplug_device(q);
-        spin_unlock_irq(q->queue_lock);
-    }
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-                                   struct page *page)
-{
-    struct request_queue *q = bdi->unplug_io_data;
-
-    blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-    struct request_queue *q =
-        container_of(work, struct request_queue, unplug_work);
-
-    trace_block_unplug_io(q);
-    q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-    struct request_queue *q = (struct request_queue *)data;
-
-    trace_block_unplug_timer(q);
-    kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-    /*
-     * devices don't necessarily have an ->unplug_fn defined
-     */
-    if (q->unplug_fn) {
-        trace_block_unplug_io(q);
-        q->unplug_fn(q);
-    }
-}
-EXPORT_SYMBOL(blk_unplug);
+    q = container_of(work, struct request_queue, delay_work.work);
+    spin_lock_irq(q->queue_lock);
+    __blk_run_queue(q, false);
+    spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q:      The &struct request_queue in question
+ * @msecs:  Delay in msecs
+ *
+ * Description:
+ *   Sometimes queueing needs to be postponed for a little while, to allow
+ *   resources to come back. This function will make sure that queueing is
+ *   restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+    schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
 
 /**
  * blk_start_queue - restart a previously stopped queue
@@ -362,7 +271,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-    blk_remove_plug(q);
+    cancel_delayed_work(&q->delay_work);
     queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -387,9 +296,9 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-    del_timer_sync(&q->unplug_timer);
     del_timer_sync(&q->timeout);
-    cancel_work_sync(&q->unplug_work);
+    cancel_delayed_work_sync(&q->delay_work);
+    queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
@@ -405,14 +314,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
-    blk_remove_plug(q);
-
     if (unlikely(blk_queue_stopped(q)))
         return;
 
-    if (elv_queue_empty(q))
-        return;
-
     /*
      * Only recurse once to avoid overrunning the stack, let the unplug
      * handling reinvoke the handler shortly if we already got there.
@@ -420,10 +324,8 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
     if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
         q->request_fn(q);
         queue_flag_clear(QUEUE_FLAG_REENTER, q);
-    } else {
-        queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-        kblockd_schedule_work(q, &q->unplug_work);
-    }
+    } else
+        queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
@@ -517,8 +419,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
     if (!q)
         return NULL;
 
-    q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-    q->backing_dev_info.unplug_io_data = q;
     q->backing_dev_info.ra_pages =
             (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
     q->backing_dev_info.state = 0;
@@ -538,13 +438,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
     setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
             laptop_mode_timer_fn, (unsigned long) q);
-    init_timer(&q->unplug_timer);
     setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
     INIT_LIST_HEAD(&q->timeout_list);
     INIT_LIST_HEAD(&q->flush_queue[0]);
     INIT_LIST_HEAD(&q->flush_queue[1]);
     INIT_LIST_HEAD(&q->flush_data_in_flight);
-    INIT_WORK(&q->unplug_work, blk_unplug_work);
+    INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
     kobject_init(&q->kobj, &blk_queue_ktype);
@@ -639,7 +538,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
     q->request_fn       = rfn;
     q->prep_rq_fn       = NULL;
     q->unprep_rq_fn     = NULL;
-    q->unplug_fn        = generic_unplug_device;
     q->queue_flags      = QUEUE_FLAG_DEFAULT;
 
     /* Override internal queue lock with supplied lock pointer */
@@ -677,6 +575,8 @@ int blk_get_queue(struct request_queue *q)
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
+    BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
     if (rq->cmd_flags & REQ_ELVPRIV)
         elv_put_request(q, rq);
     mempool_free(rq, q->rq.rq_pool);
@@ -898,8 +798,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -920,7 +820,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
         trace_block_sleeprq(q, bio, rw_flags & 1);
 
-        __generic_unplug_device(q);
         spin_unlock_irq(q->queue_lock);
         io_schedule();
@@ -1042,6 +941,13 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_requeue_request);
 
+static void add_acct_request(struct request_queue *q, struct request *rq,
+                             int where)
+{
+    drive_stat_acct(rq, 1);
+    __elv_add_request(q, rq, where);
+}
+
 /**
  * blk_insert_request - insert a special request into a request queue
  * @q:  request queue where request should be inserted
@@ -1084,8 +990,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
     if (blk_rq_tagged(rq))
         blk_queue_end_tag(q, rq);
 
-    drive_stat_acct(rq, 1);
-    __elv_add_request(q, rq, where, 0);
+    add_acct_request(q, rq, where);
     __blk_run_queue(q, false);
     spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1206,6 +1111,113 @@ void blk_add_request_payload(struct request *rq, struct page *page,
 }
 EXPORT_SYMBOL_GPL(blk_add_request_payload);
 
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
/*
* Debug stuff, kill later
*/
if (!rq_mergeable(req)) {
blk_dump_rq_flags(req, "back");
return false;
}
if (!ll_back_merge_fn(q, req, bio))
return false;
trace_block_bio_backmerge(q, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
return true;
}
static bool bio_attempt_front_merge(struct request_queue *q,
struct request *req, struct bio *bio)
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
sector_t sector;
/*
* Debug stuff, kill later
*/
if (!rq_mergeable(req)) {
blk_dump_rq_flags(req, "front");
return false;
}
if (!ll_front_merge_fn(q, req, bio))
return false;
trace_block_bio_frontmerge(q, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
sector = bio->bi_sector;
bio->bi_next = req->bio;
req->bio = bio;
/*
* may not be valid. if the low level driver said
* it didn't need a bounce buffer then it better
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
req->__sector = bio->bi_sector;
req->__data_len += bio->bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
return true;
}
/*
* Attempts to merge with the plugged list in the current process. Returns
* true if merge was succesful, otherwise false.
*/
static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
struct bio *bio)
{
struct blk_plug *plug;
struct request *rq;
bool ret = false;
plug = tsk->plug;
if (!plug)
goto out;
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;
if (rq->q != q)
continue;
el_ret = elv_try_merge(rq, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
ret = bio_attempt_back_merge(q, rq, bio);
if (ret)
break;
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
ret = bio_attempt_front_merge(q, rq, bio);
if (ret)
break;
}
}
out:
return ret;
}
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
     req->cpu = bio->bi_comp_cpu;
@@ -1221,26 +1233,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
     blk_rq_bio_prep(req->q, req, bio);
 }
 
-/*
- * Only disabling plugging for non-rotational devices if it does tagging
- * as well, otherwise we do need the proper merging
- */
-static inline bool queue_should_plug(struct request_queue *q)
-{
-    return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
-}
-
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
-    struct request *req;
-    int el_ret;
-    unsigned int bytes = bio->bi_size;
-    const unsigned short prio = bio_prio(bio);
     const bool sync = !!(bio->bi_rw & REQ_SYNC);
-    const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
-    const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
-    int where = ELEVATOR_INSERT_SORT;
-    int rw_flags;
+    struct blk_plug *plug;
+    int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+    struct request *req;
 
     /*
      * low level driver can indicate that it wants pages above a
@@ -1249,78 +1247,36 @@ static int __make_request(struct request_queue *q, struct bio *bio)
      */
     blk_queue_bounce(q, &bio);
 
-    spin_lock_irq(q->queue_lock);
-
     if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+        spin_lock_irq(q->queue_lock);
         where = ELEVATOR_INSERT_FLUSH;
         goto get_rq;
     }
 
-    if (elv_queue_empty(q))
-        goto get_rq;
+    /*
+     * Check if we can merge with the plugged list before grabbing
+     * any locks.
+     */
+    if (attempt_plug_merge(current, q, bio))
+        goto out;
+
+    spin_lock_irq(q->queue_lock);
 
     el_ret = elv_merge(q, &req, bio);
-    switch (el_ret) {
-    case ELEVATOR_BACK_MERGE:
-        BUG_ON(!rq_mergeable(req));
-
-        if (!ll_back_merge_fn(q, req, bio))
-            break;
-
-        trace_block_bio_backmerge(q, bio);
-
-        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
-            blk_rq_set_mixed_merge(req);
-
-        req->biotail->bi_next = bio;
-        req->biotail = bio;
-        req->__data_len += bytes;
-        req->ioprio = ioprio_best(req->ioprio, prio);
-        if (!blk_rq_cpu_valid(req))
-            req->cpu = bio->bi_comp_cpu;
-        drive_stat_acct(req, 0);
-        elv_bio_merged(q, req, bio);
-        if (!attempt_back_merge(q, req))
-            elv_merged_request(q, req, el_ret);
-        goto out;
-
-    case ELEVATOR_FRONT_MERGE:
-        BUG_ON(!rq_mergeable(req));
-
-        if (!ll_front_merge_fn(q, req, bio))
-            break;
-
-        trace_block_bio_frontmerge(q, bio);
-
-        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
-            blk_rq_set_mixed_merge(req);
-            req->cmd_flags &= ~REQ_FAILFAST_MASK;
-            req->cmd_flags |= ff;
-        }
-
-        bio->bi_next = req->bio;
-        req->bio = bio;
-
-        /*
-         * may not be valid. if the low level driver said
-         * it didn't need a bounce buffer then it better
-         * not touch req->buffer either...
-         */
-        req->buffer = bio_data(bio);
-        req->__sector = bio->bi_sector;
-        req->__data_len += bytes;
-        req->ioprio = ioprio_best(req->ioprio, prio);
-        if (!blk_rq_cpu_valid(req))
-            req->cpu = bio->bi_comp_cpu;
-        drive_stat_acct(req, 0);
-        elv_bio_merged(q, req, bio);
-        if (!attempt_front_merge(q, req))
-            elv_merged_request(q, req, el_ret);
-        goto out;
-
-    /* ELV_NO_MERGE: elevator says don't/can't merge. */
-    default:
-        ;
+    if (el_ret == ELEVATOR_BACK_MERGE) {
+        BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+        if (bio_attempt_back_merge(q, req, bio)) {
+            if (!attempt_back_merge(q, req))
+                elv_merged_request(q, req, el_ret);
+            goto out_unlock;
+        }
+    } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+        BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+        if (bio_attempt_front_merge(q, req, bio)) {
+            if (!attempt_front_merge(q, req))
+                elv_merged_request(q, req, el_ret);
+            goto out_unlock;
+        }
     }
 
 get_rq:
@@ -1347,20 +1303,35 @@ get_rq:
      */
     init_request_from_bio(req, bio);
 
-    spin_lock_irq(q->queue_lock);
     if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-        bio_flagged(bio, BIO_CPU_AFFINE))
-        req->cpu = blk_cpu_to_group(smp_processor_id());
-    if (queue_should_plug(q) && elv_queue_empty(q))
-        blk_plug_device(q);
-
-    /* insert the request into the elevator */
-    drive_stat_acct(req, 1);
-    __elv_add_request(q, req, where, 0);
-out:
-    if (unplug || !queue_should_plug(q))
-        __generic_unplug_device(q);
-    spin_unlock_irq(q->queue_lock);
+        bio_flagged(bio, BIO_CPU_AFFINE)) {
+        req->cpu = blk_cpu_to_group(get_cpu());
+        put_cpu();
+    }
+
+    plug = current->plug;
+    if (plug) {
+        if (!plug->should_sort && !list_empty(&plug->list)) {
+            struct request *__rq;
+
+            __rq = list_entry_rq(plug->list.prev);
+            if (__rq->q != q)
+                plug->should_sort = 1;
+        }
+        /*
+         * Debug flag, kill later
+         */
+        req->cmd_flags |= REQ_ON_PLUG;
+        list_add_tail(&req->queuelist, &plug->list);
+        drive_stat_acct(req, 1);
+    } else {
+        spin_lock_irq(q->queue_lock);
+        add_acct_request(q, req, where);
+        __blk_run_queue(q, false);
+out_unlock:
+        spin_unlock_irq(q->queue_lock);
+    }
+out:
     return 0;
 }
 
@@ -1763,9 +1734,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
      */
     BUG_ON(blk_queued_rq(rq));
 
-    drive_stat_acct(rq, 1);
-    __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+    add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
 
     spin_unlock_irqrestore(q->queue_lock, flags);
     return 0;
@@ -2643,6 +2612,113 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
#define PLUG_MAGIC 0x91827364
void blk_start_plug(struct blk_plug *plug)
{
struct task_struct *tsk = current;
plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
plug->should_sort = 0;
/*
* If this is a nested plug, don't actually assign it. It will be
* flushed on its own.
*/
if (!tsk->plug) {
/*
* Store ordering should not be needed here, since a potential
* preempt will imply a full memory barrier
*/
tsk->plug = plug;
}
}
EXPORT_SYMBOL(blk_start_plug);
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
return !(rqa->q == rqb->q);
}
static void flush_plug_list(struct blk_plug *plug)
{
struct request_queue *q;
unsigned long flags;
struct request *rq;
BUG_ON(plug->magic != PLUG_MAGIC);
if (list_empty(&plug->list))
return;
if (plug->should_sort)
list_sort(NULL, &plug->list, plug_rq_cmp);
q = NULL;
local_irq_save(flags);
while (!list_empty(&plug->list)) {
rq = list_entry_rq(plug->list.next);
list_del_init(&rq->queuelist);
BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
BUG_ON(!rq->q);
if (rq->q != q) {
if (q) {
__blk_run_queue(q, false);
spin_unlock(q->queue_lock);
}
q = rq->q;
spin_lock(q->queue_lock);
}
rq->cmd_flags &= ~REQ_ON_PLUG;
/*
* rq is already accounted, so use raw insert
*/
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
}
if (q) {
__blk_run_queue(q, false);
spin_unlock(q->queue_lock);
}
BUG_ON(!list_empty(&plug->list));
local_irq_restore(flags);
}
static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
{
flush_plug_list(plug);
if (plug == tsk->plug)
tsk->plug = NULL;
}
void blk_finish_plug(struct blk_plug *plug)
{
if (plug)
__blk_finish_plug(current, plug);
}
EXPORT_SYMBOL(blk_finish_plug);
void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
{
__blk_finish_plug(tsk, plug);
tsk->plug = plug;
}
EXPORT_SYMBOL(__blk_flush_plug);
 int __init blk_dev_init(void)
 {
     BUILD_BUG_ON(__REQ_NR_BITS > 8 *
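
For context, and not part of the diff: with this series, a submitter batches requests on an on-stack plug instead of plugging the request queue itself. A minimal usage sketch of the new API, assuming a caller that issues several bios in a row (the function and variable names here are invented for illustration):

    #include <linux/blkdev.h>

    static void submit_batch(struct bio **bios, int nr)
    {
        struct blk_plug plug;
        int i;

        /* while the plug is active, new requests go onto the per-task
         * list (current->plug) where they can be merged and sorted */
        blk_start_plug(&plug);

        for (i = 0; i < nr; i++)
            generic_make_request(bios[i]);

        /* pushes the plugged requests to their queues and runs them */
        blk_finish_plug(&plug);
    }

As the queue_sync_plugs() comment above notes, a pending plug is also flushed via blk_flush_plug(current) when needed, so a plugged task does not end up waiting on I/O it is still holding back.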

View File

@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
     rq->end_io = done;
     WARN_ON(irqs_disabled());
     spin_lock_irq(q->queue_lock);
-    __elv_add_request(q, rq, where, 1);
-    __generic_unplug_device(q);
+    __elv_add_request(q, rq, where);
+    __blk_run_queue(q, false);
     /* the queue is stopped so it won't be plugged+unplugged */
     if (rq->cmd_type == REQ_TYPE_PM_RESUME)
         q->request_fn(q);

View File

@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
     struct request_queue *q = flush_rq->q;
     struct list_head *running = &q->flush_queue[q->flush_running_idx];
-    bool was_empty = elv_queue_empty(q);
     bool queued = false;
     struct request *rq, *n;
@@ -218,7 +217,7 @@ static void flush_end_io(struct request *flush_rq, int error)
      * from request completion path and calling directly into
      * request_fn may confuse the driver.  Always use kblockd.
      */
-    if (queued && was_empty)
+    if (queued)
         __blk_run_queue(q, true);
 }
@@ -269,13 +268,12 @@ static bool blk_kick_flush(struct request_queue *q)
 static void flush_data_end_io(struct request *rq, int error)
 {
     struct request_queue *q = rq->q;
-    bool was_empty = elv_queue_empty(q);
 
     /*
      * After populating an empty queue, kick it to avoid stall.  Read
      * the comment in flush_end_io().
      */
-    if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
+    if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
         __blk_run_queue(q, true);
 }

View File

@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
     blk_queue_congestion_threshold(q);
     q->nr_batching = BLK_BATCH_REQ;
 
-    q->unplug_thresh = 4;       /* hmm */
-    q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
-    if (q->unplug_delay == 0)
-        q->unplug_delay = 1;
-
-    q->unplug_timer.function = blk_unplug_timeout;
-    q->unplug_timer.data = (unsigned long)q;
-
     blk_set_default_limits(&q->limits);
     blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

View File

@@ -768,6 +768,7 @@ static int throtl_dispatch(struct request_queue *q)
     unsigned int nr_disp = 0;
     struct bio_list bio_list_on_stack;
     struct bio *bio;
+    struct blk_plug plug;
 
     spin_lock_irq(q->queue_lock);
@@ -796,9 +797,10 @@ out:
      * immediate dispatch
      */
     if (nr_disp) {
+        blk_start_plug(&plug);
         while((bio = bio_list_pop(&bio_list_on_stack)))
             generic_make_request(bio);
-        blk_unplug(q);
+        blk_finish_plug(&plug);
     }
 
     return nr_disp;
 }

View File

@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);

View File

@@ -500,13 +500,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
     }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-    struct cfq_data *cfqd = q->elevator->elevator_data;
-
-    return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4080,7 +4073,6 @@ static struct elevator_type iosched_cfq = {
         .elevator_add_req_fn =          cfq_insert_request,
         .elevator_activate_req_fn =     cfq_activate_request,
         .elevator_deactivate_req_fn =   cfq_deactivate_request,
-        .elevator_queue_empty_fn =      cfq_queue_empty,
         .elevator_completed_req_fn =    cfq_completed_request,
         .elevator_former_req_fn =       elv_rb_former_request,
         .elevator_latter_req_fn =       elv_rb_latter_request,

View File

@@ -326,14 +326,6 @@ dispatch_request:
     return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-    struct deadline_data *dd = q->elevator->elevator_data;
-
-    return list_empty(&dd->fifo_list[WRITE])
-        && list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
     struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
         .elevator_merge_req_fn =    deadline_merged_requests,
         .elevator_dispatch_fn =     deadline_dispatch_requests,
         .elevator_add_req_fn =      deadline_add_request,
-        .elevator_queue_empty_fn =  deadline_queue_empty,
         .elevator_former_req_fn =   elv_rb_former_request,
         .elevator_latter_req_fn =   elv_rb_latter_request,
         .elevator_init_fn =         deadline_init_queue,

View File

@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
 {
     int ret = ELEVATOR_NO_MERGE;
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
     struct list_head *entry;
     int stop_flags;
 
+    BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
     if (q->last_merge == rq)
         q->last_merge = NULL;
@@ -617,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-    int unplug_it = 1;
-
     trace_block_rq_insert(q, rq);
 
     rq->q = q;
 
     switch (where) {
     case ELEVATOR_INSERT_REQUEUE:
-        /*
-         * Most requeues happen because of a busy condition,
-         * don't force unplug of the queue for that case.
-         * Clear unplug_it and fall through.
-         */
-        unplug_it = 0;
-
     case ELEVATOR_INSERT_FRONT:
         rq->cmd_flags |= REQ_SOFTBARRIER;
         list_add(&rq->queuelist, &q->queue_head);
@@ -677,25 +670,17 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
         rq->cmd_flags |= REQ_SOFTBARRIER;
         blk_insert_flush(rq);
         break;
     default:
         printk(KERN_ERR "%s: bad insertion point %d\n",
                __func__, where);
         BUG();
     }
-
-    if (unplug_it && blk_queue_plugged(q)) {
-        int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-                - queue_in_flight(q);
-
-        if (nrq >= q->unplug_thresh)
-            __generic_unplug_device(q);
-    }
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-                       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
+    BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
     if (rq->cmd_flags & REQ_SOFTBARRIER) {
         /* barriers are scheduling boundary, update end_sector */
         if (rq->cmd_type == REQ_TYPE_FS ||
@@ -707,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
         where == ELEVATOR_INSERT_SORT)
         where = ELEVATOR_INSERT_BACK;
 
-    if (plug)
-        blk_plug_device(q);
-
     elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-                     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
     unsigned long flags;
 
     spin_lock_irqsave(q->queue_lock, flags);
-    __elv_add_request(q, rq, where, plug);
+    __elv_add_request(q, rq, where);
     spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-    struct elevator_queue *e = q->elevator;
-
-    if (!list_empty(&q->queue_head))
-        return 0;
-
-    if (e->ops->elevator_queue_empty_fn)
-        return e->ops->elevator_queue_empty_fn(q);
-
-    return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
     struct elevator_queue *e = q->elevator;

View File

@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
     list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-    struct noop_data *nd = q->elevator->elevator_data;
-
-    return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
         .elevator_merge_req_fn =    noop_merged_requests,
         .elevator_dispatch_fn =     noop_dispatch,
         .elevator_add_req_fn =      noop_add_request,
-        .elevator_queue_empty_fn =  noop_queue_empty,
         .elevator_former_req_fn =   noop_former_request,
         .elevator_latter_req_fn =   noop_latter_request,
         .elevator_init_fn =         noop_init_queue,

View File

@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
int sg_index = 0; int sg_index = 0;
int chained = 0; int chained = 0;
/* We call start_io here in case there is a command waiting on the
* queue that has not been sent.
*/
if (blk_queue_plugged(q))
goto startio;
queue: queue:
creq = blk_peek_request(q); creq = blk_peek_request(q);
if (!creq) if (!creq)

View File

@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
struct scatterlist tmp_sg[SG_MAX]; struct scatterlist tmp_sg[SG_MAX];
int i, dir, seg; int i, dir, seg;
if (blk_queue_plugged(q))
goto startio;
queue_next: queue_next:
creq = blk_peek_request(q); creq = blk_peek_request(q);
if (!creq) if (!creq)

View File

@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags)) if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA; rw |= REQ_FUA;
rw |= REQ_UNPLUG | REQ_SYNC; rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev; bio->bi_bdev = bdev->md_bdev;
@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
} }
} }
drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
/* always (try to) flush bitmap to stable storage */ /* always (try to) flush bitmap to stable storage */
drbd_md_flush(mdev); drbd_md_flush(mdev);

View File

@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
for (i = 0; i < num_pages; i++) for (i = 0; i < num_pages; i++)
bm_page_io_async(mdev, b, i, rw); bm_page_io_async(mdev, b, i, rw);
drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {

View File

@ -377,7 +377,7 @@ union p_header {
#define DP_HARDBARRIER 1 /* depricated */ #define DP_HARDBARRIER 1 /* depricated */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */ #define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4 #define DP_MAY_SET_IN_SYNC 4
#define DP_UNPLUG 8 /* equals REQ_UNPLUG */ #define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */ #define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */ #define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */ #define DP_DISCARD 64 /* equals REQ_DISCARD */
@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
return QUEUE_ORDERED_NONE; return QUEUE_ORDERED_NONE;
} }
static inline void drbd_blk_run_queue(struct request_queue *q)
{
if (q && q->unplug_fn)
q->unplug_fn(q);
}
static inline void drbd_kick_lo(struct drbd_conf *mdev)
{
if (get_ldev(mdev)) {
drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
put_ldev(mdev);
}
}
static inline void drbd_md_flush(struct drbd_conf *mdev) static inline void drbd_md_flush(struct drbd_conf *mdev)
{ {
int r; int r;

View File

@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{ {
if (mdev->agreed_pro_version >= 95) if (mdev->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) | (bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
(bi_rw & REQ_DISCARD ? DP_DISCARD : 0); (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
else else
return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0; return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
} }
/* Used to send write requests /* Used to send write requests
@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
return 0; return 0;
} }
static void drbd_unplug_fn(struct request_queue *q)
{
struct drbd_conf *mdev = q->queuedata;
/* unplug FIRST */
spin_lock_irq(q->queue_lock);
blk_remove_plug(q);
spin_unlock_irq(q->queue_lock);
/* only if connected */
spin_lock_irq(&mdev->req_lock);
if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
D_ASSERT(mdev->state.role == R_PRIMARY);
if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
/* add to the data.work queue,
* unless already queued.
* XXX this might be a good addition to drbd_queue_work
* anyways, to detect "double queuing" ... */
if (list_empty(&mdev->unplug_work.list))
drbd_queue_work(&mdev->data.work,
&mdev->unplug_work);
}
}
spin_unlock_irq(&mdev->req_lock);
if (mdev->state.disk >= D_INCONSISTENT)
drbd_kick_lo(mdev);
}
static void drbd_set_defaults(struct drbd_conf *mdev) static void drbd_set_defaults(struct drbd_conf *mdev)
{ {
/* This way we get a compile error when sync_conf grows, /* This way we get a compile error when sync_conf grows,
@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec); blk_queue_merge_bvec(q, drbd_merge_bvec);
q->queue_lock = &mdev->req_lock; /* needed since we use */ q->queue_lock = &mdev->req_lock;
/* plugging on a queue, that actually has no requests! */
q->unplug_fn = drbd_unplug_fn;
mdev->md_io_page = alloc_page(GFP_KERNEL); mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page) if (!mdev->md_io_page)

View File

@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL; return NULL;
} }
/* kick lower level device, if we have more than (arbitrary number)
* reference counts on it, which typically are locally submitted io
* requests. don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
drbd_kick_lo(mdev);
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed) static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{ {
struct drbd_epoch_entry *e; struct drbd_epoch_entry *e;
@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
LIST_HEAD(reclaimed); LIST_HEAD(reclaimed);
struct drbd_epoch_entry *e, *t; struct drbd_epoch_entry *e, *t;
maybe_kick_lo(mdev);
spin_lock_irq(&mdev->req_lock); spin_lock_irq(&mdev->req_lock);
reclaim_net_ee(mdev, &reclaimed); reclaim_net_ee(mdev, &reclaimed);
spin_unlock_irq(&mdev->req_lock); spin_unlock_irq(&mdev->req_lock);
@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
while (!list_empty(head)) { while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&mdev->req_lock); spin_unlock_irq(&mdev->req_lock);
drbd_kick_lo(mdev); io_schedule();
schedule();
finish_wait(&mdev->ee_wait, &wait); finish_wait(&mdev->ee_wait, &wait);
spin_lock_irq(&mdev->req_lock); spin_lock_irq(&mdev->req_lock);
} }
@ -1111,8 +1100,6 @@ next_bio:
/* > e->sector, unless this is the first bio */ /* > e->sector, unless this is the first bio */
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev; bio->bi_bdev = mdev->ldev->backing_bdev;
/* we special case some flags in the multi-bio case, see below
* (REQ_UNPLUG) */
bio->bi_rw = rw; bio->bi_rw = rw;
bio->bi_private = e; bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec; bio->bi_end_io = drbd_endio_sec;
@ -1141,13 +1128,8 @@ next_bio:
bios = bios->bi_next; bios = bios->bi_next;
bio->bi_next = NULL; bio->bi_next = NULL;
/* strip off REQ_UNPLUG unless it is the last bio */
if (bios)
bio->bi_rw &= ~REQ_UNPLUG;
drbd_generic_make_request(mdev, fault_type, bio); drbd_generic_make_request(mdev, fault_type, bio);
} while (bios); } while (bios);
maybe_kick_lo(mdev);
return 0; return 0;
fail: fail:
@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
inc_unacked(mdev); inc_unacked(mdev);
if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
drbd_kick_lo(mdev);
mdev->current_epoch->barrier_nr = p->barrier; mdev->current_epoch->barrier_nr = p->barrier;
rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{ {
if (mdev->agreed_pro_version >= 95) if (mdev->agreed_pro_version >= 95)
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) | (dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_FUA : 0) | (dpf & DP_FLUSH ? REQ_FUA : 0) |
(dpf & DP_DISCARD ? REQ_DISCARD : 0); (dpf & DP_DISCARD ? REQ_DISCARD : 0);
else else
return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0; return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
} }
/* mirrored write */ /* mirrored write */
@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{ {
if (mdev->state.disk >= D_INCONSISTENT)
drbd_kick_lo(mdev);
/* Make sure we've acked all the TCP data associated /* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */ * with the data requests being unplugged */
drbd_tcp_quickack(mdev->data.socket); drbd_tcp_quickack(mdev->data.socket);

View File

@ -960,10 +960,6 @@ allocate_barrier:
bio_endio(req->private_bio, -EIO); bio_endio(req->private_bio, -EIO);
} }
/* we need to plug ALWAYS since we possibly need to kick lo_dev.
* we plug after submit, so we won't miss an unplug event */
drbd_plug_device(mdev);
return 0; return 0;
fail_conflicting: fail_conflicting:

View File

@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* queue (or even the read operations for those packets * queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */ * is not finished by now). Retry in 100ms. */
drbd_kick_lo(mdev);
__set_current_state(TASK_INTERRUPTIBLE); __set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10); schedule_timeout(HZ / 10);
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);

View File

@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio); generic_make_request(bio);
} }
static inline void drbd_plug_device(struct drbd_conf *mdev)
{
struct request_queue *q;
q = bdev_get_queue(mdev->this_bdev);
spin_lock_irq(q->queue_lock);
/* XXX the check on !blk_queue_plugged is redundant,
* implicitly checked in blk_plug_device */
if (!blk_queue_plugged(q)) {
blk_plug_device(q);
del_timer(&q->unplug_timer);
/* unplugging should not happen automatically... */
}
spin_unlock_irq(q->queue_lock);
}
static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm) static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
{ {
return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK) return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)

View File

@ -3838,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_end_io = floppy_rb0_complete; bio.bi_end_io = floppy_rb0_complete;
submit_bio(READ, &bio); submit_bio(READ, &bio);
generic_unplug_device(bdev_get_queue(bdev));
process_fd_request(); process_fd_request();
wait_for_completion(&complete); wait_for_completion(&complete);

View File

@ -540,17 +540,6 @@ out:
return 0; return 0;
} }
/*
* kick off io on the underlying address space
*/
static void loop_unplug(struct request_queue *q)
{
struct loop_device *lo = q->queuedata;
queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
blk_run_address_space(lo->lo_backing_file->f_mapping);
}
struct switch_request { struct switch_request {
struct file *file; struct file *file;
struct completion wait; struct completion wait;
@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
*/ */
blk_queue_make_request(lo->lo_queue, loop_make_request); blk_queue_make_request(lo->lo_queue, loop_make_request);
lo->lo_queue->queuedata = lo; lo->lo_queue->queuedata = lo;
lo->lo_queue->unplug_fn = loop_unplug;
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH); blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread); kthread_stop(lo->lo_thread);
lo->lo_queue->unplug_fn = NULL;
lo->lo_backing_file = NULL; lo->lo_backing_file = NULL;
loop_release_xfer(lo); loop_release_xfer(lo);

View File

@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time; min_sleep_time = pkt->sleep_time;
} }
generic_unplug_device(bdev_get_queue(pd->bdev));
VPRINTK("kcdrwd: sleeping\n"); VPRINTK("kcdrwd: sleeping\n");
residue = schedule_timeout(min_sleep_time); residue = schedule_timeout(min_sleep_time);
VPRINTK("kcdrwd: wake up\n"); VPRINTK("kcdrwd: wake up\n");

View File

@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
* *
* Whenever IO on the active page completes, the Ready page is activated * Whenever IO on the active page completes, the Ready page is activated
* and the ex-Active page is clean out and made Ready. * and the ex-Active page is clean out and made Ready.
* Otherwise the Ready page is only activated when it becomes full, or * Otherwise the Ready page is only activated when it becomes full.
* when mm_unplug_device is called via the unplug_io_fn.
* *
* If a request arrives while both pages a full, it is queued, and b_rdev is * If a request arrives while both pages a full, it is queued, and b_rdev is
* overloaded to record whether it was a read or a write. * overloaded to record whether it was a read or a write.
@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
page->biotail = &page->bio; page->biotail = &page->bio;
} }
static void mm_unplug_device(struct request_queue *q)
{
struct cardinfo *card = q->queuedata;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
if (blk_remove_plug(q))
activate(card);
spin_unlock_irqrestore(&card->lock, flags);
}
/* /*
* If there is room on Ready page, take * If there is room on Ready page, take
* one bh off list and add it. * one bh off list and add it.
@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio; *card->biotail = bio;
bio->bi_next = NULL; bio->bi_next = NULL;
card->biotail = &bio->bi_next; card->biotail = &bio->bi_next;
blk_plug_device(q);
spin_unlock_irq(&card->lock); spin_unlock_irq(&card->lock);
return 0; return 0;
@ -897,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
blk_queue_make_request(card->queue, mm_make_request); blk_queue_make_request(card->queue, mm_make_request);
card->queue->queue_lock = &card->lock; card->queue->queue_lock = &card->lock;
card->queue->queuedata = card; card->queue->queuedata = card;
card->queue->unplug_fn = mm_unplug_device;
tasklet_init(&card->tasklet, process_page, (unsigned long)card); tasklet_init(&card->tasklet, process_page, (unsigned long)card);

View File

@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
drive->hwif->rq = NULL; drive->hwif->rq = NULL;
elv_add_request(drive->queue, &drive->sense_rq, elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
ELEVATOR_INSERT_FRONT, 0);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(ide_queue_sense_rq); EXPORT_SYMBOL_GPL(ide_queue_sense_rq);

View File

@ -258,17 +258,10 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
if (time_after(jiffies, info->write_timeout)) if (time_after(jiffies, info->write_timeout))
return 0; return 0;
else { else {
struct request_queue *q = drive->queue;
unsigned long flags;
/* /*
* take a breather relying on the unplug timer to kick us again * take a breather
*/ */
blk_delay_queue(drive->queue, 1);
spin_lock_irqsave(q->queue_lock, flags);
blk_plug_device(q);
spin_unlock_irqrestore(q->queue_lock, flags);
return 1; return 1;
} }
} }
@ -1514,8 +1507,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
blk_queue_dma_alignment(q, 31); blk_queue_dma_alignment(q, 31);
blk_queue_update_dma_pad(q, 15); blk_queue_update_dma_pad(q, 15);
q->unplug_delay = max((1 * HZ) / 1000, 1);
drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id); drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);

View File

@ -549,8 +549,6 @@ plug_device_2:
if (rq) if (rq)
blk_requeue_request(q, rq); blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
} }
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
if (rq) if (rq)
blk_requeue_request(q, rq); blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }

View File

@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
rq->cmd[0] = REQ_UNPARK_HEADS; rq->cmd[0] = REQ_UNPARK_HEADS;
rq->cmd_len = 1; rq->cmd_len = 1;
rq->cmd_type = REQ_TYPE_SPECIAL; rq->cmd_type = REQ_TYPE_SPECIAL;
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out: out:
return; return;

View File

@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes); atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh); set_buffer_locked(bh);
set_buffer_mapped(bh); set_buffer_mapped(bh);
submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh); submit_bh(WRITE | REQ_SYNC, bh);
bh = bh->b_this_page; bh = bh->b_this_page;
} }
@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait, prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock); spin_unlock_irq(&bitmap->lock);
md_unplug(bitmap->mddev); io_schedule();
schedule();
finish_wait(&bitmap->overflow_wait, &__wait); finish_wait(&bitmap->overflow_wait, &__wait);
continue; continue;
} }

View File

@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor; clone->bi_destructor = dm_crypt_bio_destructor;
} }
static void kcryptd_unplug(struct crypt_config *cc)
{
blk_unplug(bdev_get_queue(cc->dev->bdev));
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* one in order to decrypt the whole bio data *afterwards*. * one in order to decrypt the whole bio data *afterwards*.
*/ */
clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
if (!clone) { if (!clone)
kcryptd_unplug(cc);
return 1; return 1;
}
crypt_inc_pending(io); crypt_inc_pending(io);

View File

@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS); BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync) if (sync)
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
/* /*
* For multiple regions we need to be careful to rewind * For multiple regions we need to be careful to rewind

View File

@ -37,13 +37,6 @@ struct dm_kcopyd_client {
unsigned int nr_pages; unsigned int nr_pages;
unsigned int nr_free_pages; unsigned int nr_free_pages;
/*
* Block devices to unplug.
* Non-NULL pointer means that a block device has some pending requests
* and needs to be unplugged.
*/
struct block_device *unplug[2];
struct dm_io_client *io_client; struct dm_io_client *io_client;
wait_queue_head_t destroyq; wait_queue_head_t destroyq;
@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
return 0; return 0;
} }
/*
* Unplug the block device at the specified index.
*/
static void unplug(struct dm_kcopyd_client *kc, int rw)
{
if (kc->unplug[rw] != NULL) {
blk_unplug(bdev_get_queue(kc->unplug[rw]));
kc->unplug[rw] = NULL;
}
}
/*
* Prepare block device unplug. If there's another device
* to be unplugged at the same array index, we unplug that
* device first.
*/
static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
struct block_device *bdev)
{
if (likely(kc->unplug[rw] == bdev))
return;
unplug(kc, rw);
kc->unplug[rw] = bdev;
}
static void complete_io(unsigned long error, void *context) static void complete_io(unsigned long error, void *context)
{ {
struct kcopyd_job *job = (struct kcopyd_job *) context; struct kcopyd_job *job = (struct kcopyd_job *) context;
@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client, .client = job->kc->io_client,
}; };
if (job->rw == READ) { if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL); r = dm_io(&io_req, 1, &job->source, NULL);
prepare_unplug(job->kc, READ, job->source.bdev); else
} else {
if (job->num_dests > 1)
io_req.bi_rw |= REQ_UNPLUG;
r = dm_io(&io_req, job->num_dests, job->dests, NULL); r = dm_io(&io_req, job->num_dests, job->dests, NULL);
if (!(io_req.bi_rw & REQ_UNPLUG))
prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
}
return r; return r;
} }
@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
{ {
struct dm_kcopyd_client *kc = container_of(work, struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work); struct dm_kcopyd_client, kcopyd_work);
struct blk_plug plug;
/* /*
* The order that these are called is *very* important. * The order that these are called is *very* important.
@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
* Pages jobs when successful will jump onto the io jobs * Pages jobs when successful will jump onto the io jobs
* list. io jobs call wake when they complete and it all * list. io jobs call wake when they complete and it all
* starts again. * starts again.
*
* Note that io_jobs add block devices to the unplug array,
* this array is cleared with "unplug" calls. It is thus
* forbidden to run complete_jobs after io_jobs and before
* unplug because the block device could be destroyed in
* job completion callback.
*/ */
blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job); process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job); process_jobs(&kc->pages_jobs, kc, run_pages_job);
process_jobs(&kc->io_jobs, kc, run_io_job); process_jobs(&kc->io_jobs, kc, run_io_job);
unplug(kc, READ); blk_finish_plug(&plug);
unplug(kc, WRITE);
} }
/* /*
@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
INIT_LIST_HEAD(&kc->io_jobs); INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs); INIT_LIST_HEAD(&kc->pages_jobs);
memset(kc->unplug, 0, sizeof(kc->unplug));
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool) if (!kc->job_pool)
goto bad_slab; goto bad_slab;


@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
{ {
struct raid_set *rs = container_of(cb, struct raid_set, callbacks); struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
md_raid5_unplug_device(rs->md.private); md_raid5_kick_device(rs->md.private);
} }
/* /*


@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
do_reads(ms, &reads); do_reads(ms, &reads);
do_writes(ms, &writes); do_writes(ms, &writes);
do_failures(ms, &failures); do_failures(ms, &failures);
dm_table_unplug_all(ms->ti->table);
} }
/*----------------------------------------------------------------- /*-----------------------------------------------------------------


@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
return 0; return 0;
} }
void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
struct dm_target_callbacks *cb;
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
char b[BDEVNAME_SIZE];
if (likely(q))
blk_unplug(q);
else
DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
dm_device_name(t->md),
bdevname(dd->dm_dev.bdev, b));
}
list_for_each_entry(cb, &t->target_callbacks, list)
if (cb->unplug_fn)
cb->unplug_fn(cb);
}
struct mapped_device *dm_table_get_md(struct dm_table *t) struct mapped_device *dm_table_get_md(struct dm_table *t)
{ {
return t->md; return t->md;
@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md); EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put); EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get); EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);


@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
dm_unprep_request(rq); dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
if (elv_queue_empty(q))
blk_plug_device(q);
blk_requeue_request(q, rq); blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
* number of in-flight I/Os after the queue is stopped in * number of in-flight I/Os after the queue is stopped in
* dm_suspend(). * dm_suspend().
*/ */
while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q); rq = blk_peek_request(q);
if (!rq) if (!rq)
goto plug_and_out; goto delay_and_out;
/* always use block 0 to find the target for flushes for now */ /* always use block 0 to find the target for flushes for now */
pos = 0; pos = 0;
@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
BUG_ON(!dm_target_is_valid(ti)); BUG_ON(!dm_target_is_valid(ti));
if (ti->type->busy && ti->type->busy(ti)) if (ti->type->busy && ti->type->busy(ti))
goto plug_and_out; goto delay_and_out;
blk_start_request(rq); blk_start_request(rq);
clone = rq->special; clone = rq->special;
@ -1647,11 +1645,8 @@ requeued:
BUG_ON(!irqs_disabled()); BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock); spin_lock(q->queue_lock);
plug_and_out: delay_and_out:
if (!elv_queue_empty(q)) blk_delay_queue(q, HZ / 10);
/* Some requests still remain, retry later */
blk_plug_device(q);
out: out:
dm_table_put(map); dm_table_put(map);
@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
return r; return r;
} }
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
if (map) {
if (dm_request_based(md))
generic_unplug_device(q);
dm_table_unplug_all(map);
dm_table_put(map);
}
}
static int dm_any_congested(void *congested_data, int bdi_bits) static int dm_any_congested(void *congested_data, int bdi_bits)
{ {
int r = bdi_bits; int r = bdi_bits;
@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md; md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request); blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec); blk_queue_merge_bvec(md->queue, dm_merge_bvec);
blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
} }
@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
int r = 0; int r = 0;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
dm_unplug_all(md->queue);
add_wait_queue(&md->wait, &wait); add_wait_queue(&md->wait, &wait);
while (1) { while (1) {
@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
clear_bit(DMF_SUSPENDED, &md->flags); clear_bit(DMF_SUSPENDED, &md->flags);
dm_table_unplug_all(map);
r = 0; r = 0;
out: out:
dm_table_put(map); dm_table_put(map);


@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9; return maxsectors << 9;
} }
static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf;
int i;
rcu_read_lock();
conf = rcu_dereference(mddev->private);
for (i=0; i < mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
blk_unplug(r_queue);
}
rcu_read_unlock();
}
static int linear_congested(void *data, int bits) static int linear_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
@ -224,7 +208,6 @@ static int linear_run (mddev_t *mddev)
md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
mddev->queue->backing_dev_info.congested_fn = linear_congested; mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev); md_integrity_register(mddev);


@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
bio->bi_end_io = super_written; bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes); atomic_inc(&mddev->pending_writes);
submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA, submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
bio);
} }
void md_super_wait(mddev_t *mddev) void md_super_wait(mddev_t *mddev)
@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct completion event; struct completion event;
int ret; int ret;
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev; rdev->meta_bdev : rdev->bdev;
@ -4817,7 +4816,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
__md_stop_writes(mddev); __md_stop_writes(mddev);
md_stop(mddev); md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL; mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */ /* tell userspace to handle 'inactive' */
@ -6692,8 +6690,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
void md_unplug(mddev_t *mddev) void md_unplug(mddev_t *mddev)
{ {
if (mddev->queue)
blk_unplug(mddev->queue);
if (mddev->plug) if (mddev->plug)
mddev->plug->unplug_fn(mddev->plug); mddev->plug->unplug_fn(mddev->plug);
} }
@ -6876,7 +6872,6 @@ void md_do_sync(mddev_t *mddev)
>= mddev->resync_max - mddev->curr_resync_completed >= mddev->resync_max - mddev->curr_resync_completed
)) { )) {
/* time to update curr_resync_completed */ /* time to update curr_resync_completed */
md_unplug(mddev);
wait_event(mddev->recovery_wait, wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0); atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j; mddev->curr_resync_completed = j;
@ -6952,7 +6947,6 @@ void md_do_sync(mddev_t *mddev)
* about not overloading the IO subsystem. (things like an * about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast) * e2fsck being done on the RAID array should execute fast)
*/ */
md_unplug(mddev);
cond_resched(); cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@ -6971,8 +6965,6 @@ void md_do_sync(mddev_t *mddev)
* this also signals 'finished resyncing' to md_stop * this also signals 'finished resyncing' to md_stop
*/ */
out: out:
md_unplug(mddev);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */ /* tell personality that we are finished */


@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
} }
static void unplug_slaves(mddev_t *mddev)
{
multipath_conf_t *conf = mddev->private;
int i;
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)
&& atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
}
rcu_read_unlock();
}
static void multipath_unplug(struct request_queue *q)
{
unplug_slaves(q->queuedata);
}
static int multipath_make_request(mddev_t *mddev, struct bio * bio) static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{ {
multipath_conf_t *conf = mddev->private; multipath_conf_t *conf = mddev->private;
@ -517,7 +487,6 @@ static int multipath_run (mddev_t *mddev)
*/ */
md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
mddev->queue->unplug_fn = multipath_unplug;
mddev->queue->backing_dev_info.congested_fn = multipath_congested; mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev); md_integrity_register(mddev);


@ -25,21 +25,6 @@
#include "raid0.h" #include "raid0.h"
#include "raid5.h" #include "raid5.h"
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
int raid_disks = conf->strip_zone[0].nb_dev;
int i;
for (i=0; i < raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
blk_unplug(r_queue);
}
}
static int raid0_congested(void *data, int bits) static int raid0_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
mdname(mddev), mdname(mddev),
(unsigned long long)smallest->sectors); (unsigned long long)smallest->sectors);
} }
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;


@ -52,23 +52,16 @@
#define NR_RAID1_BIOS 256 #define NR_RAID1_BIOS 256
static void unplug_slaves(mddev_t *mddev);
static void allow_barrier(conf_t *conf); static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf); static void lower_barrier(conf_t *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{ {
struct pool_info *pi = data; struct pool_info *pi = data;
r1bio_t *r1_bio;
int size = offsetof(r1bio_t, bios[pi->raid_disks]); int size = offsetof(r1bio_t, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */ /* allocate a r1bio with room for raid_disks entries in the bios array */
r1_bio = kzalloc(size, gfp_flags); return kzalloc(size, gfp_flags);
if (!r1_bio && pi->mddev)
unplug_slaves(pi->mddev);
return r1_bio;
} }
static void r1bio_pool_free(void *r1_bio, void *data) static void r1bio_pool_free(void *r1_bio, void *data)
@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
int i, j; int i, j;
r1_bio = r1bio_pool_alloc(gfp_flags, pi); r1_bio = r1bio_pool_alloc(gfp_flags, pi);
if (!r1_bio) { if (!r1_bio)
unplug_slaves(pi->mddev);
return NULL; return NULL;
}
/* /*
* Allocate bios : 1 for reading, n-1 for writing * Allocate bios : 1 for reading, n-1 for writing
@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return new_disk; return new_disk;
} }
static void unplug_slaves(mddev_t *mddev)
{
conf_t *conf = mddev->private;
int i;
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
}
rcu_read_unlock();
}
static void raid1_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
unplug_slaves(mddev);
md_wakeup_thread(mddev->thread);
}
static int raid1_congested(void *data, int bits) static int raid1_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits)
} }
static int flush_pending_writes(conf_t *conf)
static void flush_pending_writes(conf_t *conf)
{
/* Any writes that have been queued but are awaiting
* bitmap updates get flushed here.
* We return 1 if any requests were actually submitted.
*/
int rv = 0;
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
/* Only take the spinlock to quiet a warning */
spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio);
bio = next;
}
rv = 1;
} else
spin_unlock_irq(&conf->device_lock);
return rv;
}
static void md_kick_device(mddev_t *mddev)
{
blk_flush_plug(current);
md_wakeup_thread(mddev->thread);
}
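
md_kick_device() captures the rule this series leans on: a task that is about to sleep while it may still have plugged I/O should flush its own plug first, otherwise the bios it queued can sit on its plug list while it waits for them to complete. A small stand-alone sketch of that rule; wait_for_my_io() and its completion flag are hypothetical:

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative only: block until *done is set, flushing our plug first. */
static void wait_for_my_io(wait_queue_head_t *wq, int *done)
{
	/*
	 * Push out anything this task still has plugged before sleeping,
	 * so the I/O we are about to wait for is actually on its way.
	 */
	blk_flush_plug(current);
	wait_event(*wq, *done);
}

raise_barrier(), wait_barrier() and freeze_array() below all reach md_kick_device() through wait_event_lock_irq(), so the flush happens each time they are about to block.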
/* Barriers.... /* Barriers....
@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf)
/* Wait until no block IO is waiting */ /* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
conf->resync_lock, conf->resync_lock, md_kick_device(conf->mddev));
raid1_unplug(conf->mddev->queue));
/* block any new IO from starting */ /* block any new IO from starting */
conf->barrier++; conf->barrier++;
@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf)
/* Now wait for all pending IO to complete */ /* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier, wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH, !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
conf->resync_lock, conf->resync_lock, md_kick_device(conf->mddev));
raid1_unplug(conf->mddev->queue));
spin_unlock_irq(&conf->resync_lock); spin_unlock_irq(&conf->resync_lock);
} }
@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++; conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier, wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock, conf->resync_lock,
raid1_unplug(conf->mddev->queue)); md_kick_device(conf->mddev));
conf->nr_waiting--; conf->nr_waiting--;
} }
conf->nr_pending++; conf->nr_pending++;
@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1, conf->nr_pending == conf->nr_queued+1,
conf->resync_lock, conf->resync_lock,
({ flush_pending_writes(conf); ({ flush_pending_writes(conf);
raid1_unplug(conf->mddev->queue); })); md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock); spin_unlock_irq(&conf->resync_lock);
} }
static void unfreeze_array(conf_t *conf) static void unfreeze_array(conf_t *conf)
@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining); atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio); bio_list_add(&conf->pending_bio_list, mbio);
blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
} }
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid1d snuck in to freeze_array */ /* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
if (do_sync) if (do_sync || !bitmap)
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
return 0; return 0;
@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
unsigned long flags; unsigned long flags;
conf_t *conf = mddev->private; conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list; struct list_head *head = &conf->retry_list;
int unplug=0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
md_check_recovery(mddev); md_check_recovery(mddev);
@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
for (;;) { for (;;) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
unplug += flush_pending_writes(conf); flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) { if (list_empty(head)) {
@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
mddev = r1_bio->mddev; mddev = r1_bio->mddev;
conf = mddev->private; conf = mddev->private;
if (test_bit(R1BIO_IsSync, &r1_bio->state)) { if (test_bit(R1BIO_IsSync, &r1_bio->state))
sync_request_write(mddev, r1_bio); sync_request_write(mddev, r1_bio);
unplug = 1; else {
} else {
int disk; int disk;
/* we got a read error. Maybe the drive is bad. Maybe just /* we got a read error. Maybe the drive is bad. Maybe just
@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
bio->bi_end_io = raid1_end_read_request; bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync; bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio; bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio); generic_make_request(bio);
} }
} }
cond_resched(); cond_resched();
} }
if (unplug)
unplug_slaves(mddev);
} }
@ -2066,7 +2015,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
mddev->queue->unplug_fn = raid1_unplug;
mddev->queue->backing_dev_info.congested_fn = raid1_congested; mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev); md_integrity_register(mddev);


@ -57,23 +57,16 @@
*/ */
#define NR_RAID10_BIOS 256 #define NR_RAID10_BIOS 256
static void unplug_slaves(mddev_t *mddev);
static void allow_barrier(conf_t *conf); static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf); static void lower_barrier(conf_t *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{ {
conf_t *conf = data; conf_t *conf = data;
r10bio_t *r10_bio;
int size = offsetof(struct r10bio_s, devs[conf->copies]); int size = offsetof(struct r10bio_s, devs[conf->copies]);
/* allocate a r10bio with room for raid_disks entries in the bios array */ /* allocate a r10bio with room for raid_disks entries in the bios array */
r10_bio = kzalloc(size, gfp_flags); return kzalloc(size, gfp_flags);
if (!r10_bio && conf->mddev)
unplug_slaves(conf->mddev);
return r10_bio;
} }
static void r10bio_pool_free(void *r10_bio, void *data) static void r10bio_pool_free(void *r10_bio, void *data)
@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
int nalloc; int nalloc;
r10_bio = r10bio_pool_alloc(gfp_flags, conf); r10_bio = r10bio_pool_alloc(gfp_flags, conf);
if (!r10_bio) { if (!r10_bio)
unplug_slaves(conf->mddev);
return NULL; return NULL;
}
if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */ nalloc = conf->copies; /* resync */
@ -597,37 +588,6 @@ rb_out:
return disk; return disk;
} }
static void unplug_slaves(mddev_t *mddev)
{
conf_t *conf = mddev->private;
int i;
rcu_read_lock();
for (i=0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
}
rcu_read_unlock();
}
static void raid10_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
unplug_slaves(q->queuedata);
md_wakeup_thread(mddev->thread);
}
static int raid10_congested(void *data, int bits) static int raid10_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits)
return ret; return ret;
} }
static int flush_pending_writes(conf_t *conf) static void flush_pending_writes(conf_t *conf)
{ {
/* Any writes that have been queued but are awaiting /* Any writes that have been queued but are awaiting
* bitmap updates get flushed here. * bitmap updates get flushed here.
* We return 1 if any requests were actually submitted.
*/ */
int rv = 0;
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) { if (conf->pending_bio_list.head) {
struct bio *bio; struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list); bio = bio_list_get(&conf->pending_bio_list);
/* Spinlock only taken to quiet a warning */
spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk /* flush any pending bitmap writes to disk
* before proceeding w/ I/O */ * before proceeding w/ I/O */
@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio); generic_make_request(bio);
bio = next; bio = next;
} }
rv = 1;
} else } else
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
return rv;
} }
static void md_kick_device(mddev_t *mddev)
{
blk_flush_plug(current);
md_wakeup_thread(mddev->thread);
}
/* Barriers.... /* Barriers....
* Sometimes we need to suspend IO while we do something else, * Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array. * either some resync/recovery, or reconfigure the array.
@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */ /* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
conf->resync_lock, conf->resync_lock, md_kick_device(conf->mddev));
raid10_unplug(conf->mddev->queue));
/* block any new IO from starting */ /* block any new IO from starting */
conf->barrier++; conf->barrier++;
@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier, wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH, !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
conf->resync_lock, conf->resync_lock, md_kick_device(conf->mddev));
raid10_unplug(conf->mddev->queue));
spin_unlock_irq(&conf->resync_lock); spin_unlock_irq(&conf->resync_lock);
} }
@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++; conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier, wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock, conf->resync_lock,
raid10_unplug(conf->mddev->queue)); md_kick_device(conf->mddev));
conf->nr_waiting--; conf->nr_waiting--;
} }
conf->nr_pending++; conf->nr_pending++;
@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1, conf->nr_pending == conf->nr_queued+1,
conf->resync_lock, conf->resync_lock,
({ flush_pending_writes(conf); ({ flush_pending_writes(conf);
raid10_unplug(conf->mddev->queue); })); md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock); spin_unlock_irq(&conf->resync_lock);
} }
@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio); bio_list_add(&conf->pending_bio_list, mbio);
blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
} }
@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid10d snuck in to freeze_array */ /* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
if (do_sync) if (do_sync || !mddev->bitmap)
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
return 0; return 0;
@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
unsigned long flags; unsigned long flags;
conf_t *conf = mddev->private; conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list; struct list_head *head = &conf->retry_list;
int unplug=0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
md_check_recovery(mddev); md_check_recovery(mddev);
@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
for (;;) { for (;;) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
unplug += flush_pending_writes(conf); flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) { if (list_empty(head)) {
@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
mddev = r10_bio->mddev; mddev = r10_bio->mddev;
conf = mddev->private; conf = mddev->private;
if (test_bit(R10BIO_IsSync, &r10_bio->state)) { if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio); sync_request_write(mddev, r10_bio);
unplug = 1; else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
} else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
recovery_request_write(mddev, r10_bio); recovery_request_write(mddev, r10_bio);
unplug = 1; else {
} else {
int mirror; int mirror;
/* we got a read error. Maybe the drive is bad. Maybe just /* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it. * the block and we can fix it.
@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
bio->bi_rw = READ | do_sync; bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio; bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request; bio->bi_end_io = raid10_end_read_request;
unplug = 1;
generic_make_request(bio); generic_make_request(bio);
} }
} }
cond_resched(); cond_resched();
} }
if (unplug)
unplug_slaves(mddev);
} }
@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, size); md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size; mddev->resync_max_sectors = size;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested; mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;


@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
return 0; return 0;
} }
static void unplug_slaves(mddev_t *mddev);
static struct stripe_head * static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector, get_active_stripe(raid5_conf_t *conf, sector_t sector,
int previous, int noblock, int noquiesce) int previous, int noblock, int noquiesce)
@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4) < (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked), || !conf->inactive_blocked),
conf->device_lock, conf->device_lock,
md_raid5_unplug_device(conf) md_raid5_kick_device(conf));
);
conf->inactive_blocked = 0; conf->inactive_blocked = 0;
} else } else
init_stripe(sh, sector, previous); init_stripe(sh, sector, previous);
@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
wait_event_lock_irq(conf->wait_for_stripe, wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list), !list_empty(&conf->inactive_list),
conf->device_lock, conf->device_lock,
unplug_slaves(conf->mddev) blk_flush_plug(current));
);
osh = get_free_stripe(conf); osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1); atomic_set(&nsh->count, 1);
@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
} }
} }
static void unplug_slaves(mddev_t *mddev) void md_raid5_kick_device(raid5_conf_t *conf)
{ {
raid5_conf_t *conf = mddev->private; blk_flush_plug(current);
int i;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
rcu_read_lock();
for (i = 0; i < devs; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
}
rcu_read_unlock();
}
void md_raid5_unplug_device(raid5_conf_t *conf)
{
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
if (plugger_remove_plug(&conf->plug)) {
conf->seq_flush++;
raid5_activate_delayed(conf); raid5_activate_delayed(conf);
}
md_wakeup_thread(conf->mddev->thread); md_wakeup_thread(conf->mddev->thread);
spin_unlock_irqrestore(&conf->device_lock, flags);
unplug_slaves(conf->mddev);
} }
EXPORT_SYMBOL_GPL(md_raid5_unplug_device); EXPORT_SYMBOL_GPL(md_raid5_kick_device);
static void raid5_unplug(struct plug_handle *plug) static void raid5_unplug(struct plug_handle *plug)
{ {
raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
md_raid5_unplug_device(conf);
}
static void raid5_unplug_queue(struct request_queue *q) md_raid5_kick_device(conf);
{
mddev_t *mddev = q->queuedata;
md_raid5_unplug_device(mddev->private);
} }
int md_raid5_congested(mddev_t *mddev, int bits) int md_raid5_congested(mddev_t *mddev, int bits)
@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything * add failed due to overlap. Flush everything
* and wait a while * and wait a while
*/ */
md_raid5_unplug_device(conf); md_raid5_kick_device(conf);
release_stripe(sh); release_stripe(sh);
schedule(); schedule();
goto retry; goto retry;
@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
if (sector_nr >= max_sector) { if (sector_nr >= max_sector) {
/* just being told to finish up .. nothing much to do */ /* just being told to finish up .. nothing much to do */
unplug_slaves(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
end_reshape(conf); end_reshape(conf);
@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
async_tx_issue_pending_all(); async_tx_issue_pending_all();
unplug_slaves(mddev);
pr_debug("--- raid5d inactive\n"); pr_debug("--- raid5d inactive\n");
} }
@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested; mddev->queue->backing_dev_info.congested_fn = raid5_congested;
mddev->queue->unplug_fn = raid5_unplug_queue; mddev->queue->queue_lock = &conf->device_lock;
chunk_size = mddev->chunk_sectors << 9; chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size); blk_queue_io_min(mddev->queue, chunk_size);


@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
} }
extern int md_raid5_congested(mddev_t *mddev, int bits); extern int md_raid5_congested(mddev_t *mddev, int bits);
extern void md_raid5_unplug_device(raid5_conf_t *conf); extern void md_raid5_kick_device(raid5_conf_t *conf);
extern int raid5_set_cache_size(mddev_t *mddev, int size); extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif #endif


@ -897,11 +897,7 @@ static void i2o_block_request_fn(struct request_queue *q)
{ {
struct request *req; struct request *req;
while (!blk_queue_plugged(q)) {
req = blk_peek_request(q);
if (!req)
break;
while ((req = blk_peek_request(q)) != NULL) {
if (req->cmd_type == REQ_TYPE_FS) {
struct i2o_block_delayed_request *dreq;
struct i2o_block_request *ireq = req->special;
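
With the queue plug gone there is no plugged state left for a request_fn to test, so fetch loops like the one above collapse into a plain peek/fetch loop that drains whatever is ready and returns when nothing is. A sketch of the resulting idiom for a trivial driver; the mydrv name and the immediate completion are illustrative only:

#include <linux/blkdev.h>

/* Illustrative request_fn: just drain the queue, no plugged-state check. */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *req;

	/* Called with q->queue_lock held. */
	while ((req = blk_fetch_request(q)) != NULL) {
		/* ... hand req to the hardware here (hypothetical) ... */
		__blk_end_request_all(req, 0);	/* complete it for the sketch */
	}
}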


@ -55,7 +55,6 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
req = blk_fetch_request(q); req = blk_fetch_request(q);
mq->req = req; mq->req = req;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);


@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
return; return;
} }
/* Now we try to fetch requests from the request queue */ /* Now we try to fetch requests from the request queue */
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { while ((req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY && if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) { rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev, DBF_DEV_EVENT(DBF_ERR, basedev,


@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock); spin_lock_irq(&device->blk_data.request_queue_lock);
while ( while (
!blk_queue_plugged(queue) &&
blk_peek_request(queue) && blk_peek_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE nr_queued < TAPEBLOCK_MIN_REQUEUE
) { ) {


@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
struct kmem_cache *scsi_sdb_cache;
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
* not change behaviour from the previous unplug mechanism, experimentation
* may prove this needs changing.
*/
#define SCSI_QUEUE_DELAY 3
static void scsi_run_queue(struct request_queue *q);
/*
@ -149,13 +156,6 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
/* /*
* Requeue this command. It will go before all other commands * Requeue this command. It will go before all other commands
* that are already in the queue. * that are already in the queue.
*
* NOTE: there is magic here about the way the queue is plugged if
* we have no outstanding commands.
*
* Although we *don't* plug the queue, we call the request
* function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately.
*/ */
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request); blk_requeue_request(q, cmd->request);
@ -1194,11 +1194,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
case BLKPREP_DEFER:
/*
* If we defer, the blk_peek_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that.
* queue must be restarted, so we schedule a callback to happen
* shortly.
*/
if (sdev->device_busy == 0)
blk_plug_device(q);
blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default: default:
req->cmd_flags |= REQ_DONTPREP; req->cmd_flags |= REQ_DONTPREP;
@ -1237,7 +1237,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
sdev_printk(KERN_INFO, sdev, sdev_printk(KERN_INFO, sdev,
"unblocking device at zero depth\n")); "unblocking device at zero depth\n"));
} else { } else {
blk_plug_device(q); blk_delay_queue(q, SCSI_QUEUE_DELAY);
return 0; return 0;
} }
} }
@ -1467,7 +1467,7 @@ static void scsi_request_fn(struct request_queue *q)
* the host is no longer able to accept any more requests.
*/
shost = sdev->host;
while (!blk_queue_plugged(q)) {
for (;;) {
int rtn;
/*
* get next queueable request. We do this early to make sure
@ -1546,15 +1546,8 @@ static void scsi_request_fn(struct request_queue *q)
*/
rtn = scsi_dispatch_cmd(cmd);
spin_lock_irq(q->queue_lock);
if(rtn) {
/* we're refusing the command; because of
* the way locks get dropped, we need to
* check here if plugging is required */
if(sdev->device_busy == 0)
blk_plug_device(q);
break;
}
if (rtn)
goto out_delay;
}
goto out;
@ -1573,8 +1566,9 @@ static void scsi_request_fn(struct request_queue *q)
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
sdev->device_busy--;
out_delay:
if (sdev->device_busy == 0)
blk_plug_device(q);
blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
/* must be careful here...if we trigger the ->remove() function
* we cannot be holding the q lock */
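
The scsi_request_fn() rework above shows the general replacement for "plug and wait for an unplug": when the driver cannot make progress it asks the block layer to run the queue again a few milliseconds later with blk_delay_queue(), mirroring the SCSI_QUEUE_DELAY constant introduced earlier in this file. A condensed sketch of that shape; the mydrv names and the in-flight accounting are hypothetical:

#include <linux/blkdev.h>

#define MYDRV_QUEUE_DELAY	3	/* msecs, mirrors SCSI_QUEUE_DELAY */

/* Hypothetical per-device state hung off q->queuedata. */
struct mydrv_dev {
	unsigned int in_flight;
	unsigned int max_in_flight;
};

static void mydrv_request_fn(struct request_queue *q)
{
	struct mydrv_dev *dev = q->queuedata;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (!req)
			break;
		if (dev->in_flight >= dev->max_in_flight) {
			/* Out of resources: re-run the queue shortly. */
			blk_delay_queue(q, MYDRV_QUEUE_DELAY);
			break;
		}
		blk_start_request(req);
		dev->in_flight++;
		/* ... hand the request to the hardware here ... */
	}
}

The 3 ms figure is simply carried over from the old unplug timer behaviour, as the new comment in this file notes.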


@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
if (!get_device(dev)) if (!get_device(dev))
return; return;
while (!blk_queue_plugged(q)) { while (1) {
if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
!(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
break; break;


@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int ret; int ret;
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) { while ((req = blk_fetch_request(q)) != NULL) {
req = blk_fetch_request(q);
if (!req)
break;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler; handler = to_sas_internal(shost->transportt)->f->smp_handler;


@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task)
{ {
struct se_device *dev = task->task_se_cmd->se_dev; struct se_device *dev = task->task_se_cmd->se_dev;
struct iblock_req *req = IBLOCK_REQ(task); struct iblock_req *req = IBLOCK_REQ(task);
struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
struct bio *bio = req->ib_bio, *nbio = NULL; struct bio *bio = req->ib_bio, *nbio = NULL;
struct blk_plug plug;
int rw; int rw;
if (task->task_data_direction == DMA_TO_DEVICE) { if (task->task_data_direction == DMA_TO_DEVICE) {
@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task)
rw = READ; rw = READ;
} }
blk_start_plug(&plug);
while (bio) { while (bio) {
nbio = bio->bi_next; nbio = bio->bi_next;
bio->bi_next = NULL; bio->bi_next = NULL;
@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio); submit_bio(rw, bio);
bio = nbio; bio = nbio;
} }
blk_finish_plug(&plug);
if (q->unplug_fn)
q->unplug_fn(q);
return PYX_TRANSPORT_SENT_TO_TRANSPORT; return PYX_TRANSPORT_SENT_TO_TRANSPORT;
} }


@ -73,7 +73,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations adfs_aops = { static const struct address_space_operations adfs_aops = {
.readpage = adfs_readpage, .readpage = adfs_readpage,
.writepage = adfs_writepage, .writepage = adfs_writepage,
.sync_page = block_sync_page,
.write_begin = adfs_write_begin, .write_begin = adfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = _adfs_bmap .bmap = _adfs_bmap


@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations affs_aops = { const struct address_space_operations affs_aops = {
.readpage = affs_readpage, .readpage = affs_readpage,
.writepage = affs_writepage, .writepage = affs_writepage,
.sync_page = block_sync_page,
.write_begin = affs_write_begin, .write_begin = affs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = _affs_bmap .bmap = _affs_bmap
@ -786,7 +785,6 @@ out:
const struct address_space_operations affs_aops_ofs = { const struct address_space_operations affs_aops_ofs = {
.readpage = affs_readpage_ofs, .readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs, //.writepage = affs_writepage_ofs,
//.sync_page = affs_sync_page_ofs,
.write_begin = affs_write_begin_ofs, .write_begin = affs_write_begin_ofs,
.write_end = affs_write_end_ofs .write_end = affs_write_end_ofs
}; };


@ -34,8 +34,6 @@
#include <linux/security.h> #include <linux/security.h>
#include <linux/eventfd.h> #include <linux/eventfd.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock); static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head); static LIST_HEAD(fput_head);
#define AIO_BATCH_HASH_BITS 3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE (1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
struct hlist_node list;
struct address_space *mapping;
};
mempool_t *abe_pool;
static void aio_kick_handler(struct work_struct *); static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *); static void aio_queue_work(struct kioctx *);
@ -86,8 +76,7 @@ static int __init aio_setup(void)
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = create_workqueue("aio"); aio_wq = create_workqueue("aio");
abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry)); BUG_ON(!aio_wq);
BUG_ON(!aio_wq || !abe_pool);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page)); pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
return 0; return 0;
} }
static void aio_batch_add(struct address_space *mapping,
struct hlist_head *batch_hash)
{
struct aio_batch_entry *abe;
struct hlist_node *pos;
unsigned bucket;
bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
if (abe->mapping == mapping)
return;
}
abe = mempool_alloc(abe_pool, GFP_KERNEL);
/*
* we should be using igrab here, but
* we don't want to hammer on the global
* inode spinlock just to take an extra
* reference on a file that we must already
* have a reference to.
*
* When we're called, we always have a reference
* on the file, so we must always have a reference
* on the inode, so ihold() is safe here.
*/
ihold(mapping->host);
abe->mapping = mapping;
hlist_add_head(&abe->list, &batch_hash[bucket]);
return;
}
static void aio_batch_free(struct hlist_head *batch_hash)
{
struct aio_batch_entry *abe;
struct hlist_node *pos, *n;
int i;
for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
blk_run_address_space(abe->mapping);
iput(abe->mapping->host);
hlist_del(&abe->list);
mempool_free(abe, abe_pool);
}
}
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, struct hlist_head *batch_hash, struct iocb *iocb, bool compat)
bool compat)
{ {
struct kiocb *req; struct kiocb *req;
struct file *file; struct file *file;
@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
; ;
} }
spin_unlock_irq(&ctx->ctx_lock); spin_unlock_irq(&ctx->ctx_lock);
if (req->ki_opcode == IOCB_CMD_PREAD ||
req->ki_opcode == IOCB_CMD_PREADV ||
req->ki_opcode == IOCB_CMD_PWRITE ||
req->ki_opcode == IOCB_CMD_PWRITEV)
aio_batch_add(file->f_mapping, batch_hash);
aio_put_req(req); /* drop extra ref to req */ aio_put_req(req); /* drop extra ref to req */
return 0; return 0;
@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
struct kioctx *ctx; struct kioctx *ctx;
long ret = 0; long ret = 0;
int i; int i;
struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
return -EINVAL;
}
blk_start_plug(&plug);
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
break;
}
ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
ret = io_submit_one(ctx, user_iocb, &tmp, compat);
if (ret)
break;
}
aio_batch_free(batch_hash);
blk_finish_plug(&plug);
put_ioctx(ctx);
return i ? i : ret;
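
For callers of io_submit(2) the visible effect is that every iocb passed in a single call is now submitted under one on-stack plug instead of the old per-mapping batch hash, so a batch of direct reads tends to reach the device as one burst. An illustrative user-space view of such a batch, assuming libaio (link with -laio) and an existing "testfile"; this program is not part of this commit:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <libaio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define NR_IOS	4
#define IO_SIZE	4096

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cbs[NR_IOS], *cbp[NR_IOS];
	struct io_event events[NR_IOS];
	void *buf;
	int fd, i;

	fd = open("testfile", O_RDONLY | O_DIRECT);
	if (fd < 0 || io_setup(NR_IOS, &ctx) != 0)
		return 1;

	for (i = 0; i < NR_IOS; i++) {
		if (posix_memalign(&buf, IO_SIZE, IO_SIZE))
			return 1;
		io_prep_pread(&cbs[i], fd, buf, IO_SIZE, (long long)i * IO_SIZE);
		cbp[i] = &cbs[i];
	}

	/* All four reads go down in one io_submit() call, i.e. under one plug. */
	if (io_submit(ctx, NR_IOS, cbp) != NR_IOS)
		return 1;
	if (io_getevents(ctx, NR_IOS, NR_IOS, events, NULL) != NR_IOS)
		return 1;

	io_destroy(ctx);
	close(fd);
	return 0;
}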


@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
static const struct address_space_operations befs_aops = { static const struct address_space_operations befs_aops = {
.readpage = befs_readpage, .readpage = befs_readpage,
.sync_page = block_sync_page,
.bmap = befs_bmap, .bmap = befs_bmap,
}; };


@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations bfs_aops = { const struct address_space_operations bfs_aops = {
.readpage = bfs_readpage, .readpage = bfs_readpage,
.writepage = bfs_writepage, .writepage = bfs_writepage,
.sync_page = block_sync_page,
.write_begin = bfs_write_begin, .write_begin = bfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = bfs_bmap, .bmap = bfs_bmap,


@ -1529,7 +1529,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
static const struct address_space_operations def_blk_aops = { static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage, .readpage = blkdev_readpage,
.writepage = blkdev_writepage, .writepage = blkdev_writepage,
.sync_page = block_sync_page,
.write_begin = blkdev_write_begin, .write_begin = blkdev_write_begin,
.write_end = blkdev_write_end, .write_end = blkdev_write_end,
.writepages = generic_writepages, .writepages = generic_writepages,


@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
.writepages = btree_writepages, .writepages = btree_writepages,
.releasepage = btree_releasepage, .releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage, .invalidatepage = btree_invalidatepage,
.sync_page = block_sync_page,
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage, .migratepage = btree_migratepage,
#endif #endif
@ -1330,82 +1329,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
return ret; return ret;
} }
/*
* this unplugs every device on the box, and it is only used when page
* is null
*/
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
struct btrfs_device *device;
struct btrfs_fs_info *info;
info = (struct btrfs_fs_info *)bdi->unplug_io_data;
list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
if (!device->bdev)
continue;
bdi = blk_get_backing_dev_info(device->bdev);
if (bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, page);
}
}
static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
struct inode *inode;
struct extent_map_tree *em_tree;
struct extent_map *em;
struct address_space *mapping;
u64 offset;
/* the generic O_DIRECT read code does this */
if (1 || !page) {
__unplug_io_fn(bdi, page);
return;
}
/*
* page->mapping may change at any time. Get a consistent copy
* and use that for everything below
*/
smp_mb();
mapping = page->mapping;
if (!mapping)
return;
inode = mapping->host;
/*
* don't do the expensive searching for a small number of
* devices
*/
if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
__unplug_io_fn(bdi, page);
return;
}
offset = page_offset(page);
em_tree = &BTRFS_I(inode)->extent_tree;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
read_unlock(&em_tree->lock);
if (!em) {
__unplug_io_fn(bdi, page);
return;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
free_extent_map(em);
__unplug_io_fn(bdi, page);
return;
}
offset = offset - em->start;
btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
em->block_start + offset, page);
free_extent_map(em);
}
/* /*
* If this fails, caller must call bdi_destroy() to get rid of the * If this fails, caller must call bdi_destroy() to get rid of the
* bdi again. * bdi again.
@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
return err; return err;
bdi->ra_pages = default_backing_dev_info.ra_pages; bdi->ra_pages = default_backing_dev_info.ra_pages;
bdi->unplug_io_fn = btrfs_unplug_io_fn;
bdi->unplug_io_data = info;
bdi->congested_fn = btrfs_congested_fn; bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info; bdi->congested_data = info;
return 0; return 0;


@ -2188,7 +2188,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unsigned long nr_written = 0; unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL) if (wbc->sync_mode == WB_SYNC_ALL)
write_flags = WRITE_SYNC_PLUG; write_flags = WRITE_SYNC;
else else
write_flags = WRITE; write_flags = WRITE;


@ -7340,7 +7340,6 @@ static const struct address_space_operations btrfs_aops = {
.writepage = btrfs_writepage, .writepage = btrfs_writepage,
.writepages = btrfs_writepages, .writepages = btrfs_writepages,
.readpages = btrfs_readpages, .readpages = btrfs_readpages,
.sync_page = block_sync_page,
.direct_IO = btrfs_direct_IO, .direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage, .invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage, .releasepage = btrfs_releasepage,


@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
struct bio *cur; struct bio *cur;
int again = 0; int again = 0;
unsigned long num_run; unsigned long num_run;
unsigned long num_sync_run;
unsigned long batch_run = 0; unsigned long batch_run = 0;
unsigned long limit; unsigned long limit;
unsigned long last_waited = 0; unsigned long last_waited = 0;
@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
limit = btrfs_async_submit_limit(fs_info); limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3; limit = limit * 2 / 3;
/* we want to make sure that every time we switch from the sync
* list to the normal list, we unplug
*/
num_sync_run = 0;
loop: loop:
spin_lock(&device->io_lock); spin_lock(&device->io_lock);
@ -223,15 +217,6 @@ loop_lock:
spin_unlock(&device->io_lock); spin_unlock(&device->io_lock);
/*
* if we're doing the regular priority list, make sure we unplug
* for any high prio bios we've sent down
*/
if (pending_bios == &device->pending_bios && num_sync_run > 0) {
num_sync_run = 0;
blk_run_backing_dev(bdi, NULL);
}
while (pending) { while (pending) {
rmb(); rmb();
@ -259,19 +244,11 @@ loop_lock:
BUG_ON(atomic_read(&cur->bi_cnt) == 0); BUG_ON(atomic_read(&cur->bi_cnt) == 0);
if (cur->bi_rw & REQ_SYNC)
num_sync_run++;
submit_bio(cur->bi_rw, cur); submit_bio(cur->bi_rw, cur);
num_run++; num_run++;
batch_run++; batch_run++;
if (need_resched()) { if (need_resched())
if (num_sync_run) {
blk_run_backing_dev(bdi, NULL);
num_sync_run = 0;
}
cond_resched(); cond_resched();
}
/* /*
* we made progress, there is more work to do and the bdi * we made progress, there is more work to do and the bdi
@ -304,13 +281,8 @@ loop_lock:
* against it before looping * against it before looping
*/ */
last_waited = ioc->last_waited; last_waited = ioc->last_waited;
if (need_resched()) { if (need_resched())
if (num_sync_run) {
blk_run_backing_dev(bdi, NULL);
num_sync_run = 0;
}
cond_resched(); cond_resched();
}
continue; continue;
} }
spin_lock(&device->io_lock); spin_lock(&device->io_lock);
@ -323,22 +295,6 @@ loop_lock:
} }
} }
if (num_sync_run) {
num_sync_run = 0;
blk_run_backing_dev(bdi, NULL);
}
/*
* IO has already been through a long path to get here. Checksumming,
* async helper threads, perhaps compression. We've done a pretty
* good job of collecting a batch of IO and should just unplug
* the device right away.
*
* This will help anyone who is waiting on the IO, they might have
* already unplugged, but managed to do so before the bio they
* cared about found its way down here.
*/
blk_run_backing_dev(bdi, NULL);
cond_resched(); cond_resched();
if (again) if (again)
goto loop; goto loop;
@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length, u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret, struct btrfs_multi_bio **multi_ret,
int mirror_num, struct page *unplug_page) int mirror_num)
{ {
struct extent_map *em; struct extent_map *em;
struct map_lookup *map; struct map_lookup *map;
@ -2987,11 +2943,6 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length); em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
if (!em && unplug_page) {
kfree(multi);
return 0;
}
if (!em) { if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n", printk(KERN_CRIT "unable to find logical %llu len %llu\n",
(unsigned long long)logical, (unsigned long long)logical,
@ -3047,13 +2998,13 @@ again:
*length = em->len - offset; *length = em->len - offset;
} }
if (!multi_ret && !unplug_page) if (!multi_ret)
goto out; goto out;
num_stripes = 1; num_stripes = 1;
stripe_index = 0; stripe_index = 0;
if (map->type & BTRFS_BLOCK_GROUP_RAID1) { if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
if (unplug_page || (rw & REQ_WRITE)) if (rw & REQ_WRITE)
num_stripes = map->num_stripes; num_stripes = map->num_stripes;
else if (mirror_num) else if (mirror_num)
stripe_index = mirror_num - 1; stripe_index = mirror_num - 1;
@ -3075,7 +3026,7 @@ again:
stripe_index = do_div(stripe_nr, factor); stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes; stripe_index *= map->sub_stripes;
if (unplug_page || (rw & REQ_WRITE)) if (rw & REQ_WRITE)
num_stripes = map->sub_stripes; num_stripes = map->sub_stripes;
else if (mirror_num) else if (mirror_num)
stripe_index += mirror_num - 1; stripe_index += mirror_num - 1;
@ -3095,22 +3046,10 @@ again:
BUG_ON(stripe_index >= map->num_stripes); BUG_ON(stripe_index >= map->num_stripes);
for (i = 0; i < num_stripes; i++) { for (i = 0; i < num_stripes; i++) {
if (unplug_page) {
struct btrfs_device *device;
struct backing_dev_info *bdi;
device = map->stripes[stripe_index].dev;
if (device->bdev) {
bdi = blk_get_backing_dev_info(device->bdev);
if (bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, unplug_page);
}
} else {
multi->stripes[i].physical = multi->stripes[i].physical =
map->stripes[stripe_index].physical + map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len; stripe_offset + stripe_nr * map->stripe_len;
multi->stripes[i].dev = map->stripes[stripe_index].dev; multi->stripes[i].dev = map->stripes[stripe_index].dev;
}
stripe_index++; stripe_index++;
} }
if (multi_ret) { if (multi_ret) {
@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
struct btrfs_multi_bio **multi_ret, int mirror_num) struct btrfs_multi_bio **multi_ret, int mirror_num)
{ {
return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
mirror_num, NULL); mirror_num);
} }
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
return 0; return 0;
} }
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
u64 logical, struct page *page)
{
u64 length = PAGE_CACHE_SIZE;
return __btrfs_map_block(map_tree, READ, logical, &length,
NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err) static void end_bio_multi_stripe(struct bio *bio, int err)
{ {
struct btrfs_multi_bio *multi = bio->bi_private; struct btrfs_multi_bio *multi = bio->bi_private;


@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
} }
EXPORT_SYMBOL(init_buffer); EXPORT_SYMBOL(init_buffer);
static int sync_buffer(void *word) static int sleep_on_buffer(void *word)
{ {
struct block_device *bd;
struct buffer_head *bh
= container_of(word, struct buffer_head, b_state);
smp_mb();
bd = bh->b_bdev;
if (bd)
blk_run_address_space(bd->bd_inode->i_mapping);
io_schedule(); io_schedule();
return 0; return 0;
} }
void __lock_buffer(struct buffer_head *bh) void __lock_buffer(struct buffer_head *bh)
{ {
wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
} }
EXPORT_SYMBOL(__lock_buffer); EXPORT_SYMBOL(__lock_buffer);
@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
*/ */
void __wait_on_buffer(struct buffer_head * bh) void __wait_on_buffer(struct buffer_head * bh)
{ {
wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
} }
EXPORT_SYMBOL(__wait_on_buffer); EXPORT_SYMBOL(__wait_on_buffer);
@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{ {
struct buffer_head *bh; struct buffer_head *bh;
struct list_head tmp; struct list_head tmp;
struct address_space *mapping, *prev_mapping = NULL; struct address_space *mapping;
int err = 0, err2; int err = 0, err2;
INIT_LIST_HEAD(&tmp); INIT_LIST_HEAD(&tmp);
@ -775,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* still in flight on potentially older * still in flight on potentially older
* contents. * contents.
*/ */
write_dirty_buffer(bh, WRITE_SYNC_PLUG); write_dirty_buffer(bh, WRITE_SYNC);
/* /*
* Kick off IO for the previous mapping. Note * Kick off IO for the previous mapping. Note
@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* wait_on_buffer() will do that for us * wait_on_buffer() will do that for us
* through sync_buffer(). * through sync_buffer().
*/ */
if (prev_mapping && prev_mapping != mapping)
blk_run_address_space(prev_mapping);
prev_mapping = mapping;
brelse(bh); brelse(bh);
spin_lock(lock); spin_lock(lock);
} }
@ -1614,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
* prevents this contention from occurring. * prevents this contention from occurring.
* *
* If block_write_full_page() is called with wbc->sync_mode == * If block_write_full_page() is called with wbc->sync_mode ==
* WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
* causes the writes to be flagged as synchronous writes, but the * causes the writes to be flagged as synchronous writes.
* block device queue will NOT be unplugged, since usually many pages
* will be pushed to the out before the higher-level caller actually
* waits for the writes to be completed. The various wait functions,
* such as wait_on_writeback_range() will ultimately call sync_page()
* which will ultimately call blk_run_backing_dev(), which will end up
* unplugging the device queue.
*/ */
static int __block_write_full_page(struct inode *inode, struct page *page, static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc, get_block_t *get_block, struct writeback_control *wbc,
@ -1634,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
const unsigned blocksize = 1 << inode->i_blkbits; const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0; int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ? int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC_PLUG : WRITE); WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
@ -3138,17 +3120,6 @@ out:
} }
EXPORT_SYMBOL(try_to_free_buffers); EXPORT_SYMBOL(try_to_free_buffers);
void block_sync_page(struct page *page)
{
struct address_space *mapping;
smp_mb();
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
}
EXPORT_SYMBOL(block_sync_page);
/* /*
* There are no bdflush tunables left. But distributions are * There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here. * still running obsolete flush daemons, so we terminate them here.
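
The buffer wait path above now boils down to a plain bit-wait whose action routine simply sleeps; nothing kicks the request queue on behalf of the waiter any longer. A minimal sketch of that pattern, assuming only the generic wait_on_bit()/io_schedule() interfaces (the example_* names are placeholders, not functions from fs/buffer.c):

#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Bit-wait action: just sleep until the I/O completion wakes us. */
static int example_sleep_on_buffer(void *word)
{
        io_schedule();
        return 0;
}

/* Wait for BH_Lock to clear, relying on the end_io handler to unlock and wake. */
static void example_wait_on_buffer(struct buffer_head *bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, example_sleep_on_buffer,
                    TASK_UNINTERRUPTIBLE);
}
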


@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
return rc; return rc;
} }
/* static void cifs_sync_page(struct page *page)
{
struct address_space *mapping;
struct inode *inode;
unsigned long index = page->index;
unsigned int rpages = 0;
int rc = 0;
cFYI(1, "sync page %p", page);
mapping = page->mapping;
if (!mapping)
return 0;
inode = mapping->host;
if (!inode)
return; */
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
#if 0
if (rc < 0)
return rc;
return 0;
#endif
} */
/* /*
* As file closes, flush all cached write data for this inode checking * As file closes, flush all cached write data for this inode checking
* for write behind errors. * for write behind errors.
@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
.set_page_dirty = __set_page_dirty_nobuffers, .set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page, .releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page, .invalidatepage = cifs_invalidate_page,
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */ /* .direct_IO = */
}; };
@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.set_page_dirty = __set_page_dirty_nobuffers, .set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page, .releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page, .invalidatepage = cifs_invalidate_page,
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */ /* .direct_IO = */
}; };


@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
((rw & READ) || (dio->result == dio->size))) ((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED; ret = -EIOCBQUEUED;
if (ret != -EIOCBQUEUED) { if (ret != -EIOCBQUEUED)
/* All IO is now issued, send it on its way */
blk_run_address_space(inode->i_mapping);
dio_await_completion(dio); dio_await_completion(dio);
}
/* /*
* Sync will always be dropping the final ref and completing the * Sync will always be dropping the final ref and completing the
@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct dio *dio; struct dio *dio;
if (rw & WRITE) if (rw & WRITE)
rw = WRITE_ODIRECT_PLUG; rw = WRITE_ODIRECT;
if (bdev) if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));


@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
} }
static const struct address_space_operations efs_aops = { static const struct address_space_operations efs_aops = {
.readpage = efs_readpage, .readpage = efs_readpage,
.sync_page = block_sync_page,
.bmap = _efs_bmap .bmap = _efs_bmap
}; };


@ -795,7 +795,6 @@ const struct address_space_operations exofs_aops = {
.direct_IO = NULL, /* TODO: Should be trivial to do */ .direct_IO = NULL, /* TODO: Should be trivial to do */
/* With these NULL has special meaning or default is not exported */ /* With these NULL has special meaning or default is not exported */
.sync_page = NULL,
.get_xip_mem = NULL, .get_xip_mem = NULL,
.migratepage = NULL, .migratepage = NULL,
.launder_page = NULL, .launder_page = NULL,


@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
.readpage = ext2_readpage, .readpage = ext2_readpage,
.readpages = ext2_readpages, .readpages = ext2_readpages,
.writepage = ext2_writepage, .writepage = ext2_writepage,
.sync_page = block_sync_page,
.write_begin = ext2_write_begin, .write_begin = ext2_write_begin,
.write_end = ext2_write_end, .write_end = ext2_write_end,
.bmap = ext2_bmap, .bmap = ext2_bmap,
@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage, .readpage = ext2_readpage,
.readpages = ext2_readpages, .readpages = ext2_readpages,
.writepage = ext2_nobh_writepage, .writepage = ext2_nobh_writepage,
.sync_page = block_sync_page,
.write_begin = ext2_nobh_write_begin, .write_begin = ext2_nobh_write_begin,
.write_end = nobh_write_end, .write_end = nobh_write_end,
.bmap = ext2_bmap, .bmap = ext2_bmap,
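
After this series the .sync_page member no longer exists in struct address_space_operations, so the block_sync_page initializer is dropped rather than replaced, as in the ext2 tables above. A minimal sketch of what such a table looks like afterwards; example_readpage() and example_writepage() are hypothetical stand-ins for a filesystem's real methods, kept deliberately trivial.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Trivial placeholder methods, for illustration only. */
static int example_readpage(struct file *file, struct page *page)
{
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
        unlock_page(page);
        return 0;
}

static const struct address_space_operations example_aops = {
        .readpage       = example_readpage,
        .writepage      = example_writepage,
        /* no .sync_page entry -- the field has been removed */
};
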


@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
.readpage = ext3_readpage, .readpage = ext3_readpage,
.readpages = ext3_readpages, .readpages = ext3_readpages,
.writepage = ext3_ordered_writepage, .writepage = ext3_ordered_writepage,
.sync_page = block_sync_page,
.write_begin = ext3_write_begin, .write_begin = ext3_write_begin,
.write_end = ext3_ordered_write_end, .write_end = ext3_ordered_write_end,
.bmap = ext3_bmap, .bmap = ext3_bmap,
@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
.readpage = ext3_readpage, .readpage = ext3_readpage,
.readpages = ext3_readpages, .readpages = ext3_readpages,
.writepage = ext3_writeback_writepage, .writepage = ext3_writeback_writepage,
.sync_page = block_sync_page,
.write_begin = ext3_write_begin, .write_begin = ext3_write_begin,
.write_end = ext3_writeback_write_end, .write_end = ext3_writeback_write_end,
.bmap = ext3_bmap, .bmap = ext3_bmap,
@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
.readpage = ext3_readpage, .readpage = ext3_readpage,
.readpages = ext3_readpages, .readpages = ext3_readpages,
.writepage = ext3_journalled_writepage, .writepage = ext3_journalled_writepage,
.sync_page = block_sync_page,
.write_begin = ext3_write_begin, .write_begin = ext3_write_begin,
.write_end = ext3_journalled_write_end, .write_end = ext3_journalled_write_end,
.set_page_dirty = ext3_journalled_set_page_dirty, .set_page_dirty = ext3_journalled_set_page_dirty,


@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage, .readpage = ext4_readpage,
.readpages = ext4_readpages, .readpages = ext4_readpages,
.writepage = ext4_writepage, .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin, .write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end, .write_end = ext4_ordered_write_end,
.bmap = ext4_bmap, .bmap = ext4_bmap,
@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage, .readpage = ext4_readpage,
.readpages = ext4_readpages, .readpages = ext4_readpages,
.writepage = ext4_writepage, .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin, .write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end, .write_end = ext4_writeback_write_end,
.bmap = ext4_bmap, .bmap = ext4_bmap,
@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage, .readpage = ext4_readpage,
.readpages = ext4_readpages, .readpages = ext4_readpages,
.writepage = ext4_writepage, .writepage = ext4_writepage,
.sync_page = block_sync_page,
.write_begin = ext4_write_begin, .write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end, .write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty, .set_page_dirty = ext4_journalled_set_page_dirty,
@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
.readpages = ext4_readpages, .readpages = ext4_readpages,
.writepage = ext4_writepage, .writepage = ext4_writepage,
.writepages = ext4_da_writepages, .writepages = ext4_da_writepages,
.sync_page = block_sync_page,
.write_begin = ext4_da_write_begin, .write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end, .write_end = ext4_da_write_end,
.bmap = ext4_bmap, .bmap = ext4_bmap,


@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio; io->io_bio = bio;
io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
WRITE_SYNC_PLUG : WRITE);
io->io_next_block = bh->b_blocknr; io->io_next_block = bh->b_blocknr;
return 0; return 0;
} }


@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
.readpages = fat_readpages, .readpages = fat_readpages,
.writepage = fat_writepage, .writepage = fat_writepage,
.writepages = fat_writepages, .writepages = fat_writepages,
.sync_page = block_sync_page,
.write_begin = fat_write_begin, .write_begin = fat_write_begin,
.write_end = fat_write_end, .write_end = fat_write_end,
.direct_IO = fat_direct_IO, .direct_IO = fat_direct_IO,


@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = { const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage, .readpage = vxfs_readpage,
.bmap = vxfs_bmap, .bmap = vxfs_bmap,
.sync_page = block_sync_page,
}; };
inline void inline void


@ -868,7 +868,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
fc->bdi.name = "fuse"; fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */ /* fuse does it's own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;


@ -1116,7 +1116,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writeback_writepages, .writepages = gfs2_writeback_writepages,
.readpage = gfs2_readpage, .readpage = gfs2_readpage,
.readpages = gfs2_readpages, .readpages = gfs2_readpages,
.sync_page = block_sync_page,
.write_begin = gfs2_write_begin, .write_begin = gfs2_write_begin,
.write_end = gfs2_write_end, .write_end = gfs2_write_end,
.bmap = gfs2_bmap, .bmap = gfs2_bmap,
@ -1132,7 +1131,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepage = gfs2_ordered_writepage, .writepage = gfs2_ordered_writepage,
.readpage = gfs2_readpage, .readpage = gfs2_readpage,
.readpages = gfs2_readpages, .readpages = gfs2_readpages,
.sync_page = block_sync_page,
.write_begin = gfs2_write_begin, .write_begin = gfs2_write_begin,
.write_end = gfs2_write_end, .write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty, .set_page_dirty = gfs2_set_page_dirty,
@ -1150,7 +1148,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages, .writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage, .readpage = gfs2_readpage,
.readpages = gfs2_readpages, .readpages = gfs2_readpages,
.sync_page = block_sync_page,
.write_begin = gfs2_write_begin, .write_begin = gfs2_write_begin,
.write_end = gfs2_write_end, .write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty, .set_page_dirty = gfs2_set_page_dirty,


@ -121,7 +121,7 @@ __acquires(&sdp->sd_log_lock)
lock_buffer(bh); lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) { if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync; bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} else { } else {
unlock_buffer(bh); unlock_buffer(bh);
brelse(bh); brelse(bh);
@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
lock_buffer(bh); lock_buffer(bh);
if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) { if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync; bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} else { } else {
unlock_buffer(bh); unlock_buffer(bh);
brelse(bh); brelse(bh);
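
The gfs2 log hunks above all follow the same pattern: a dirty buffer is handed to submit_bh() with WRITE_SYNC, since the old WRITE_SYNC_PLUG flag no longer exists. A small sketch of that pattern, under the assumption that the caller has already locked the buffer; write_locked_buffer() is a hypothetical helper name.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Submit an already-locked buffer for synchronous writeback, or release
 * it if it turned out to be clean.  On the dirty path the reference is
 * dropped by end_buffer_write_sync() at completion time.
 */
static void write_locked_buffer(struct buffer_head *bh)
{
        if (test_clear_buffer_dirty(bh)) {
                bh->b_end_io = end_buffer_write_sync;
                submit_bh(WRITE_SYNC, bh);
        } else {
                unlock_buffer(bh);
                brelse(bh);
        }
}
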


@ -200,7 +200,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
} }
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
n = 0; n = 0;
@ -210,7 +210,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
lock_buffer(bd2->bd_bh); lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh); bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
if (++n >= num) if (++n >= num)
break; break;
@ -352,7 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
sdp->sd_log_num_revoke--; sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
bh = gfs2_log_get_buf(sdp); bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data; mh = (struct gfs2_meta_header *)bh->b_data;
@ -369,7 +369,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
} }
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} }
static void revoke_lo_before_scan(struct gfs2_jdesc *jd, static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@ -571,7 +571,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
ptr = bh_log_ptr(bh); ptr = bh_log_ptr(bh);
get_bh(bh); get_bh(bh);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
while(!list_empty(list)) { while(!list_empty(list)) {
bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list); bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@ -597,7 +597,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
} else { } else {
bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh); bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
} }
submit_bh(WRITE_SYNC_PLUG, bh1); submit_bh(WRITE_SYNC, bh1);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
ptr += 2; ptr += 2;
} }


@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
int nr_underway = 0; int nr_underway = 0;
int write_op = REQ_META | int write_op = REQ_META |
(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page)); BUG_ON(!page_has_buffers(page));
@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
const struct address_space_operations gfs2_meta_aops = { const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage, .writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage, .releasepage = gfs2_releasepage,
.sync_page = block_sync_page,
}; };
/** /**


@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = { const struct address_space_operations hfs_btree_aops = {
.readpage = hfs_readpage, .readpage = hfs_readpage,
.writepage = hfs_writepage, .writepage = hfs_writepage,
.sync_page = block_sync_page,
.write_begin = hfs_write_begin, .write_begin = hfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = hfs_bmap, .bmap = hfs_bmap,
@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
const struct address_space_operations hfs_aops = { const struct address_space_operations hfs_aops = {
.readpage = hfs_readpage, .readpage = hfs_readpage,
.writepage = hfs_writepage, .writepage = hfs_writepage,
.sync_page = block_sync_page,
.write_begin = hfs_write_begin, .write_begin = hfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = hfs_bmap, .bmap = hfs_bmap,


@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = { const struct address_space_operations hfsplus_btree_aops = {
.readpage = hfsplus_readpage, .readpage = hfsplus_readpage,
.writepage = hfsplus_writepage, .writepage = hfsplus_writepage,
.sync_page = block_sync_page,
.write_begin = hfsplus_write_begin, .write_begin = hfsplus_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = hfsplus_bmap, .bmap = hfsplus_bmap,
@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
const struct address_space_operations hfsplus_aops = { const struct address_space_operations hfsplus_aops = {
.readpage = hfsplus_readpage, .readpage = hfsplus_readpage,
.writepage = hfsplus_writepage, .writepage = hfsplus_writepage,
.sync_page = block_sync_page,
.write_begin = hfsplus_write_begin, .write_begin = hfsplus_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = hfsplus_bmap, .bmap = hfsplus_bmap,


@ -120,7 +120,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations hpfs_aops = { const struct address_space_operations hpfs_aops = {
.readpage = hpfs_readpage, .readpage = hpfs_readpage,
.writepage = hpfs_writepage, .writepage = hpfs_writepage,
.sync_page = block_sync_page,
.write_begin = hpfs_write_begin, .write_begin = hpfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = _hpfs_bmap .bmap = _hpfs_bmap


@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations isofs_aops = { static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage, .readpage = isofs_readpage,
.sync_page = block_sync_page,
.bmap = _isofs_bmap .bmap = _isofs_bmap
}; };


@ -333,7 +333,7 @@ void journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us. * instead we rely on sync_buffer() doing the unplug for us.
*/ */
if (commit_transaction->t_synchronous_commit) if (commit_transaction->t_synchronous_commit)
write_op = WRITE_SYNC_PLUG; write_op = WRITE_SYNC;
spin_lock(&commit_transaction->t_handle_lock); spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) { while (commit_transaction->t_updates) {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
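
In both journal layers the commit path now just picks between WRITE_SYNC and WRITE; there is no plugged variant left to choose. A one-line sketch of that selection (commit_write_op() is a hypothetical helper, the real code assigns write_op inline):

#include <linux/fs.h>

/* Synchronous commits are tagged WRITE_SYNC, everything else plain WRITE. */
static int commit_write_op(int synchronous_commit)
{
        return synchronous_commit ? WRITE_SYNC : WRITE;
}
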


@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER && if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal, !JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh); ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else else
ret = submit_bh(WRITE_SYNC_PLUG, bh); ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh; *cbh = bh;
return ret; return ret;
@ -369,7 +369,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us. * instead we rely on sync_buffer() doing the unplug for us.
*/ */
if (commit_transaction->t_synchronous_commit) if (commit_transaction->t_synchronous_commit)
write_op = WRITE_SYNC_PLUG; write_op = WRITE_SYNC;
trace_jbd2_commit_locking(journal, commit_transaction); trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait; stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_locked = jiffies; stats.run.rs_locked = jiffies;
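
For jbd2 the commit record itself may additionally need a cache flush and FUA when the journal is in barrier mode, so the WRITE_SYNC base flag is combined with WRITE_FLUSH_FUA. A compressed sketch of that decision; the use_barrier parameter stands in for the JBD2_BARRIER / async-commit feature test in journal_submit_commit_record().

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Submit a journal commit record, optionally with flush+FUA semantics. */
static int submit_commit_record(struct buffer_head *bh, int use_barrier)
{
        if (use_barrier)
                return submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);

        return submit_bh(WRITE_SYNC, bh);
}
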


@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
.readpages = jfs_readpages, .readpages = jfs_readpages,
.writepage = jfs_writepage, .writepage = jfs_writepage,
.writepages = jfs_writepages, .writepages = jfs_writepages,
.sync_page = block_sync_page,
.write_begin = jfs_write_begin, .write_begin = jfs_write_begin,
.write_end = nobh_write_end, .write_end = nobh_write_end,
.bmap = jfs_bmap, .bmap = jfs_bmap,


@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
const struct address_space_operations jfs_metapage_aops = { const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage, .readpage = metapage_readpage,
.writepage = metapage_writepage, .writepage = metapage_writepage,
.sync_page = block_sync_page,
.releasepage = metapage_releasepage, .releasepage = metapage_releasepage,
.invalidatepage = metapage_invalidatepage, .invalidatepage = metapage_invalidatepage,
.set_page_dirty = __set_page_dirty_nobuffers, .set_page_dirty = __set_page_dirty_nobuffers,


@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_end_io = request_complete; bio.bi_end_io = request_complete;
submit_bio(rw, &bio); submit_bio(rw, &bio);
generic_unplug_device(bdev_get_queue(bdev));
wait_for_completion(&complete); wait_for_completion(&complete);
return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
} }
@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
} }
len = PAGE_ALIGN(len); len = PAGE_ALIGN(len);
__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
} }
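
The logfs hunk drops the explicit generic_unplug_device() call after submitting a synchronous bio; the submitter just sleeps on a completion, and any pending plug is flushed when it blocks. A self-contained sketch of that submit-and-wait pattern (submit_bio_and_wait() and sync_bio_end_io() are illustrative names, not existing helpers in this tree):

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/errno.h>

/* Completion callback: wake up the submitter. */
static void sync_bio_end_io(struct bio *bio, int err)
{
        complete(bio->bi_private);
}

/* Submit one bio and wait for it to finish; no explicit unplug needed. */
static int submit_bio_and_wait(int rw, struct bio *bio)
{
        DECLARE_COMPLETION_ONSTACK(done);

        bio->bi_private = &done;
        bio->bi_end_io = sync_bio_end_io;
        submit_bio(rw, bio);
        wait_for_completion(&done);

        return test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
}
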


@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = { static const struct address_space_operations minix_aops = {
.readpage = minix_readpage, .readpage = minix_readpage,
.writepage = minix_writepage, .writepage = minix_writepage,
.sync_page = block_sync_page,
.write_begin = minix_write_begin, .write_begin = minix_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = minix_bmap .bmap = minix_bmap


@ -364,6 +364,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0; sector_t last_block_in_bio = 0;
struct buffer_head map_bh; struct buffer_head map_bh;
unsigned long first_logical_block = 0; unsigned long first_logical_block = 0;
struct blk_plug plug;
blk_start_plug(&plug);
map_bh.b_state = 0; map_bh.b_state = 0;
map_bh.b_size = 0; map_bh.b_size = 0;
@ -385,6 +388,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
BUG_ON(!list_empty(pages)); BUG_ON(!list_empty(pages));
if (bio) if (bio)
mpage_bio_submit(READ, bio); mpage_bio_submit(READ, bio);
blk_finish_plug(&plug);
return 0; return 0;
} }
EXPORT_SYMBOL(mpage_readpages); EXPORT_SYMBOL(mpage_readpages);
@ -666,8 +670,11 @@ int
mpage_writepages(struct address_space *mapping, mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block) struct writeback_control *wbc, get_block_t get_block)
{ {
struct blk_plug plug;
int ret; int ret;
blk_start_plug(&plug);
if (!get_block) if (!get_block)
ret = generic_writepages(mapping, wbc); ret = generic_writepages(mapping, wbc);
else { else {
@ -682,6 +689,7 @@ mpage_writepages(struct address_space *mapping,
if (mpd.bio) if (mpd.bio)
mpage_bio_submit(WRITE, mpd.bio); mpage_bio_submit(WRITE, mpd.bio);
} }
blk_finish_plug(&plug);
return ret; return ret;
} }
EXPORT_SYMBOL(mpage_writepages); EXPORT_SYMBOL(mpage_writepages);
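
The mpage changes above show the replacement model in action: instead of plugging the device queue, the caller opens an on-stack plug around a batch of submissions and closes it when done. A minimal sketch of that pattern, assuming only blk_start_plug()/blk_finish_plug() and an array of ready-to-go bios; submit_read_batch() is a made-up helper for illustration.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>

/*
 * Batch a series of read bios behind a per-task plug.  The plug is
 * flushed by blk_finish_plug(), or earlier if the task has to sleep,
 * so no explicit unplug call is required anywhere.
 */
static void submit_read_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);
}
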


@ -34,15 +34,10 @@
#include "page.h" #include "page.h"
#include "btnode.h" #include "btnode.h"
static const struct address_space_operations def_btnode_aops = {
.sync_page = block_sync_page,
};
void nilfs_btnode_cache_init(struct address_space *btnc, void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi) struct backing_dev_info *bdi)
{ {
nilfs_mapping_init(btnc, bdi, &def_btnode_aops); nilfs_mapping_init(btnc, bdi);
} }
void nilfs_btnode_cache_clear(struct address_space *btnc) void nilfs_btnode_cache_clear(struct address_space *btnc)


@ -49,7 +49,6 @@
#include "ifile.h" #include "ifile.h"
static const struct address_space_operations def_gcinode_aops = { static const struct address_space_operations def_gcinode_aops = {
.sync_page = block_sync_page,
}; };
/* /*


@ -262,7 +262,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
const struct address_space_operations nilfs_aops = { const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage, .writepage = nilfs_writepage,
.readpage = nilfs_readpage, .readpage = nilfs_readpage,
.sync_page = block_sync_page,
.writepages = nilfs_writepages, .writepages = nilfs_writepages,
.set_page_dirty = nilfs_set_page_dirty, .set_page_dirty = nilfs_set_page_dirty,
.readpages = nilfs_readpages, .readpages = nilfs_readpages,


@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = { static const struct address_space_operations def_mdt_aops = {
.writepage = nilfs_mdt_write_page, .writepage = nilfs_mdt_write_page,
.sync_page = block_sync_page,
}; };
static const struct inode_operations def_mdt_iops; static const struct inode_operations def_mdt_iops;
@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
} }
static const struct address_space_operations shadow_map_aops = {
.sync_page = block_sync_page,
};
/** /**
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file * @inode: inode of the metadata file
@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
INIT_LIST_HEAD(&shadow->frozen_buffers); INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data); address_space_init_once(&shadow->frozen_data);
nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); nilfs_mapping_init(&shadow->frozen_data, bdi);
address_space_init_once(&shadow->frozen_btnodes); address_space_init_once(&shadow->frozen_btnodes);
nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
mi->mi_shadow = shadow; mi->mi_shadow = shadow;
return 0; return 0;
} }


@ -493,15 +493,14 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
} }
void nilfs_mapping_init(struct address_space *mapping, void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi, struct backing_dev_info *bdi)
const struct address_space_operations *aops)
{ {
mapping->host = NULL; mapping->host = NULL;
mapping->flags = 0; mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS); mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL; mapping->assoc_mapping = NULL;
mapping->backing_dev_info = bdi; mapping->backing_dev_info = bdi;
mapping->a_ops = aops; mapping->a_ops = NULL;
} }
/* /*


@ -62,8 +62,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *); void nilfs_clear_dirty_pages(struct address_space *);
void nilfs_mapping_init(struct address_space *mapping, void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi, struct backing_dev_info *bdi);
const struct address_space_operations *aops);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode, unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk, sector_t start_blk,


@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following * Last BIO is always sent through the following
* submission. * submission.
*/ */
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
} }


@ -1543,8 +1543,6 @@ err_out:
*/ */
const struct address_space_operations ntfs_aops = { const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */ .readpage = ntfs_readpage, /* Fill page with data. */
.sync_page = block_sync_page, /* Currently, just unplugs the
disk request queue. */
#ifdef NTFS_RW #ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */ .writepage = ntfs_writepage, /* Write dirty page to disk. */
#endif /* NTFS_RW */ #endif /* NTFS_RW */
@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
*/ */
const struct address_space_operations ntfs_mst_aops = { const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */ .readpage = ntfs_readpage, /* Fill page with data. */
.sync_page = block_sync_page, /* Currently, just unplugs the
disk request queue. */
#ifdef NTFS_RW #ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */ .writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty


@ -698,8 +698,7 @@ lock_retry_remap:
"uptodate! Unplugging the disk queue " "uptodate! Unplugging the disk queue "
"and rescheduling."); "and rescheduling.");
get_bh(tbh); get_bh(tbh);
blk_run_address_space(mapping); io_schedule();
schedule();
put_bh(tbh); put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh))) if (unlikely(!buffer_uptodate(tbh)))
goto read_err; goto read_err;


@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = {
.write_begin = ocfs2_write_begin, .write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end, .write_end = ocfs2_write_end,
.bmap = ocfs2_bmap, .bmap = ocfs2_bmap,
.sync_page = block_sync_page,
.direct_IO = ocfs2_direct_IO, .direct_IO = ocfs2_direct_IO,
.invalidatepage = ocfs2_invalidatepage, .invalidatepage = ocfs2_invalidatepage,
.releasepage = ocfs2_releasepage, .releasepage = ocfs2_releasepage,

Some files were not shown because too many files have changed in this diff.