Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Remove the extra check in queue_requests_store
  block, blk-sysfs: Fix an err return path in blk_register_queue()
  block: remove stale kerneldoc member from __blk_run_queue()
  block: get rid of QUEUE_FLAG_REENTER
  cfq-iosched: read_lock() does not always imply rcu_read_lock()
  block: kill blk_flush_plug_list() export
commit 73aa86825f
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
-        /*
-         * Only recurse once to avoid overrunning the stack, let the unplug
-         * handling reinvoke the handler shortly if we already got there.
-         */
-        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                q->request_fn(q);
-                queue_flag_clear(QUEUE_FLAG_REENTER, q);
-        } else
-                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+        q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
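
For reference, after this hunk __blk_run_queue() is reduced to the stopped check plus a direct call into the driver; recursion avoidance is now the caller's problem, handled by punting to kblockd via blk_run_queue_async() instead of re-entering. The sketch below is reconstructed from the diff above (not compiled here), with comments added:

/*
 * Sketch of __blk_run_queue() as it reads after this hunk (reconstructed
 * from the diff above). Per the kerneldoc, it must be called with the
 * queue lock held; callers that might recurse use blk_run_queue_async().
 */
void __blk_run_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        /* no QUEUE_FLAG_REENTER guard any more: invoke the driver directly */
        q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
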
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
         if (likely(!blk_queue_stopped(q)))
                 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
         if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                 blk_set_queue_full(q, BLK_RW_SYNC);
-        } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+        } else {
                 blk_clear_queue_full(q, BLK_RW_SYNC);
                 wake_up(&rl->wait[BLK_RW_SYNC]);
         }
 
         if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                 blk_set_queue_full(q, BLK_RW_ASYNC);
-        } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+        } else {
                 blk_clear_queue_full(q, BLK_RW_ASYNC);
                 wake_up(&rl->wait[BLK_RW_ASYNC]);
         }
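
The dropped "else if" condition could never be false once the first test failed: for integer counters, !(count >= nr_requests) already implies count + 1 <= nr_requests, so a plain "else" covers every case. The standalone userspace program below (illustrative only; the value ranges are arbitrary, not taken from the kernel) demonstrates the implication:

/*
 * Standalone illustration (userspace, not kernel code) of why the removed
 * check in queue_requests_store() was redundant.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (int nr = 4; nr <= 128; nr++) {             /* hypothetical nr_requests values */
                for (int count = 0; count <= 2 * nr; count++) {
                        if (count >= nr)
                                continue;               /* "queue full" branch taken */
                        /* the removed check: always true whenever we get here */
                        assert(count + 1 <= nr);
                }
        }
        printf("count < nr always implies count + 1 <= nr\n");
        return 0;
}
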
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
                 return ret;
 
         ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-        if (ret < 0)
+        if (ret < 0) {
+                blk_trace_remove_sysfs(dev);
                 return ret;
+        }
 
         kobject_uevent(&q->kobj, KOBJ_ADD);
 
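
The fix makes the kobject_add() failure path undo the blktrace sysfs state that blk_register_queue() set up earlier (that setup is outside the hunk). Roughly, the tail of the function now reads as below, reconstructed from the hunk with surrounding code elided:

/*
 * Error-path sketch of blk_register_queue() after this hunk (reconstructed
 * from the diff above; earlier setup code elided).
 */
        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                /* tear down blktrace sysfs attributes registered earlier */
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);
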
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
@@ -2581,20 +2581,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
         cfq_put_cfqg(cfqg);
 }
 
-/*
- * Must always be called with the rcu_read_lock() held
- */
-static void
-__call_for_each_cic(struct io_context *ioc,
-                    void (*func)(struct io_context *, struct cfq_io_context *))
-{
-        struct cfq_io_context *cic;
-        struct hlist_node *n;
-
-        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-                func(ioc, cic);
-}
-
 /*
  * Call func for each cic attached to this ioc.
  */
@@ -2602,8 +2588,14 @@ static void
 call_for_each_cic(struct io_context *ioc,
                   void (*func)(struct io_context *, struct cfq_io_context *))
 {
+        struct cfq_io_context *cic;
+        struct hlist_node *n;
+
         rcu_read_lock();
-        __call_for_each_cic(ioc, func);
+
+        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+                func(ioc, cic);
+
         rcu_read_unlock();
 }
 
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
          * should be ok to iterate over the known list, we will see all cic's
          * since no new ones are added.
          */
-        __call_for_each_cic(ioc, cic_free_func);
+        call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
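
Net effect of the three cfq hunks: the __call_for_each_cic() helper that merely assumed rcu_read_lock() was held is gone, the iteration lives directly in call_for_each_cic(), which now always takes the RCU read lock itself, and cfq_free_io_context() switches to that locked variant. As it reads after these hunks (reconstructed from the diffs above):

/*
 * call_for_each_cic() after these hunks (reconstructed from the diffs
 * above): the RCU read lock is taken here unconditionally instead of
 * being assumed from the caller.
 */
static void
call_for_each_cic(struct io_context *ioc,
                  void (*func)(struct io_context *, struct cfq_io_context *))
{
        struct cfq_io_context *cic;
        struct hlist_node *n;

        rcu_read_lock();

        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                func(ioc, cic);

        rcu_read_unlock();
}
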
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
         list_splice_init(&shost->starved_list, &starved_list);
 
         while (!list_empty(&starved_list)) {
-                int flagset;
-
                 /*
                  * As long as shost is accepting commands and we have
                  * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
                         continue;
                 }
 
-                spin_unlock(shost->host_lock);
-
-                spin_lock(sdev->request_queue->queue_lock);
-                flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-                                !test_bit(QUEUE_FLAG_REENTER,
-                                        &sdev->request_queue->queue_flags);
-                if (flagset)
-                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-                __blk_run_queue(sdev->request_queue);
-                if (flagset)
-                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-                spin_unlock(sdev->request_queue->queue_lock);
-
-                spin_lock(shost->host_lock);
+                blk_run_queue_async(sdev->request_queue);
         }
         /* put any unprocessed entries back */
         list_splice(&starved_list, &shost->starved_list);
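
With QUEUE_FLAG_REENTER gone, the starved-queue loop no longer drops host_lock, takes each device's queue_lock and juggles the re-entrancy flag around __blk_run_queue(); it simply asks kblockd to run the queue asynchronously. Roughly, reconstructed from the two hunks above (the unchanged code that selects a starved sdev is elided):

/*
 * Sketch of the scsi_run_queue() starved-list loop after these hunks
 * (reconstructed from the diffs above; unchanged device-selection code
 * is elided).
 */
        while (!list_empty(&starved_list)) {
                /* ... pick a starved sdev, continue if the host is busy ... */

                /* host_lock stays held; kblockd runs the queue for us */
                blk_run_queue_async(sdev->request_queue);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
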
@@ -3816,28 +3816,17 @@ fail_host_msg:
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-        int flagset;
-        unsigned long flags;
-
         if (!rport->rqst_q)
                 return;
 
+        /*
+         * This get/put dance makes no sense
+         */
         get_device(&rport->dev);
-
-        spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-        flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-        if (flagset)
-                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-        __blk_run_queue(rport->rqst_q);
-        if (flagset)
-                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+        blk_run_queue_async(rport->rqst_q);
         put_device(&rport->dev);
 }
 
-
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q: rport request queue
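
The same simplification lands in the FC transport: fc_bsg_goose_queue() now just pins the rport and kicks its request queue asynchronously. As it reads after this hunk (reconstructed from the diff above):

/* fc_bsg_goose_queue() after this hunk (reconstructed from the diff above). */
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
        if (!rport->rqst_q)
                return;

        /*
         * This get/put dance makes no sense
         */
        get_device(&rport->dev);
        blk_run_queue_async(rport->rqst_q);
        put_device(&rport->dev);
}
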
@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL    3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD        5       /* queue being torn down */
-#define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH   7       /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI        8       /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES    9       /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   10      /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     11      /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   12      /* supports request stacking */
-#define QUEUE_FLAG_NONROT      13      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH   6       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI        7       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES    8       /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   9       /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     10      /* fake timeout */
+#define QUEUE_FLAG_STACKABLE   11      /* supports request stacking */
+#define QUEUE_FLAG_NONROT      12      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
-#define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES   17      /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM  18      /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  19      /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT     13      /* do IO stats */
+#define QUEUE_FLAG_DISCARD     14      /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   15      /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  16      /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  17      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                            struct rq_map_data *, void __user *, unsigned long,
                            gfp_t);