block: add queue flag for SSD/non-rotational devices

We don't want to idle in AS/CFQ if the device doesn't have a seek
penalty. So add a QUEUE_FLAG_NONROT to indicate a non-rotational
device; low-level drivers should set this flag upon discovery of
an SSD or similar device type.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe, 2008-09-24 13:03:33 +02:00
commit a68bbddba4 (parent 9e49184c82)
3 changed files with 14 additions and 0 deletions
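For context, a low-level driver that knows its media has no seek penalty would set the new flag while bringing up its request queue; the new blk_queue_nonrot() test is what AS and CFQ consult in the hunks below. The sketch that follows is illustrative only: example_init_queue_flags and its is_ssd parameter are invented for this note, and queue_flag_set_unlocked() is assumed to be acceptable because the queue is not yet visible to other contexts (otherwise queue_flag_set() under the queue lock would be the conservative choice).

#include <linux/blkdev.h>

/*
 * Hypothetical driver-side sketch, not part of this commit: mark the
 * queue as non-rotational during driver initialisation so AS/CFQ skip
 * idling on it.
 */
static void example_init_queue_flags(struct request_queue *q, bool is_ssd)
{
	if (is_ssd)
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
}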

block/as-iosched.c

@@ -745,6 +745,12 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+	/*
+	 * SSD device without seek penalty, disable idling
+	 */
+	if (blk_queue_nonrot(ad->q))
+		return 0;
+
 	if (!ad->io_context)
 		/*
 		 * Last request submitted was a write

block/cfq-iosched.c

@@ -878,6 +878,12 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
+	/*
+	 * SSD device without seek penalty, disable idling
+	 */
+	if (blk_queue_nonrot(cfqd->queue))
+		return;
+
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfq_cfqq_slice_new(cfqq));

include/linux/blkdev.h

@@ -442,6 +442,7 @@ struct request_queue
 #define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
 #define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -547,6 +548,7 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)