Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block

Manual fixup for merge with Jens' "Suspend support for libata", commit
ID 9b84754866.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author: Linus Torvalds <torvalds@osdl.org>
Date:   2006-01-06 09:01:25 -08:00
Commit: d99cf9d679

50 changed files with 707 additions and 656 deletions
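
The bulk of this merge is the reworked barrier/ordered-write interface: blk_queue_ordered() now takes the ordered mode and a prepare_flush_fn, and returns an error for invalid combinations. A minimal sketch of how a driver would register under the new interface; the mydrv_* names and the MYDRV_CMD_FLUSH_CACHE opcode are illustrative, not part of this commit:

/* Sketch only: mydrv_* and MYDRV_CMD_FLUSH_CACHE are hypothetical. */
static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
        /* Turn rq into whatever "flush cache" command the hardware needs. */
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = MYDRV_CMD_FLUSH_CACHE;
        rq->buffer = rq->cmd;
}

static int mydrv_init_queue(request_queue_t *q, int has_wcache)
{
        /*
         * With this merge, blk_queue_ordered() takes the ordered mode and a
         * prepare_flush_fn and rejects invalid combinations.
         */
        if (has_wcache)
                return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                         mydrv_prepare_flush);
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
}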


@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
 The generic i/o scheduler would make sure that it places the barrier request and
 all other requests coming after it after all the previous requests in the
 queue. Barriers may be implemented in different ways depending on the
-driver. A SCSI driver for example could make use of ordered tags to
-preserve the necessary ordering with a lower impact on throughput. For IDE
-this might be two sync cache flush: a pre and post flush when encountering
-a barrier write.
-
-There is a provision for queues to indicate what kind of barriers they
-can provide. This is as of yet unmerged, details will be added here once it
-is in the kernel.
+driver. For more details regarding I/O barriers, please read barrier.txt
+in this directory.
 
 1.2.2 Request Priority/Latency
 
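
For context, the barrier described above is requested from the submitter's side with a bio flag; a minimal sketch (not part of this commit), assuming the 2.6-era bio interface whose bio_barrier() test appears later in this diff:

/* Sketch: issue a write that must act as a barrier. */
static void submit_barrier_write(struct bio *bio)
{
        /* bio_barrier() in the block layer tests exactly this bit. */
        submit_bio(WRITE | (1 << BIO_RW_BARRIER), bio);
}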


@ -182,6 +182,9 @@ struct as_rq {
static kmem_cache_t *arq_pool; static kmem_cache_t *arq_pool;
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);
/* /*
* IO Context helper functions * IO Context helper functions
*/ */
@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
* existing request against the same sector), which can happen when using * existing request against the same sector), which can happen when using
* direct IO, then return the alias. * direct IO, then return the alias.
*/ */
static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq) static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{ {
struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node; struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
return NULL; return NULL;
} }
static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
struct as_rq *alias;
while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
as_move_to_dispatch(ad, alias);
as_antic_stop(ad);
}
}
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{ {
if (!ON_RB(&arq->rb_node)) { if (!ON_RB(&arq->rb_node)) {
@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
/* /*
* take it off the sort and fifo list, add to dispatch queue * take it off the sort and fifo list, add to dispatch queue
*/ */
while (!list_empty(&rq->queuelist)) {
struct request *__rq = list_entry_rq(rq->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);
list_del(&__rq->queuelist);
elv_dispatch_add_tail(ad->q, __rq);
if (__arq->io_context && __arq->io_context->aic)
atomic_inc(&__arq->io_context->aic->nr_dispatched);
WARN_ON(__arq->state != AS_RQ_QUEUED);
__arq->state = AS_RQ_DISPATCHED;
ad->nr_dispatched++;
}
as_remove_queued_request(ad->q, rq); as_remove_queued_request(ad->q, rq);
WARN_ON(arq->state != AS_RQ_QUEUED); WARN_ON(arq->state != AS_RQ_QUEUED);
@ -1325,42 +1321,6 @@ fifo_expired:
return 1; return 1;
} }
/*
* Add arq to a list behind alias
*/
static inline void
as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
struct as_rq *alias)
{
struct request *req = arq->request;
struct list_head *insert = alias->request->queuelist.prev;
/*
* Transfer list of aliases
*/
while (!list_empty(&req->queuelist)) {
struct request *__rq = list_entry_rq(req->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);
list_move_tail(&__rq->queuelist, &alias->request->queuelist);
WARN_ON(__arq->state != AS_RQ_QUEUED);
}
/*
* Another request with the same start sector on the rbtree.
* Link this request to that sector. They are untangled in
* as_move_to_dispatch
*/
list_add(&arq->request->queuelist, insert);
/*
* Don't want to have to handle merges.
*/
as_del_arq_hash(arq);
arq->request->flags |= REQ_NOMERGE;
}
/* /*
* add arq to rbtree and fifo * add arq to rbtree and fifo
*/ */
@ -1368,7 +1328,6 @@ static void as_add_request(request_queue_t *q, struct request *rq)
{ {
struct as_data *ad = q->elevator->elevator_data; struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq); struct as_rq *arq = RQ_DATA(rq);
struct as_rq *alias;
int data_dir; int data_dir;
arq->state = AS_RQ_NEW; arq->state = AS_RQ_NEW;
@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
atomic_inc(&arq->io_context->aic->nr_queued); atomic_inc(&arq->io_context->aic->nr_queued);
} }
alias = as_add_arq_rb(ad, arq); as_add_arq_rb(ad, arq);
if (!alias) { if (rq_mergeable(arq->request))
/* as_add_arq_hash(ad, arq);
* set expire time (only used for reads) and add to fifo list
*/
arq->expires = jiffies + ad->fifo_expire[data_dir];
list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
if (rq_mergeable(arq->request)) /*
as_add_arq_hash(ad, arq); * set expire time (only used for reads) and add to fifo list
as_update_arq(ad, arq); /* keep state machine up to date */ */
arq->expires = jiffies + ad->fifo_expire[data_dir];
} else { list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
as_add_aliased_request(ad, arq, alias);
/*
* have we been anticipating this request?
* or does it come from the same process as the one we are
* anticipating for?
*/
if (ad->antic_status == ANTIC_WAIT_REQ
|| ad->antic_status == ANTIC_WAIT_NEXT) {
if (as_can_break_anticipation(ad, arq))
as_antic_stop(ad);
}
}
as_update_arq(ad, arq); /* keep state machine up to date */
arq->state = AS_RQ_QUEUED; arq->state = AS_RQ_QUEUED;
} }
@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
* if the merge was a front merge, we need to reposition request * if the merge was a front merge, we need to reposition request
*/ */
if (rq_rb_key(req) != arq->rb_key) { if (rq_rb_key(req) != arq->rb_key) {
struct as_rq *alias, *next_arq = NULL;
if (ad->next_arq[arq->is_sync] == arq)
next_arq = as_find_next_arq(ad, arq);
/*
* Note! We should really be moving any old aliased requests
* off this request and try to insert them into the rbtree. We
* currently don't bother. Ditto the next function.
*/
as_del_arq_rb(ad, arq); as_del_arq_rb(ad, arq);
if ((alias = as_add_arq_rb(ad, arq))) { as_add_arq_rb(ad, arq);
list_del_init(&arq->fifo);
as_add_aliased_request(ad, arq, alias);
if (next_arq)
ad->next_arq[arq->is_sync] = next_arq;
}
/* /*
* Note! At this stage of this and the next function, our next * Note! At this stage of this and the next function, our next
* request may not be optimal - eg the request may have "grown" * request may not be optimal - eg the request may have "grown"
@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
as_add_arq_hash(ad, arq); as_add_arq_hash(ad, arq);
if (rq_rb_key(req) != arq->rb_key) { if (rq_rb_key(req) != arq->rb_key) {
struct as_rq *alias, *next_arq = NULL;
if (ad->next_arq[arq->is_sync] == arq)
next_arq = as_find_next_arq(ad, arq);
as_del_arq_rb(ad, arq); as_del_arq_rb(ad, arq);
if ((alias = as_add_arq_rb(ad, arq))) { as_add_arq_rb(ad, arq);
list_del_init(&arq->fifo);
as_add_aliased_request(ad, arq, alias);
if (next_arq)
ad->next_arq[arq->is_sync] = next_arq;
}
} }
/* /*
@ -1609,18 +1527,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
} }
} }
/*
* Transfer list of aliases
*/
while (!list_empty(&next->queuelist)) {
struct request *__rq = list_entry_rq(next->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);
list_move_tail(&__rq->queuelist, &req->queuelist);
WARN_ON(__arq->state != AS_RQ_QUEUED);
}
/* /*
* kill knowledge of next, this one is a goner * kill knowledge of next, this one is a goner
*/ */
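
The anticipatory-scheduler hunks above drop the aliased-request bookkeeping (as_add_aliased_request() and the queuelist transfer loops); an alias found during rb-tree insertion is now dispatched immediately. Since the added wrapper is hard to read in the flattened diff, here it is reflowed with comments (same logic as the new as_add_arq_rb() above):

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
        struct as_rq *alias;

        /*
         * __as_add_arq_rb() returns an existing request at the same sector,
         * if any.  Instead of chaining aliases behind it, push the old one
         * straight to the dispatch queue, stop anticipation, and retry.
         */
        while ((alias = __as_add_arq_rb(ad, arq)) != NULL) {
                as_move_to_dispatch(ad, alias);
                as_antic_stop(ad);
        }
}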


@@ -25,15 +25,15 @@
 /*
  * tunables
  */
-static int cfq_quantum = 4;		/* max queue in one round of service */
-static int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
-static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
-static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
-static int cfq_slice_sync = HZ / 10;
+static const int cfq_quantum = 4;	/* max queue in one round of service */
+static const int cfq_queued = 8;	/* minimum rq allocate limit per-queue*/
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
+static const int cfq_back_penalty = 2;	/* penalty of a backwards seek */
+static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
-static int cfq_slice_async_rq = 2;
+static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 100;
 
 #define CFQ_IDLE_GRACE	(HZ / 10)
 
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
 /*
  * disable queueing at the driver/hardware level
  */
-static int cfq_max_depth = 2;
+static const int cfq_max_depth = 2;
 
 /*
  * for the hash of cfqq inside the cfqd


@@ -19,10 +19,10 @@
 /*
  * See Documentation/block/deadline-iosched.txt
  */
-static int read_expire = HZ / 2;  /* max time before a read is submitted. */
-static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static int writes_starved = 2;    /* max times reads can starve a write */
-static int fifo_batch = 16;       /* # of sequential requests treated as one
+static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2;    /* max times reads can starve a write */
+static const int fifo_batch = 16;       /* # of sequential requests treated as one
 				     by the above parameters. For throughput. */
 
 static const int deadline_hash_shift = 5;


@ -304,15 +304,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
rq->flags &= ~REQ_STARTED; rq->flags &= ~REQ_STARTED;
/* __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
* if this is the flush, requeue the original instead and drop the flush
*/
if (rq->flags & REQ_BAR_FLUSH) {
clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
rq = rq->end_io_data;
}
__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
} }
static void elv_drain_elevator(request_queue_t *q) static void elv_drain_elevator(request_queue_t *q)
@ -332,7 +324,18 @@ static void elv_drain_elevator(request_queue_t *q)
void __elv_add_request(request_queue_t *q, struct request *rq, int where, void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug) int plug)
{ {
struct list_head *pos;
unsigned ordseq;
if (q->ordcolor)
rq->flags |= REQ_ORDERED_COLOR;
if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
/*
* toggle ordered color
*/
q->ordcolor ^= 1;
/* /*
* barriers implicitly indicate back insertion * barriers implicitly indicate back insertion
*/ */
@ -393,6 +396,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
q->elevator->ops->elevator_add_req_fn(q, rq); q->elevator->ops->elevator_add_req_fn(q, rq);
break; break;
case ELEVATOR_INSERT_REQUEUE:
/*
* If ordered flush isn't in progress, we do front
* insertion; otherwise, requests should be requeued
* in ordseq order.
*/
rq->flags |= REQ_SOFTBARRIER;
if (q->ordseq == 0) {
list_add(&rq->queuelist, &q->queue_head);
break;
}
ordseq = blk_ordered_req_seq(rq);
list_for_each(pos, &q->queue_head) {
struct request *pos_rq = list_entry_rq(pos);
if (ordseq <= blk_ordered_req_seq(pos_rq))
break;
}
list_add_tail(&rq->queuelist, pos);
break;
default: default:
printk(KERN_ERR "%s: bad insertion point %d\n", printk(KERN_ERR "%s: bad insertion point %d\n",
__FUNCTION__, where); __FUNCTION__, where);
@ -422,25 +449,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
{ {
struct request *rq; struct request *rq;
if (unlikely(list_empty(&q->queue_head) && while (1) {
!q->elevator->ops->elevator_dispatch_fn(q, 0))) while (!list_empty(&q->queue_head)) {
return NULL; rq = list_entry_rq(q->queue_head.next);
if (blk_do_ordered(q, &rq))
return rq;
}
rq = list_entry_rq(q->queue_head.next); if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
return NULL;
/*
* if this is a barrier write and the device has to issue a
* flush sequence to support it, check how far we are
*/
if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
if (q->ordered == QUEUE_ORDERED_FLUSH &&
!blk_barrier_preflush(rq))
rq = blk_start_pre_flush(q, rq);
} }
return rq;
} }
struct request *elv_next_request(request_queue_t *q) struct request *elv_next_request(request_queue_t *q)
@ -498,7 +516,7 @@ struct request *elv_next_request(request_queue_t *q)
blkdev_dequeue_request(rq); blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET; rq->flags |= REQ_QUIET;
end_that_request_chunk(rq, 0, nr_bytes); end_that_request_chunk(rq, 0, nr_bytes);
end_that_request_last(rq); end_that_request_last(rq, 0);
} else { } else {
printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__, printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
ret); ret);
@ -593,7 +611,21 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
* request is released from the driver, io must be done * request is released from the driver, io must be done
*/ */
if (blk_account_rq(rq)) { if (blk_account_rq(rq)) {
struct request *first_rq = list_entry_rq(q->queue_head.next);
q->in_flight--; q->in_flight--;
/*
* Check if the queue is waiting for fs requests to be
* drained for flush sequence.
*/
if (q->ordseq && q->in_flight == 0 &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
q->request_fn(q);
}
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq); e->ops->elevator_completed_req_fn(q, rq);
} }
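
The elevator.c hunks above replace the old "requeue the original instead of the flush" special case with a generic ELEVATOR_INSERT_REQUEUE type. While an ordered sequence is in flight, a requeued request is not simply put at the head; it is inserted so the queue stays sorted by ordered-sequence stage. A sketch of that rule, lifted from the added case and annotated (the helper name requeue_in_order is illustrative, not in the kernel):

/* Sketch of where ELEVATOR_INSERT_REQUEUE places a request while q->ordseq
 * is active, matching the added code above. */
static void requeue_in_order(request_queue_t *q, struct request *rq)
{
        unsigned ordseq = blk_ordered_req_seq(rq);
        struct list_head *pos;

        list_for_each(pos, &q->queue_head) {
                struct request *pos_rq = list_entry_rq(pos);
                if (ordseq <= blk_ordered_req_seq(pos_rq))
                        break;
        }
        /* list_add_tail() on "pos" inserts rq just before it, i.e. after
         * every request in an earlier ordered-sequence stage. */
        list_add_tail(&rq->queuelist, pos);
}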


@ -36,6 +36,8 @@
static void blk_unplug_work(void *data); static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data); static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(request_queue_t *q, struct bio *bio);
/* /*
* For the allocated request tables * For the allocated request tables
@ -288,8 +290,8 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
/** /**
* blk_queue_ordered - does this queue support ordered writes * blk_queue_ordered - does this queue support ordered writes
* @q: the request queue * @q: the request queue
* @flag: see below * @ordered: one of QUEUE_ORDERED_*
* *
* Description: * Description:
* For journalled file systems, doing ordered writes on a commit * For journalled file systems, doing ordered writes on a commit
@ -298,28 +300,30 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
* feature should call this function and indicate so. * feature should call this function and indicate so.
* *
**/ **/
void blk_queue_ordered(request_queue_t *q, int flag) int blk_queue_ordered(request_queue_t *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{ {
switch (flag) { if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
case QUEUE_ORDERED_NONE: prepare_flush_fn == NULL) {
if (q->flush_rq) printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
kmem_cache_free(request_cachep, q->flush_rq); return -EINVAL;
q->flush_rq = NULL;
q->ordered = flag;
break;
case QUEUE_ORDERED_TAG:
q->ordered = flag;
break;
case QUEUE_ORDERED_FLUSH:
q->ordered = flag;
if (!q->flush_rq)
q->flush_rq = kmem_cache_alloc(request_cachep,
GFP_KERNEL);
break;
default:
printk("blk_queue_ordered: bad value %d\n", flag);
break;
} }
if (ordered != QUEUE_ORDERED_NONE &&
ordered != QUEUE_ORDERED_DRAIN &&
ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
ordered != QUEUE_ORDERED_DRAIN_FUA &&
ordered != QUEUE_ORDERED_TAG &&
ordered != QUEUE_ORDERED_TAG_FLUSH &&
ordered != QUEUE_ORDERED_TAG_FUA) {
printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
return -EINVAL;
}
q->next_ordered = ordered;
q->prepare_flush_fn = prepare_flush_fn;
return 0;
} }
EXPORT_SYMBOL(blk_queue_ordered); EXPORT_SYMBOL(blk_queue_ordered);
@ -344,167 +348,265 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
/* /*
* Cache flushing for ordered writes handling * Cache flushing for ordered writes handling
*/ */
static void blk_pre_flush_end_io(struct request *flush_rq) inline unsigned blk_ordered_cur_seq(request_queue_t *q)
{
if (!q->ordseq)
return 0;
return 1 << ffz(q->ordseq);
}
unsigned blk_ordered_req_seq(struct request *rq)
{ {
struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q; request_queue_t *q = rq->q;
elv_completed_request(q, flush_rq); BUG_ON(q->ordseq == 0);
rq->flags |= REQ_BAR_PREFLUSH; if (rq == &q->pre_flush_rq)
return QUEUE_ORDSEQ_PREFLUSH;
if (rq == &q->bar_rq)
return QUEUE_ORDSEQ_BAR;
if (rq == &q->post_flush_rq)
return QUEUE_ORDSEQ_POSTFLUSH;
if (!flush_rq->errors) if ((rq->flags & REQ_ORDERED_COLOR) ==
elv_requeue_request(q, rq); (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
else { return QUEUE_ORDSEQ_DRAIN;
q->end_flush_fn(q, flush_rq); else
clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); return QUEUE_ORDSEQ_DONE;
q->request_fn(q);
}
} }
static void blk_post_flush_end_io(struct request *flush_rq) void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
{ {
struct request *rq = flush_rq->end_io_data; struct request *rq;
request_queue_t *q = rq->q; int uptodate;
elv_completed_request(q, flush_rq); if (error && !q->orderr)
q->orderr = error;
rq->flags |= REQ_BAR_POSTFLUSH; BUG_ON(q->ordseq & seq);
q->ordseq |= seq;
q->end_flush_fn(q, flush_rq); if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); return;
q->request_fn(q);
}
struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
{
struct request *flush_rq = q->flush_rq;
BUG_ON(!blk_barrier_rq(rq));
if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
return NULL;
rq_init(q, flush_rq);
flush_rq->elevator_private = NULL;
flush_rq->flags = REQ_BAR_FLUSH;
flush_rq->rq_disk = rq->rq_disk;
flush_rq->rl = NULL;
/* /*
* prepare_flush returns 0 if no flush is needed, just mark both * Okay, sequence complete.
* pre and post flush as done in that case
*/ */
if (!q->prepare_flush_fn(q, flush_rq)) { rq = q->orig_bar_rq;
rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH; uptodate = q->orderr ? q->orderr : 1;
clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
return rq; q->ordseq = 0;
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
}
static void pre_flush_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}
static void bar_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}
static void post_flush_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
static void queue_flush(request_queue_t *q, unsigned which)
{
struct request *rq;
rq_end_io_fn *end_io;
if (which == QUEUE_ORDERED_PREFLUSH) {
rq = &q->pre_flush_rq;
end_io = pre_flush_end_io;
} else {
rq = &q->post_flush_rq;
end_io = post_flush_end_io;
} }
rq_init(q, rq);
rq->flags = REQ_HARDBARRIER;
rq->elevator_private = NULL;
rq->rq_disk = q->bar_rq.rq_disk;
rq->rl = NULL;
rq->end_io = end_io;
q->prepare_flush_fn(q, rq);
__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
static inline struct request *start_ordered(request_queue_t *q,
struct request *rq)
{
q->bi_size = 0;
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
/* /*
* some drivers dequeue requests right away, some only after io * Prep proxy barrier request.
* completion. make sure the request is dequeued.
*/ */
if (!list_empty(&rq->queuelist)) blkdev_dequeue_request(rq);
blkdev_dequeue_request(rq); q->orig_bar_rq = rq;
rq = &q->bar_rq;
rq_init(q, rq);
rq->flags = bio_data_dir(q->orig_bar_rq->bio);
rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
rq->elevator_private = NULL;
rq->rl = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io;
flush_rq->end_io_data = rq; /*
flush_rq->end_io = blk_pre_flush_end_io; * Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
* request gets inbetween ordered sequence.
*/
if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
else
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
__elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
return flush_rq;
if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
queue_flush(q, QUEUE_ORDERED_PREFLUSH);
rq = &q->pre_flush_rq;
} else
q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
q->ordseq |= QUEUE_ORDSEQ_DRAIN;
else
rq = NULL;
return rq;
} }
static void blk_start_post_flush(request_queue_t *q, struct request *rq) int blk_do_ordered(request_queue_t *q, struct request **rqp)
{ {
struct request *flush_rq = q->flush_rq; struct request *rq = *rqp, *allowed_rq;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
BUG_ON(!blk_barrier_rq(rq)); if (!q->ordseq) {
if (!is_barrier)
return 1;
rq_init(q, flush_rq); if (q->next_ordered != QUEUE_ORDERED_NONE) {
flush_rq->elevator_private = NULL; *rqp = start_ordered(q, rq);
flush_rq->flags = REQ_BAR_FLUSH; return 1;
flush_rq->rq_disk = rq->rq_disk; } else {
flush_rq->rl = NULL; /*
* This can happen when the queue switches to
if (q->prepare_flush_fn(q, flush_rq)) { * ORDERED_NONE while this request is on it.
flush_rq->end_io_data = rq; */
flush_rq->end_io = blk_post_flush_end_io; blkdev_dequeue_request(rq);
end_that_request_first(rq, -EOPNOTSUPP,
__elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); rq->hard_nr_sectors);
q->request_fn(q); end_that_request_last(rq, -EOPNOTSUPP);
*rqp = NULL;
return 0;
}
} }
}
static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq, if (q->ordered & QUEUE_ORDERED_TAG) {
int sectors) if (is_barrier && rq != &q->bar_rq)
{ *rqp = NULL;
if (sectors > rq->nr_sectors) return 1;
sectors = rq->nr_sectors;
rq->nr_sectors -= sectors;
return rq->nr_sectors;
}
static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
int sectors, int queue_locked)
{
if (q->ordered != QUEUE_ORDERED_FLUSH)
return 0;
if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
return 0;
if (blk_barrier_postflush(rq))
return 0;
if (!blk_check_end_barrier(q, rq, sectors)) {
unsigned long flags = 0;
if (!queue_locked)
spin_lock_irqsave(q->queue_lock, flags);
blk_start_post_flush(q, rq);
if (!queue_locked)
spin_unlock_irqrestore(q->queue_lock, flags);
} }
switch (blk_ordered_cur_seq(q)) {
case QUEUE_ORDSEQ_PREFLUSH:
allowed_rq = &q->pre_flush_rq;
break;
case QUEUE_ORDSEQ_BAR:
allowed_rq = &q->bar_rq;
break;
case QUEUE_ORDSEQ_POSTFLUSH:
allowed_rq = &q->post_flush_rq;
break;
default:
allowed_rq = NULL;
break;
}
if (rq != allowed_rq &&
(blk_fs_request(rq) || rq == &q->pre_flush_rq ||
rq == &q->post_flush_rq))
*rqp = NULL;
return 1; return 1;
} }
/** static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
* blk_complete_barrier_rq - complete possible barrier request
* @q: the request queue for the device
* @rq: the request
* @sectors: number of sectors to complete
*
* Description:
* Used in driver end_io handling to determine whether to postpone
* completion of a barrier request until a post flush has been done. This
* is the unlocked variant, used if the caller doesn't already hold the
* queue lock.
**/
int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
{ {
return __blk_complete_barrier_rq(q, rq, sectors, 0); request_queue_t *q = bio->bi_private;
} struct bio_vec *bvec;
EXPORT_SYMBOL(blk_complete_barrier_rq); int i;
/** /*
* blk_complete_barrier_rq_locked - complete possible barrier request * This is dry run, restore bio_sector and size. We'll finish
* @q: the request queue for the device * this request again with the original bi_end_io after an
* @rq: the request * error occurs or post flush is complete.
* @sectors: number of sectors to complete */
* q->bi_size += bytes;
* Description:
* See blk_complete_barrier_rq(). This variant must be used if the caller if (bio->bi_size)
* holds the queue lock. return 1;
**/
int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq, /* Rewind bvec's */
int sectors) bio->bi_idx = 0;
{ bio_for_each_segment(bvec, bio, i) {
return __blk_complete_barrier_rq(q, rq, sectors, 1); bvec->bv_len += bvec->bv_offset;
bvec->bv_offset = 0;
}
/* Reset bio */
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_size = q->bi_size;
bio->bi_sector -= (q->bi_size >> 9);
q->bi_size = 0;
return 0;
}
static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
request_queue_t *q = rq->q;
bio_end_io_t *endio;
void *private;
if (&q->bar_rq != rq)
return 0;
/*
* Okay, this is the barrier request in progress, dry finish it.
*/
if (error && !q->orderr)
q->orderr = error;
endio = bio->bi_end_io;
private = bio->bi_private;
bio->bi_end_io = flush_dry_bio_endio;
bio->bi_private = q;
bio_endio(bio, nbytes, error);
bio->bi_end_io = endio;
bio->bi_private = private;
return 1;
} }
EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
/** /**
* blk_queue_bounce_limit - set bounce buffer limit for queue * blk_queue_bounce_limit - set bounce buffer limit for queue
@ -1039,12 +1141,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
EXPORT_SYMBOL(blk_queue_invalidate_tags); EXPORT_SYMBOL(blk_queue_invalidate_tags);
static char *rq_flags[] = { static const char * const rq_flags[] = {
"REQ_RW", "REQ_RW",
"REQ_FAILFAST", "REQ_FAILFAST",
"REQ_SORTED", "REQ_SORTED",
"REQ_SOFTBARRIER", "REQ_SOFTBARRIER",
"REQ_HARDBARRIER", "REQ_HARDBARRIER",
"REQ_FUA",
"REQ_CMD", "REQ_CMD",
"REQ_NOMERGE", "REQ_NOMERGE",
"REQ_STARTED", "REQ_STARTED",
@ -1064,6 +1167,7 @@ static char *rq_flags[] = {
"REQ_PM_SUSPEND", "REQ_PM_SUSPEND",
"REQ_PM_RESUME", "REQ_PM_RESUME",
"REQ_PM_SHUTDOWN", "REQ_PM_SHUTDOWN",
"REQ_ORDERED_COLOR",
}; };
void blk_dump_rq_flags(struct request *rq, char *msg) void blk_dump_rq_flags(struct request *rq, char *msg)
@ -1641,8 +1745,6 @@ void blk_cleanup_queue(request_queue_t * q)
if (q->queue_tags) if (q->queue_tags)
__blk_queue_free_tags(q); __blk_queue_free_tags(q);
blk_queue_ordered(q, QUEUE_ORDERED_NONE);
kmem_cache_free(requestq_cachep, q); kmem_cache_free(requestq_cachep, q);
} }
@ -1667,8 +1769,6 @@ static int blk_init_free_list(request_queue_t *q)
return 0; return 0;
} }
static int __make_request(request_queue_t *, struct bio *);
request_queue_t *blk_alloc_queue(gfp_t gfp_mask) request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
{ {
return blk_alloc_queue_node(gfp_mask, -1); return blk_alloc_queue_node(gfp_mask, -1);
@ -1908,40 +2008,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
{ {
struct request *rq = NULL; struct request *rq = NULL;
struct request_list *rl = &q->rq; struct request_list *rl = &q->rq;
struct io_context *ioc = current_io_context(GFP_ATOMIC); struct io_context *ioc = NULL;
int priv; int may_queue, priv;
if (rl->count[rw]+1 >= q->nr_requests) { may_queue = elv_may_queue(q, rw, bio);
/* if (may_queue == ELV_MQUEUE_NO)
* The queue will fill after this allocation, so set it as goto rq_starved;
* full, and mark this process as "batching". This process
* will be allowed to complete a batch of requests, others if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
* will be blocked. if (rl->count[rw]+1 >= q->nr_requests) {
*/ ioc = current_io_context(GFP_ATOMIC);
if (!blk_queue_full(q, rw)) { /*
ioc_set_batching(q, ioc); * The queue will fill after this allocation, so set
blk_set_queue_full(q, rw); * it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
if (!blk_queue_full(q, rw)) {
ioc_set_batching(q, ioc);
blk_set_queue_full(q, rw);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
/*
* The queue is full and the allocating
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
goto out;
}
}
} }
set_queue_congested(q, rw);
} }
switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
break;
case ELV_MQUEUE_MUST:
goto get_rq;
}
if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
/*
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
goto out;
}
get_rq:
/* /*
* Only allow batching queuers to allocate up to 50% over the defined * Only allow batching queuers to allocate up to 50% over the defined
* limit of requests, otherwise we could have thousands of requests * limit of requests, otherwise we could have thousands of requests
@ -1952,8 +2052,6 @@ get_rq:
rl->count[rw]++; rl->count[rw]++;
rl->starved[rw] = 0; rl->starved[rw] = 0;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
if (priv) if (priv)
@ -1962,7 +2060,7 @@ get_rq:
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
if (!rq) { if (unlikely(!rq)) {
/* /*
* Allocation failed presumably due to memory. Undo anything * Allocation failed presumably due to memory. Undo anything
* we might have messed up. * we might have messed up.
@ -1987,6 +2085,12 @@ rq_starved:
goto out; goto out;
} }
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
* not count toward the nr_batch_requests limit. There will always
* be some limit enforced by BLK_BATCH_TIME.
*/
if (ioc_batching(q, ioc)) if (ioc_batching(q, ioc))
ioc->nr_batch_requests--; ioc->nr_batch_requests--;
@ -2313,7 +2417,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
*/ */
void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
struct request *rq, int at_head, struct request *rq, int at_head,
void (*done)(struct request *)) rq_end_io_fn *done)
{ {
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@ -2517,7 +2621,7 @@ EXPORT_SYMBOL(blk_put_request);
* blk_end_sync_rq - executes a completion event on a request * blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete * @rq: request to complete
*/ */
void blk_end_sync_rq(struct request *rq) void blk_end_sync_rq(struct request *rq, int error)
{ {
struct completion *waiting = rq->waiting; struct completion *waiting = rq->waiting;
@ -2655,6 +2759,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(blk_attempt_remerge); EXPORT_SYMBOL(blk_attempt_remerge);
static void init_request_from_bio(struct request *req, struct bio *bio)
{
req->flags |= REQ_CMD;
/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
if (bio_rw_ahead(bio) || bio_failfast(bio))
req->flags |= REQ_FAILFAST;
/*
* REQ_BARRIER implies no merging, but lets make it explicit
*/
if (unlikely(bio_barrier(bio)))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
req->nr_phys_segments = bio_phys_segments(req->q, bio);
req->nr_hw_segments = bio_hw_segments(req->q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
req->ioprio = bio_prio(bio);
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
}
static int __make_request(request_queue_t *q, struct bio *bio) static int __make_request(request_queue_t *q, struct bio *bio)
{ {
struct request *req; struct request *req;
@ -2680,7 +2814,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
spin_lock_prefetch(q->queue_lock); spin_lock_prefetch(q->queue_lock);
barrier = bio_barrier(bio); barrier = bio_barrier(bio);
if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) { if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto end_io; goto end_io;
} }
@ -2750,33 +2884,7 @@ get_rq:
* We don't worry about that case for efficiency. It won't happen * We don't worry about that case for efficiency. It won't happen
* often, and the elevators are able to handle it. * often, and the elevators are able to handle it.
*/ */
init_request_from_bio(req, bio);
req->flags |= REQ_CMD;
/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
if (bio_rw_ahead(bio) || bio_failfast(bio))
req->flags |= REQ_FAILFAST;
/*
* REQ_BARRIER implies no merging, but lets make it explicit
*/
if (unlikely(barrier))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
req->errors = 0;
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
req->nr_phys_segments = bio_phys_segments(q, bio);
req->nr_hw_segments = bio_hw_segments(q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
if (elv_queue_empty(q)) if (elv_queue_empty(q))
@ -3067,7 +3175,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (nr_bytes >= bio->bi_size) { if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next; req->bio = bio->bi_next;
nbytes = bio->bi_size; nbytes = bio->bi_size;
bio_endio(bio, nbytes, error); if (!ordered_bio_endio(req, bio, nbytes, error))
bio_endio(bio, nbytes, error);
next_idx = 0; next_idx = 0;
bio_nbytes = 0; bio_nbytes = 0;
} else { } else {
@ -3122,7 +3231,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
* if the request wasn't completed, update state * if the request wasn't completed, update state
*/ */
if (bio_nbytes) { if (bio_nbytes) {
bio_endio(bio, bio_nbytes, error); if (!ordered_bio_endio(req, bio, bio_nbytes, error))
bio_endio(bio, bio_nbytes, error);
bio->bi_idx += next_idx; bio->bi_idx += next_idx;
bio_iovec(bio)->bv_offset += nr_bytes; bio_iovec(bio)->bv_offset += nr_bytes;
bio_iovec(bio)->bv_len -= nr_bytes; bio_iovec(bio)->bv_len -= nr_bytes;
@ -3179,9 +3289,17 @@ EXPORT_SYMBOL(end_that_request_chunk);
/* /*
* queue lock must be held * queue lock must be held
*/ */
void end_that_request_last(struct request *req) void end_that_request_last(struct request *req, int uptodate)
{ {
struct gendisk *disk = req->rq_disk; struct gendisk *disk = req->rq_disk;
int error;
/*
* extend uptodate bool to allow < 0 value to be direct io error
*/
error = 0;
if (end_io_error(uptodate))
error = !uptodate ? -EIO : uptodate;
if (unlikely(laptop_mode) && blk_fs_request(req)) if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion(); laptop_io_completion();
@ -3196,7 +3314,7 @@ void end_that_request_last(struct request *req)
disk->in_flight--; disk->in_flight--;
} }
if (req->end_io) if (req->end_io)
req->end_io(req); req->end_io(req, error);
else else
__blk_put_request(req->q, req); __blk_put_request(req->q, req);
} }
@ -3208,7 +3326,7 @@ void end_request(struct request *req, int uptodate)
if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) { if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
add_disk_randomness(req->rq_disk); add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
end_that_request_last(req); end_that_request_last(req, uptodate);
} }
} }
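
The new ordered-write machinery above tracks a barrier's progress with a bitmask: start_ordered() sets QUEUE_ORDSEQ_STARTED, each stage completion ORs in its bit via blk_ordered_complete_seq(), and blk_ordered_cur_seq() reports the lowest still-missing stage as 1 << ffz(q->ordseq). A standalone sketch of that progression; the ORDSEQ_* values are assumed here to be ascending single bits (the real QUEUE_ORDSEQ_* constants live in blkdev.h, which is not part of the hunks shown):

#include <stdio.h>

#define ORDSEQ_STARTED   0x01
#define ORDSEQ_DRAIN     0x02
#define ORDSEQ_PREFLUSH  0x04
#define ORDSEQ_BAR       0x08
#define ORDSEQ_POSTFLUSH 0x10
#define ORDSEQ_DONE      0x20

static unsigned ffz_bit(unsigned x)     /* lowest zero bit, like ffz() */
{
        unsigned bit = 1;
        while (x & bit)
                bit <<= 1;
        return bit;
}

int main(void)
{
        unsigned ordseq = ORDSEQ_STARTED;               /* start_ordered() */

        /* Each completed stage ORs in its bit; the "current" stage is the
         * lowest bit still clear, which is what blk_ordered_cur_seq() does. */
        printf("current stage: 0x%02x\n", ffz_bit(ordseq));   /* DRAIN */
        ordseq |= ORDSEQ_DRAIN;
        printf("current stage: 0x%02x\n", ffz_bit(ordseq));   /* PREFLUSH */
        ordseq |= ORDSEQ_PREFLUSH | ORDSEQ_BAR | ORDSEQ_POSTFLUSH;
        printf("current stage: 0x%02x\n", ffz_bit(ordseq));   /* DONE */
        return 0;
}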


@@ -46,7 +46,7 @@ EXPORT_SYMBOL(scsi_command_size);
 
 static int sg_get_version(int __user *p)
 {
-	static int sg_version_num = 30527;
+	static const int sg_version_num = 30527;
 	return put_user(sg_version_num, p);
 }


@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
   if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
-    end_that_request_last(Request);
+    end_that_request_last(Request, UpToDate);
     if (Command->Completion) {
       complete(Command->Completion);
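
The DAC960 hunk above is the first of many driver conversions in this merge to the two-argument end_that_request_last(); the second argument follows the same uptodate convention as end_that_request_first() (positive = success, 0 = -EIO, negative = explicit errno), as the new end_that_request_last() body earlier in the diff shows. A minimal sketch of the resulting completion pattern; mydrv_complete() itself is hypothetical:

/* Sketch: generic request completion with the new two-argument form. */
static void mydrv_complete(struct request *rq, int uptodate)
{
        if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors)) {
                /* all segments done: finish the request itself */
                add_disk_randomness(rq->rq_disk);
                blkdev_dequeue_request(rq);
                end_that_request_last(rq, uptodate);
        }
}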


@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 	printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, status ? 1 : -EIO);
 	cmd_free(h,cmd,1);
 }


@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 	complete_buffers(cmd->rq->bio, ok);
 	DBGPX(printk("Done with %p\n", cmd->rq););
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, ok ? 1 : -EIO);
 }
 
 /*


@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
 	add_disk_randomness(req->rq_disk);
 	floppy_off((long)req->rq_disk->private_data);
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	/* We're done with the request */
 	current_req = NULL;


@@ -140,7 +140,7 @@ static void nbd_end_request(struct request *req)
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }


@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
 	assert(rc == 0);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 	rc = carm_put_request(host, crq);
 	assert(rc == 0);


@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_end_rq(struct request *rq, int uptodate)
 {
 	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq);
+	end_that_request_last(rq, uptodate);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,


@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
 	if (end_that_request_first(req, uptodate, num_sectors))
 		return;
 	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 /*


@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
 			if (!end_that_request_first(req, 1, nblock)) {
 				spin_lock_irq(q->queue_lock);
 				blkdev_dequeue_request(req);
-				end_that_request_last(req);
+				end_that_request_last(req, 1);
 				spin_unlock_irq(q->queue_lock);
 			}
 			continue;


@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 			 */
 			spin_lock_irqsave(&ide_lock, flags);
 			end_that_request_chunk(failed, 0, failed->data_len);
-			end_that_request_last(failed);
+			end_that_request_last(failed, 0);
 			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 
@@ -1735,7 +1735,7 @@ end_request:
 	spin_lock_irqsave(&ide_lock, flags);
 	blkdev_dequeue_request(rq);
-	end_that_request_last(rq);
+	end_that_request_last(rq, 1);
 	HWGROUP(drive)->rq = NULL;
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ide_stopped;


@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq) static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{ {
ide_drive_t *drive = q->queuedata; ide_drive_t *drive = q->queuedata;
struct request *rq = flush_rq->end_io_data;
int good_sectors = rq->hard_nr_sectors;
int bad_sectors;
sector_t sector;
if (flush_rq->errors & ABRT_ERR) {
printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
blk_queue_issue_flush_fn(drive->queue, NULL);
good_sectors = 0;
} else if (flush_rq->errors) {
good_sectors = 0;
if (blk_barrier_preflush(rq)) {
sector = ide_get_error_location(drive,flush_rq->buffer);
if ((sector >= rq->hard_sector) &&
(sector < rq->hard_sector + rq->hard_nr_sectors))
good_sectors = sector - rq->hard_sector;
}
}
if (flush_rq->errors)
printk(KERN_ERR "%s: failed barrier write: "
"sector=%Lx(good=%d/bad=%d)\n",
drive->name, (unsigned long long)rq->sector,
good_sectors,
(int) (rq->hard_nr_sectors-good_sectors));
bad_sectors = rq->hard_nr_sectors - good_sectors;
if (good_sectors)
__ide_end_request(drive, rq, 1, good_sectors);
if (bad_sectors)
__ide_end_request(drive, rq, 0, bad_sectors);
}
static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
if (!drive->wcache)
return 0;
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
rq->cmd[0] = WIN_FLUSH_CACHE; rq->cmd[0] = WIN_FLUSH_CACHE;
rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER; rq->flags |= REQ_DRIVE_TASK;
rq->buffer = rq->cmd; rq->buffer = rq->cmd;
return 1;
} }
static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
return 0; return 0;
} }
static void update_ordered(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
unsigned ordered = QUEUE_ORDERED_NONE;
prepare_flush_fn *prep_fn = NULL;
issue_flush_fn *issue_fn = NULL;
if (drive->wcache) {
unsigned long long capacity;
int barrier;
/*
* We must avoid issuing commands a drive does not
* understand or we may crash it. We check flush cache
* is supported. We also check we have the LBA48 flush
* cache if the drive capacity is too large. By this
* time we have trimmed the drive capacity if LBA48 is
* not available so we don't need to recheck that.
*/
capacity = idedisk_capacity(drive);
barrier = ide_id_has_flush_cache(id) &&
(drive->addressing == 0 || capacity <= (1ULL << 28) ||
ide_id_has_flush_cache_ext(id));
printk(KERN_INFO "%s: cache flushes %ssupported\n",
drive->name, barrier ? "" : "not");
if (barrier) {
ordered = QUEUE_ORDERED_DRAIN_FLUSH;
prep_fn = idedisk_prepare_flush;
issue_fn = idedisk_issue_flush;
}
} else
ordered = QUEUE_ORDERED_DRAIN;
blk_queue_ordered(drive->queue, ordered, prep_fn);
blk_queue_issue_flush_fn(drive->queue, issue_fn);
}
static int write_cache(ide_drive_t *drive, int arg) static int write_cache(ide_drive_t *drive, int arg)
{ {
ide_task_t args; ide_task_t args;
int err; int err = 1;
if (!ide_id_has_flush_cache(drive->id)) if (ide_id_has_flush_cache(drive->id)) {
return 1; memset(&args, 0, sizeof(ide_task_t));
args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
memset(&args, 0, sizeof(ide_task_t));
args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
args.command_type = IDE_DRIVE_TASK_NO_DATA; args.command_type = IDE_DRIVE_TASK_NO_DATA;
args.handler = &task_no_data_intr; args.handler = &task_no_data_intr;
err = ide_raw_taskfile(drive, &args, NULL);
if (err == 0)
drive->wcache = arg;
}
err = ide_raw_taskfile(drive, &args, NULL); update_ordered(drive);
if (err)
return err;
drive->wcache = arg; return err;
return 0;
} }
static int do_idedisk_flushcache (ide_drive_t *drive) static int do_idedisk_flushcache (ide_drive_t *drive)
@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
{ {
struct hd_driveid *id = drive->id; struct hd_driveid *id = drive->id;
unsigned long long capacity; unsigned long long capacity;
int barrier;
idedisk_add_settings(drive); idedisk_add_settings(drive);
@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
drive->wcache = 1; drive->wcache = 1;
write_cache(drive, 1); write_cache(drive, 1);
/*
* We must avoid issuing commands a drive does not understand
* or we may crash it. We check flush cache is supported. We also
* check we have the LBA48 flush cache if the drive capacity is
* too large. By this time we have trimmed the drive capacity if
* LBA48 is not available so we don't need to recheck that.
*/
barrier = 0;
if (ide_id_has_flush_cache(id))
barrier = 1;
if (drive->addressing == 1) {
/* Can't issue the correct flush ? */
if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
barrier = 0;
}
printk(KERN_INFO "%s: cache flushes %ssupported\n",
drive->name, barrier ? "" : "not ");
if (barrier) {
blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
drive->queue->prepare_flush_fn = idedisk_prepare_flush;
drive->queue->end_flush_fn = idedisk_end_flush;
blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
}
} }
static void ide_cacheflush_p(ide_drive_t *drive) static void ide_cacheflush_p(ide_drive_t *drive)
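
The ide-disk hunks above move barrier configuration out of idedisk_setup() and into write_cache() via the new update_ordered(): with the write cache enabled and a usable FLUSH CACHE (EXT) command the queue is switched to QUEUE_ORDERED_DRAIN_FLUSH with idedisk_prepare_flush(); with the cache enabled but no usable flush it stays QUEUE_ORDERED_NONE; with the cache disabled a plain drain suffices. A condensed sketch of that decision; barrier_ok stands in for the flush-capability test in the real code:

        /* Condensed from update_ordered() above (barrier_ok is a stand-in). */
        unsigned ordered = QUEUE_ORDERED_NONE;
        prepare_flush_fn *prep_fn = NULL;
        issue_flush_fn *issue_fn = NULL;

        if (drive->wcache) {
                if (barrier_ok) {
                        ordered = QUEUE_ORDERED_DRAIN_FLUSH;
                        prep_fn = idedisk_prepare_flush;
                        issue_fn = idedisk_issue_flush;
                }
        } else
                ordered = QUEUE_ORDERED_DRAIN;

        blk_queue_ordered(drive->queue, ordered, prep_fn);
        blk_queue_issue_flush_fn(drive->queue, issue_fn);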


@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		blkdev_dequeue_request(rq);
 		HWGROUP(drive)->rq = NULL;
-		end_that_request_last(rq);
+		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
 	return ret;
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	if (!nr_sectors)
 		nr_sectors = rq->hard_cur_sectors;
 
-	if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
-		ret = rq->nr_sectors != 0;
-	else
-		ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;
@@ -247,7 +244,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 	}
 	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
-	end_that_request_last(rq);
+	end_that_request_last(rq, 1);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
@@ -379,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
 	rq->errors = err;
-	end_that_request_last(rq);
+	end_that_request_last(rq, !rq->errors);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }


@@ -468,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
 		spin_lock_irqsave(q->queue_lock, flags);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 		if (likely(dev)) {
 			dev->open_queue_depth--;


@@ -263,7 +263,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			 */
 			add_disk_randomness(req->rq_disk);
 			blkdev_dequeue_request(req);
-			end_that_request_last(req);
+			end_that_request_last(req, 1);
 		}
 		spin_unlock_irq(&md->lock);
 	} while (ret);
@@ -289,7 +289,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	add_disk_randomness(req->rq_disk);
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, 0);
 	spin_unlock_irq(&md->lock);
 
 	return 0;


@@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate)
 	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 		BUG();
 	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 /*


@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
 {
 	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 		BUG();
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 }
 
 static void


@@ -214,7 +214,6 @@ static struct scsi_host_template ahci_sht = {
 	.dma_boundary		= AHCI_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations ahci_ops = {


@@ -187,7 +187,6 @@ static struct scsi_host_template piix_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 	.resume			= ata_scsi_device_resume,
 	.suspend		= ata_scsi_device_suspend,
 };


@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->use_clustering = sht->use_clustering;
-	shost->ordered_flush = sht->ordered_flush;
 	shost->ordered_tag = sht->ordered_tag;
 
-	/*
-	 * hosts/devices that do queueing must support ordered tags
-	 */
-	if (shost->can_queue > 1 && shost->ordered_flush) {
-		printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
-		shost->ordered_flush = 0;
-	}
-
 	if (sht->max_host_blocked)
 		shost->max_host_blocked = sht->max_host_blocked;
 	else


@@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 	/* kill current request */
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, 0);
 	if (req->flags & REQ_SENSE)
 		kfree(scsi->pc->buffer);
 	kfree(scsi->pc);
@@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 	/* now nuke the drive queue */
 	while ((req = elv_next_request(drive->queue))) {
 		blkdev_dequeue_request(req);
-		end_that_request_last(req);
+		end_that_request_last(req, 0);
 	}
 	HWGROUP(drive)->rq = NULL;


@ -562,16 +562,28 @@ static const u8 ata_rw_cmds[] = {
ATA_CMD_WRITE_MULTI, ATA_CMD_WRITE_MULTI,
ATA_CMD_READ_MULTI_EXT, ATA_CMD_READ_MULTI_EXT,
ATA_CMD_WRITE_MULTI_EXT, ATA_CMD_WRITE_MULTI_EXT,
0,
0,
0,
ATA_CMD_WRITE_MULTI_FUA_EXT,
/* pio */ /* pio */
ATA_CMD_PIO_READ, ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE, ATA_CMD_PIO_WRITE,
ATA_CMD_PIO_READ_EXT, ATA_CMD_PIO_READ_EXT,
ATA_CMD_PIO_WRITE_EXT, ATA_CMD_PIO_WRITE_EXT,
0,
0,
0,
0,
/* dma */ /* dma */
ATA_CMD_READ, ATA_CMD_READ,
ATA_CMD_WRITE, ATA_CMD_WRITE,
ATA_CMD_READ_EXT, ATA_CMD_READ_EXT,
ATA_CMD_WRITE_EXT ATA_CMD_WRITE_EXT,
0,
0,
0,
ATA_CMD_WRITE_FUA_EXT
}; };
/** /**
@ -584,25 +596,32 @@ static const u8 ata_rw_cmds[] = {
* LOCKING: * LOCKING:
* caller. * caller.
*/ */
void ata_rwcmd_protocol(struct ata_queued_cmd *qc) int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{ {
struct ata_taskfile *tf = &qc->tf; struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev; struct ata_device *dev = qc->dev;
u8 cmd;
int index, lba48, write; int index, fua, lba48, write;
fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
if (dev->flags & ATA_DFLAG_PIO) { if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO; tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 4; index = dev->multi_count ? 0 : 8;
} else { } else {
tf->protocol = ATA_PROT_DMA; tf->protocol = ATA_PROT_DMA;
index = 8; index = 16;
} }
tf->command = ata_rw_cmds[index + lba48 + write]; cmd = ata_rw_cmds[index + fua + lba48 + write];
if (cmd) {
tf->command = cmd;
return 0;
}
return -1;
} }
static const char * const xfer_mode_str[] = { static const char * const xfer_mode_str[] = {
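
ata_rwcmd_protocol() above now indexes ata_rw_cmds[] with four components (transfer class, FUA, LBA48, write) and returns -1 when the selected slot is 0, i.e. when no command exists for that combination (such as FUA without LBA48). A small standalone sketch of the index arithmetic; the slot numbers refer to the table as extended in the hunk:

#include <stdio.h>

int main(void)
{
        int fua = 1, lba48 = 1, write = 1;      /* the WRITE DMA FUA EXT case */
        int pio = 0, multi = 0;

        int index = pio ? (multi ? 0 : 8) : 16;
        int slot  = index + (fua ? 4 : 0) + (lba48 ? 2 : 0) + (write ? 1 : 0);

        /* slot 23 is ATA_CMD_WRITE_FUA_EXT in the extended table; a 0 entry
         * means "unsupported", which the function now reports as -1. */
        printf("table slot = %d\n", slot);
        return 0;
}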


@ -1096,11 +1096,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
scsicmd[0] == WRITE_16) scsicmd[0] == WRITE_16)
tf->flags |= ATA_TFLAG_WRITE; tf->flags |= ATA_TFLAG_WRITE;
/* Calculate the SCSI LBA and transfer length. */ /* Calculate the SCSI LBA, transfer length and FUA. */
switch (scsicmd[0]) { switch (scsicmd[0]) {
case READ_10: case READ_10:
case WRITE_10: case WRITE_10:
scsi_10_lba_len(scsicmd, &block, &n_block); scsi_10_lba_len(scsicmd, &block, &n_block);
if (unlikely(scsicmd[1] & (1 << 3)))
tf->flags |= ATA_TFLAG_FUA;
break; break;
case READ_6: case READ_6:
case WRITE_6: case WRITE_6:
@ -1115,6 +1117,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
case READ_16: case READ_16:
case WRITE_16: case WRITE_16:
scsi_16_lba_len(scsicmd, &block, &n_block); scsi_16_lba_len(scsicmd, &block, &n_block);
if (unlikely(scsicmd[1] & (1 << 3)))
tf->flags |= ATA_TFLAG_FUA;
break; break;
default: default:
DPRINTK("no-byte command\n"); DPRINTK("no-byte command\n");
@ -1158,7 +1162,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
tf->device |= (block >> 24) & 0xf; tf->device |= (block >> 24) & 0xf;
} }
ata_rwcmd_protocol(qc); if (unlikely(ata_rwcmd_protocol(qc) < 0))
goto invalid_fld;
qc->nsect = n_block; qc->nsect = n_block;
tf->nsect = n_block & 0xff; tf->nsect = n_block & 0xff;
@ -1176,7 +1181,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
if ((block >> 28) || (n_block > 256)) if ((block >> 28) || (n_block > 256))
goto out_of_range; goto out_of_range;
ata_rwcmd_protocol(qc); if (unlikely(ata_rwcmd_protocol(qc) < 0))
goto invalid_fld;
/* Convert LBA to CHS */ /* Convert LBA to CHS */
track = (u32)block / dev->sectors; track = (u32)block / dev->sectors;
@ -1711,6 +1717,7 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen) unsigned int buflen)
{ {
struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p, *last; u8 *scsicmd = args->cmd->cmnd, *p, *last;
const u8 sat_blk_desc[] = { const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */ 0, 0, 0, 0, /* number of blocks: sat unspecified */
@ -1719,6 +1726,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
}; };
u8 pg, spg; u8 pg, spg;
unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
u8 dpofua;
VPRINTK("ENTER\n"); VPRINTK("ENTER\n");
@ -1787,9 +1795,17 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
if (minlen < 1) if (minlen < 1)
return 0; return 0;
dpofua = 0;
if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
(!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
dpofua = 1 << 4;
if (six_byte) { if (six_byte) {
output_len--; output_len--;
rbuf[0] = output_len; rbuf[0] = output_len;
if (minlen > 2)
rbuf[2] |= dpofua;
if (ebd) { if (ebd) {
if (minlen > 3) if (minlen > 3)
rbuf[3] = sizeof(sat_blk_desc); rbuf[3] = sizeof(sat_blk_desc);
@ -1802,6 +1818,8 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
rbuf[0] = output_len >> 8; rbuf[0] = output_len >> 8;
if (minlen > 1) if (minlen > 1)
rbuf[1] = output_len; rbuf[1] = output_len;
if (minlen > 3)
rbuf[3] |= dpofua;
if (ebd) { if (ebd) {
if (minlen > 7) if (minlen > 7)
rbuf[7] = sizeof(sat_blk_desc); rbuf[7] = sizeof(sat_blk_desc);
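As a side note on the DPOFUA reporting above, a minimal userspace sketch of where the bit lands in the two mode parameter headers; set_dpofua() is an illustrative helper, and 0x10 is the same mask sd_read_cache_type() tests on the initiator side:

#include <stdio.h>

static void set_dpofua(unsigned char *hdr, int six_byte)
{
	if (six_byte)
		hdr[2] |= 0x10;	/* device-specific parameter, MODE SENSE(6) */
	else
		hdr[3] |= 0x10;	/* device-specific parameter, MODE SENSE(10) */
}

int main(void)
{
	unsigned char h6[4] = { 0 }, h10[8] = { 0 };

	set_dpofua(h6, 1);
	set_dpofua(h10, 0);
	printf("6-byte hdr[2]=0x%02x, 10-byte hdr[3]=0x%02x\n", h6[2], h10[3]);
	return 0;
}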
@ -2462,7 +2480,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (xlat_func) if (xlat_func)
ata_scsi_translate(ap, dev, cmd, done, xlat_func); ata_scsi_translate(ap, dev, cmd, done, xlat_func);
else else
ata_scsi_simulate(dev->id, cmd, done); ata_scsi_simulate(ap, dev, cmd, done);
} else } else
ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
@ -2485,14 +2503,16 @@ out_unlock:
* spin_lock_irqsave(host_set lock) * spin_lock_irqsave(host_set lock)
*/ */
void ata_scsi_simulate(u16 *id, void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
struct scsi_cmnd *cmd, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *)) void (*done)(struct scsi_cmnd *))
{ {
struct ata_scsi_args args; struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd; const u8 *scsicmd = cmd->cmnd;
args.id = id; args.ap = ap;
args.dev = dev;
args.id = dev->id;
args.cmd = cmd; args.cmd = cmd;
args.done = done; args.done = done;
View File
@ -32,6 +32,8 @@
#define DRV_VERSION "1.20" /* must be exactly four chars */ #define DRV_VERSION "1.20" /* must be exactly four chars */
struct ata_scsi_args { struct ata_scsi_args {
struct ata_port *ap;
struct ata_device *dev;
u16 *id; u16 *id;
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
void (*done)(struct scsi_cmnd *); void (*done)(struct scsi_cmnd *);
@ -41,7 +43,7 @@ struct ata_scsi_args {
extern int atapi_enabled; extern int atapi_enabled;
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev); struct ata_device *dev);
extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
extern void ata_qc_free(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc);
extern int ata_qc_issue(struct ata_queued_cmd *qc); extern int ata_qc_issue(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
View File
@ -374,7 +374,6 @@ static struct scsi_host_template mv_sht = {
.dma_boundary = MV_DMA_BOUNDARY, .dma_boundary = MV_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations mv5_ops = { static const struct ata_port_operations mv5_ops = {
View File
@ -235,7 +235,6 @@ static struct scsi_host_template nv_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations nv_ops = { static const struct ata_port_operations nv_ops = {
View File
@ -114,7 +114,6 @@ static struct scsi_host_template pdc_ata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations pdc_sata_ops = { static const struct ata_port_operations pdc_sata_ops = {
View File
@ -147,7 +147,6 @@ static struct scsi_host_template sil_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations sil_ops = { static const struct ata_port_operations sil_ops = {
View File
@ -292,7 +292,6 @@ static struct scsi_host_template sil24_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1, /* NCQ not supported yet */
}; };
static const struct ata_port_operations sil24_ops = { static const struct ata_port_operations sil24_ops = {
View File
@ -99,7 +99,6 @@ static struct scsi_host_template sis_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations sis_ops = { static const struct ata_port_operations sis_ops = {
View File
@ -303,7 +303,6 @@ static struct scsi_host_template k2_sata_sht = {
.proc_info = k2_sata_proc_info, .proc_info = k2_sata_proc_info,
#endif #endif
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
View File
@ -194,7 +194,6 @@ static struct scsi_host_template pdc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations pdc_20621_ops = { static const struct ata_port_operations pdc_20621_ops = {
View File
@ -87,7 +87,6 @@ static struct scsi_host_template uli_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations uli_ops = { static const struct ata_port_operations uli_ops = {
View File
@ -106,7 +106,6 @@ static struct scsi_host_template svia_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
static const struct ata_port_operations svia_sata_ops = { static const struct ata_port_operations svia_sata_ops = {
View File
@ -235,7 +235,6 @@ static struct scsi_host_template vsc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY, .dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config, .slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param, .bios_param = ata_std_bios_param,
.ordered_flush = 1,
}; };
View File
@ -308,7 +308,7 @@ struct scsi_io_context {
static kmem_cache_t *scsi_io_context_cache; static kmem_cache_t *scsi_io_context_cache;
static void scsi_end_async(struct request *req) static void scsi_end_async(struct request *req, int uptodate)
{ {
struct scsi_io_context *sioc = req->end_io_data; struct scsi_io_context *sioc = req->end_io_data;
@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req)) if (blk_rq_tagged(req))
blk_queue_end_tag(q, req); blk_queue_end_tag(q, req);
end_that_request_last(req); end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
/* /*
@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
int sense_valid = 0; int sense_valid = 0;
int sense_deferred = 0; int sense_deferred = 0;
if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
return;
/* /*
* Free up any indirection buffers we allocated for DMA purposes. * Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the * For the case of a READ, we need to copy the data out of the
@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL; return BLKPREP_KILL;
} }
static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
struct scsi_device *sdev = q->queuedata;
struct scsi_driver *drv;
if (sdev->sdev_state == SDEV_RUNNING) {
drv = *(struct scsi_driver **) rq->rq_disk->private_data;
if (drv->prepare_flush)
return drv->prepare_flush(q, rq);
}
return 0;
}
static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
struct scsi_device *sdev = q->queuedata;
struct request *flush_rq = rq->end_io_data;
struct scsi_driver *drv;
if (flush_rq->errors) {
printk("scsi: barrier error, disabling flush support\n");
blk_queue_ordered(q, QUEUE_ORDERED_NONE);
}
if (sdev->sdev_state == SDEV_RUNNING) {
drv = *(struct scsi_driver **) rq->rq_disk->private_data;
drv->end_flush(q, rq);
}
}
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
sector_t *error_sector) sector_t *error_sector)
{ {
@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_segment_boundary(q, shost->dma_boundary); blk_queue_segment_boundary(q, shost->dma_boundary);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
/*
* ordered tags are superior to flush ordering
*/
if (shost->ordered_tag)
blk_queue_ordered(q, QUEUE_ORDERED_TAG);
else if (shost->ordered_flush) {
blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
q->prepare_flush_fn = scsi_prepare_flush_fn;
q->end_flush_fn = scsi_end_flush_fn;
}
if (!shost->use_clustering) if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
return q; return q;
View File
@ -102,6 +102,7 @@ struct scsi_disk {
u8 write_prot; u8 write_prot;
unsigned WCE : 1; /* state of disk WCE bit */ unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
}; };
static DEFINE_IDR(sd_index_idr); static DEFINE_IDR(sd_index_idr);
@ -121,8 +122,7 @@ static void sd_shutdown(struct device *dev);
static void sd_rescan(struct device *); static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *); static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *); static int sd_issue_flush(struct device *, sector_t *);
static void sd_end_flush(request_queue_t *, struct request *); static void sd_prepare_flush(request_queue_t *, struct request *);
static int sd_prepare_flush(request_queue_t *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
unsigned char *buffer); unsigned char *buffer);
@ -137,8 +137,6 @@ static struct scsi_driver sd_template = {
.rescan = sd_rescan, .rescan = sd_rescan,
.init_command = sd_init_command, .init_command = sd_init_command,
.issue_flush = sd_issue_flush, .issue_flush = sd_issue_flush,
.prepare_flush = sd_prepare_flush,
.end_flush = sd_end_flush,
}; };
/* /*
@ -346,6 +344,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
if (block > 0xffffffff) { if (block > 0xffffffff) {
SCpnt->cmnd[0] += READ_16 - READ_6; SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@ -365,6 +364,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
this_count = 0xffff; this_count = 0xffff;
SCpnt->cmnd[0] += READ_10 - READ_6; SCpnt->cmnd[0] += READ_10 - READ_6;
SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@ -373,6 +373,17 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
} else { } else {
if (unlikely(blk_fua_rq(rq))) {
/*
* This happens only if this drive failed
* 10byte rw command with ILLEGAL_REQUEST
* during operation and thus turned off
* use_10_for_rw.
*/
printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n");
return 0;
}
SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
SCpnt->cmnd[3] = (unsigned char) block & 0xff; SCpnt->cmnd[3] = (unsigned char) block & 0xff;
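For illustration, a standalone sketch of the WRITE(10) CDB this path builds when blk_fua_rq() is set; build_write10() is a made-up helper, but the byte layout and the 0x08 FUA bit in byte 1 match the code above and the bit ata_scsi_rw_xlat() checks on the libata side:

#include <stdio.h>
#include <stdint.h>

static void build_write10(unsigned char cdb[10], uint32_t lba,
			  uint16_t nblocks, int fua)
{
	cdb[0] = 0x2A;			/* WRITE(10) */
	cdb[1] = fua ? 0x08 : 0x00;	/* FUA bit (bit 3 of byte 1) */
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[6] = 0;
	cdb[7] = nblocks >> 8;
	cdb[8] = nblocks;
	cdb[9] = 0;
}

int main(void)
{
	unsigned char cdb[10];
	int i;

	build_write10(cdb, 0x12345678, 8, 1);
	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}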
@ -729,42 +740,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
return ret; return ret;
} }
static void sd_end_flush(request_queue_t *q, struct request *flush_rq) static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{ {
struct request *rq = flush_rq->end_io_data;
struct scsi_cmnd *cmd = rq->special;
unsigned int bytes = rq->hard_nr_sectors << 9;
if (!flush_rq->errors) {
spin_unlock(q->queue_lock);
scsi_io_completion(cmd, bytes, 0);
spin_lock(q->queue_lock);
} else if (blk_barrier_postflush(rq)) {
spin_unlock(q->queue_lock);
scsi_io_completion(cmd, 0, bytes);
spin_lock(q->queue_lock);
} else {
/*
* force journal abort of barriers
*/
end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
end_that_request_last(rq);
}
}
static int sd_prepare_flush(request_queue_t *q, struct request *rq)
{
struct scsi_device *sdev = q->queuedata;
struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);
if (!sdkp || !sdkp->WCE)
return 0;
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER; rq->flags |= REQ_BLOCK_PC;
rq->timeout = SD_TIMEOUT; rq->timeout = SD_TIMEOUT;
rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd[0] = SYNCHRONIZE_CACHE;
return 1; rq->cmd_len = 10;
} }
static void sd_rescan(struct device *dev) static void sd_rescan(struct device *dev)
@ -1427,10 +1409,18 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
sdkp->RCD = 0; sdkp->RCD = 0;
} }
sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
printk(KERN_NOTICE "SCSI device %s: uses "
"READ/WRITE(6), disabling FUA\n", diskname);
sdkp->DPOFUA = 0;
}
ct = sdkp->RCD + 2*sdkp->WCE; ct = sdkp->RCD + 2*sdkp->WCE;
printk(KERN_NOTICE "SCSI device %s: drive cache: %s\n", printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
diskname, types[ct]); diskname, types[ct],
sdkp->DPOFUA ? " w/ FUA" : "");
return; return;
} }
@ -1462,6 +1452,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device; struct scsi_device *sdp = sdkp->device;
unsigned char *buffer; unsigned char *buffer;
unsigned ordered;
SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
@ -1499,6 +1490,20 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_cache_type(sdkp, disk->disk_name, buffer); sd_read_cache_type(sdkp, disk->disk_name, buffer);
} }
/*
* We now have all cache related info, determine how we deal
* with ordered requests. Note that as the current SCSI
* dispatch function can alter request order, we cannot use
* QUEUE_ORDERED_TAG_* even when ordered tag is supported.
*/
if (sdkp->WCE)
ordered = sdkp->DPOFUA
? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
else
ordered = QUEUE_ORDERED_DRAIN;
blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
set_capacity(disk, sdkp->capacity); set_capacity(disk, sdkp->capacity);
kfree(buffer); kfree(buffer);
@ -1598,6 +1603,7 @@ static int sd_probe(struct device *dev)
strcpy(gd->devfs_name, sdp->devfs_name); strcpy(gd->devfs_name, sdp->devfs_name);
gd->private_data = &sdkp->driver; gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
sd_revalidate_disk(gd); sd_revalidate_disk(gd);
@ -1605,7 +1611,6 @@ static int sd_probe(struct device *dev)
gd->flags = GENHD_FL_DRIVERFS; gd->flags = GENHD_FL_DRIVERFS;
if (sdp->removable) if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE; gd->flags |= GENHD_FL_REMOVABLE;
gd->queue = sdkp->device->request_queue;
dev_set_drvdata(dev, sdkp); dev_set_drvdata(dev, sdkp);
add_disk(gd); add_disk(gd);
View File
@ -325,10 +325,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
if (unlikely(bio_flagged(bio, BIO_CLONED))) if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 0; return 0;
if (bio->bi_vcnt >= bio->bi_max_vecs) if (((bio->bi_size + len) >> 9) > max_sectors)
return 0; return 0;
if (((bio->bi_size + len) >> 9) > max_sectors) /*
* For filesystems with a blocksize smaller than the pagesize
* we will often be called with the same page as last time and
* a consecutive offset. Optimize this special case.
*/
if (bio->bi_vcnt > 0) {
struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
prev->bv_len += len;
if (q->merge_bvec_fn &&
q->merge_bvec_fn(q, bio, prev) < len) {
prev->bv_len -= len;
return 0;
}
goto done;
}
}
if (bio->bi_vcnt >= bio->bi_max_vecs)
return 0; return 0;
/* /*
@ -382,6 +403,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
bio->bi_vcnt++; bio->bi_vcnt++;
bio->bi_phys_segments++; bio->bi_phys_segments++;
bio->bi_hw_segments++; bio->bi_hw_segments++;
done:
bio->bi_size += len; bio->bi_size += len;
return len; return len;
} }
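A small userspace model of the fast path added above may help; struct vec and try_append() are invented names for the illustration, and the real __bio_add_page() additionally consults q->merge_bvec_fn before keeping the grown segment:

#include <stdio.h>

struct vec {
	void		*page;
	unsigned int	len;
	unsigned int	offset;
};

static int try_append(struct vec *v, int *cnt, int max,
		      void *page, unsigned int len, unsigned int offset)
{
	if (*cnt > 0) {
		struct vec *prev = &v[*cnt - 1];

		/* same page, consecutive offset: just extend the last vec */
		if (page == prev->page && offset == prev->offset + prev->len) {
			prev->len += len;
			return len;
		}
	}
	if (*cnt >= max)
		return 0;			/* vec table full */
	v[*cnt].page = page;
	v[*cnt].len = len;
	v[*cnt].offset = offset;
	(*cnt)++;
	return len;
}

int main(void)
{
	struct vec v[4];
	int cnt = 0;
	char page[4096];

	try_append(v, &cnt, 4, page, 1024, 0);
	try_append(v, &cnt, 4, page, 1024, 1024);	/* coalesces, cnt stays 1 */
	printf("vecs used: %d, first len: %u\n", cnt, v[0].len);
	return 0;
}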
View File
@ -129,6 +129,7 @@ enum {
ATA_CMD_READ_EXT = 0x25, ATA_CMD_READ_EXT = 0x25,
ATA_CMD_WRITE = 0xCA, ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35, ATA_CMD_WRITE_EXT = 0x35,
ATA_CMD_WRITE_FUA_EXT = 0x3D,
ATA_CMD_PIO_READ = 0x20, ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24, ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30, ATA_CMD_PIO_WRITE = 0x30,
@ -137,6 +138,7 @@ enum {
ATA_CMD_READ_MULTI_EXT = 0x29, ATA_CMD_READ_MULTI_EXT = 0x29,
ATA_CMD_WRITE_MULTI = 0xC5, ATA_CMD_WRITE_MULTI = 0xC5,
ATA_CMD_WRITE_MULTI_EXT = 0x39, ATA_CMD_WRITE_MULTI_EXT = 0x39,
ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
ATA_CMD_SET_FEATURES = 0xEF, ATA_CMD_SET_FEATURES = 0xEF,
ATA_CMD_PACKET = 0xA0, ATA_CMD_PACKET = 0xA0,
ATA_CMD_VERIFY = 0x40, ATA_CMD_VERIFY = 0x40,
@ -194,6 +196,7 @@ enum {
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
}; };
enum ata_tf_protocols { enum ata_tf_protocols {
@ -247,7 +250,8 @@ struct ata_taskfile {
#define ata_id_is_sata(id) ((id)[93] == 0) #define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6)) #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5)) #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
#define ata_id_has_flush(id) ((id)[83] & (1 << 12)) #define ata_id_has_fua(id) ((id)[84] & (1 << 6))
#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13)) #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
#define ata_id_has_lba48(id) ((id)[83] & (1 << 10)) #define ata_id_has_lba48(id) ((id)[83] & (1 << 10))
#define ata_id_has_wcache(id) ((id)[82] & (1 << 5)) #define ata_id_has_wcache(id) ((id)[82] & (1 << 5))
View File
@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
struct request; struct request;
typedef void (rq_end_io_fn)(struct request *); typedef void (rq_end_io_fn)(struct request *, int);
struct request_list { struct request_list {
int count[2]; int count[2];
@ -207,6 +207,7 @@ enum rq_flag_bits {
__REQ_SORTED, /* elevator knows about this request */ __REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */ __REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_FUA, /* forced unit access */
__REQ_CMD, /* is a regular fs rw request */ __REQ_CMD, /* is a regular fs rw request */
__REQ_NOMERGE, /* don't touch this for merging */ __REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */ __REQ_STARTED, /* drive already may have started this one */
@ -230,9 +231,7 @@ enum rq_flag_bits {
__REQ_PM_SUSPEND, /* suspend request */ __REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */ __REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */ __REQ_PM_SHUTDOWN, /* shutdown request */
__REQ_BAR_PREFLUSH, /* barrier pre-flush done */ __REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_BAR_POSTFLUSH, /* barrier post-flush */
__REQ_BAR_FLUSH, /* rq is the flush request */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
}; };
@ -241,6 +240,7 @@ enum rq_flag_bits {
#define REQ_SORTED (1 << __REQ_SORTED) #define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_CMD (1 << __REQ_CMD) #define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE) #define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED) #define REQ_STARTED (1 << __REQ_STARTED)
@ -260,9 +260,7 @@ enum rq_flag_bits {
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND) #define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME) #define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) #define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH) #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
#define REQ_BAR_FLUSH (1 << __REQ_BAR_FLUSH)
/* /*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@ -292,8 +290,7 @@ struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw); typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef int (prepare_flush_fn) (request_queue_t *, struct request *); typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (end_flush_fn) (request_queue_t *, struct request *);
enum blk_queue_state { enum blk_queue_state {
Queue_down, Queue_down,
@ -335,7 +332,6 @@ struct request_queue
activity_fn *activity_fn; activity_fn *activity_fn;
issue_flush_fn *issue_flush_fn; issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn; prepare_flush_fn *prepare_flush_fn;
end_flush_fn *end_flush_fn;
/* /*
* Dispatch queue sorting * Dispatch queue sorting
@ -420,14 +416,11 @@ struct request_queue
/* /*
* reserved for flush operations * reserved for flush operations
*/ */
struct request *flush_rq; unsigned int ordered, next_ordered, ordseq;
unsigned char ordered; int orderr, ordcolor;
}; struct request pre_flush_rq, bar_rq, post_flush_rq;
struct request *orig_bar_rq;
enum { unsigned int bi_size;
QUEUE_ORDERED_NONE,
QUEUE_ORDERED_TAG,
QUEUE_ORDERED_FLUSH,
}; };
#define RQ_INACTIVE (-1) #define RQ_INACTIVE (-1)
@ -445,12 +438,51 @@ enum {
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */
enum {
/*
* Hardbarrier is supported with one of the following methods.
*
* NONE : hardbarrier unsupported
* DRAIN : ordering by draining is enough
* DRAIN_FLUSH : ordering by draining w/ pre and post flushes
* DRAIN_FUA : ordering by draining w/ pre flush and FUA write
* TAG : ordering by tag is enough
* TAG_FLUSH : ordering by tag w/ pre and post flushes
* TAG_FUA : ordering by tag w/ pre flush and FUA write
*/
QUEUE_ORDERED_NONE = 0x00,
QUEUE_ORDERED_DRAIN = 0x01,
QUEUE_ORDERED_TAG = 0x02,
QUEUE_ORDERED_PREFLUSH = 0x10,
QUEUE_ORDERED_POSTFLUSH = 0x20,
QUEUE_ORDERED_FUA = 0x40,
QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
/*
* Ordered operation sequence
*/
QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
QUEUE_ORDSEQ_DONE = 0x20,
};
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q) test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD) #define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) #define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
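To make the flag composition above concrete, a standalone sketch; choose_ordered() is an illustrative helper mirroring the WCE/DPOFUA decision sd_revalidate_disk() makes, and the numeric values are the QUEUE_ORDERED_* values defined in the hunk above:

#include <stdio.h>

enum {
	QO_DRAIN       = 0x01,
	QO_PREFLUSH    = 0x10,
	QO_POSTFLUSH   = 0x20,
	QO_FUA         = 0x40,
	QO_DRAIN_FLUSH = QO_DRAIN | QO_PREFLUSH | QO_POSTFLUSH,
	QO_DRAIN_FUA   = QO_DRAIN | QO_PREFLUSH | QO_FUA,
};

static unsigned int choose_ordered(int wce, int dpofua)
{
	if (!wce)
		return QO_DRAIN;	/* write-through cache: draining is enough */
	return dpofua ? QO_DRAIN_FUA : QO_DRAIN_FLUSH;
}

int main(void)
{
	printf("WCE+DPOFUA -> 0x%02x\n", choose_ordered(1, 1));	/* 0x51 */
	printf("WCE only   -> 0x%02x\n", choose_ordered(1, 0));	/* 0x31 */
	printf("no WCE     -> 0x%02x\n", choose_ordered(0, 0));	/* 0x01 */
	return 0;
}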
@ -466,8 +498,7 @@ enum {
#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) #define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) #define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH) #define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
@ -560,7 +591,7 @@ extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio); extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *); extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *); extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq); extern void blk_end_sync_rq(struct request *rq, int error);
extern void blk_attempt_remerge(request_queue_t *, struct request *); extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t); extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *); extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@ -582,8 +613,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
extern int blk_execute_rq(request_queue_t *, struct gendisk *, extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int); struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
struct request *, int, struct request *, int, rq_end_io_fn *);
void (*done)(struct request *));
static inline request_queue_t *bdev_get_queue(struct block_device *bdev) static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{ {
@ -614,7 +644,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
*/ */
extern int end_that_request_first(struct request *, int, int); extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int); extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *); extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate); extern void end_request(struct request *req, int uptodate);
/* /*
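A hedged sketch of a driver completion path under the changed prototypes (kernel context only, not standalone; my_complete_rq() is a hypothetical helper, and q->queue_lock must be held around end_that_request_last()):

#include <linux/blkdev.h>

static void my_complete_rq(struct request *rq, int uptodate)
{
	/* end_that_request_first() returns 0 once all sectors are retired */
	if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors))
		end_that_request_last(rq, uptodate);	/* note the new flag */
}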
@ -665,11 +695,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int); extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern void blk_queue_ordered(request_queue_t *, int); extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern struct request *blk_start_pre_flush(request_queue_t *,struct request *); extern int blk_do_ordered(request_queue_t *, struct request **);
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int); extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int); extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *); extern void blk_dump_rq_flags(struct request *, char *);
View File
@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_INSERT_FRONT 1 #define ELEVATOR_INSERT_FRONT 1
#define ELEVATOR_INSERT_BACK 2 #define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3 #define ELEVATOR_INSERT_SORT 3
#define ELEVATOR_INSERT_REQUEUE 4
/* /*
* return values from elevator_may_queue_fn * return values from elevator_may_queue_fn
View File
@ -488,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap); extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc); extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap); extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd, extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *)); void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev, extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev, struct block_device *bdev,
View File
@ -15,7 +15,6 @@ struct scsi_driver {
void (*rescan)(struct device *); void (*rescan)(struct device *);
int (*issue_flush)(struct device *, sector_t *); int (*issue_flush)(struct device *, sector_t *);
int (*prepare_flush)(struct request_queue *, struct request *); int (*prepare_flush)(struct request_queue *, struct request *);
void (*end_flush)(struct request_queue *, struct request *);
}; };
#define to_scsi_driver(drv) \ #define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv) container_of((drv), struct scsi_driver, gendrv)
View File
@ -398,7 +398,6 @@ struct scsi_host_template {
/* /*
* ordered write support * ordered write support
*/ */
unsigned ordered_flush:1;
unsigned ordered_tag:1; unsigned ordered_tag:1;
/* /*