Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: fix setting of max_segment_size and seg_boundary mask
  block: internal dequeue shouldn't start timer
  block: set disk->node_id before it's being used
  When block layer fails to map iov, it calls bio_unmap_user to undo
commit feaf3848a8
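For orientation before the hunks below: the dequeue change splits request dequeueing into a driver-facing path that arms the per-request timeout timer and a block-internal path that does not. The following standalone C sketch mirrors that relationship; the types and helpers here are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures and helpers. */
struct request_queue { int in_flight; };
struct request { struct request_queue *q; };

/* Internal dequeue: bookkeeping only, no timeout timer (mirrors elevator.c). */
static void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	q->in_flight++;
}

/* Stand-in for blk_add_timer(): arms the per-request timeout. */
static void blk_add_timer(struct request *rq)
{
	printf("timeout timer armed\n");
}

/*
 * Driver-facing dequeue: the request is handed to the hardware, so the
 * timeout timer must be started (mirrors the new blk-core.c helper).
 */
static void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
	blk_add_timer(req);
}

int main(void)
{
	struct request_queue q = { 0 };
	struct request rq = { &q };

	blkdev_dequeue_request(&rq);	/* driver path: timer armed */
	elv_dequeue_request(&q, &rq);	/* internal path: no timer  */
	printf("in_flight = %d\n", q.in_flight);
	return 0;
}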
@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 	/*
	 * Prep proxy barrier request.
	 */
-	blkdev_dequeue_request(rq);
+	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
 	blk_rq_init(q, rq);
@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
-			blkdev_dequeue_request(rq);
+			elv_dequeue_request(q, rq);
 			if (__blk_end_request(rq, -EOPNOTSUPP,
					      blk_rq_bytes(rq)))
 				BUG();
@@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 			   1 << QUEUE_FLAG_STACKABLE);
 	q->queue_lock = lock;
 
-	blk_queue_segment_boundary(q, 0xffffffff);
+	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
 	blk_queue_make_request(q, __make_request);
 	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -1636,6 +1636,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
+/**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+	elv_dequeue_request(req->q, req);
+
+	/*
+	 * We are now handing the request to the hardware, add the
+	 * timeout handler.
+	 */
+	blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
@@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error)
 		blk_queue_end_tag(req->q, req);
 
 	if (blk_queued_rq(req))
-		blkdev_dequeue_request(req);
+		elv_dequeue_request(req->q, req);
 
 	if (unlikely(laptop_mode) && blk_fs_request(req))
 		laptop_io_completion();
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		 */
 		bio_get(bio);
 		bio_endio(bio, 0);
-		bio_unmap_user(bio);
+		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
 
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_requests = BLKDEV_MAX_RQ;
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
 	q->make_request_fn = mfn;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	/* zero is "infinity" */
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
 	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
@@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq))
 		q->in_flight++;
-
-	/*
-	 * We are now handing the request to the hardware, add the
-	 * timeout handler.
-	 */
-	blk_add_timer(rq);
 }
-EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
 {
@@ -1102,6 +1102,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 			kfree(disk);
 			return NULL;
 		}
+		disk->node_id = node_id;
 		if (disk_expand_part_tbl(disk, 0)) {
 			free_part_stats(&disk->part0);
 			kfree(disk);
@@ -1116,7 +1117,6 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 		device_initialize(disk_to_dev(disk));
 		INIT_WORK(&disk->async_notify,
			  media_change_notify_thread);
-		disk->node_id = node_id;
 	}
 	return disk;
 }
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
-		rs->seg_boundary_mask = -1;
+		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	if (!rs->bounce_pfn)
 		rs->bounce_pfn = -1;
 }
@@ -786,6 +786,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
+extern void blkdev_dequeue_request(struct request *req);
+
 /*
  * blk_end_request() and friends.
  * __blk_end_request() and end_request() must be called with
@@ -820,11 +822,6 @@ extern void blk_update_request(struct request *rq, int error,
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
-static inline void blkdev_dequeue_request(struct request *req)
-{
-	elv_dequeue_request(req->q, req);
-}
-
 /*
  * Access functions for manipulating queue properties
  */
@@ -921,6 +918,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_SEGMENT_SIZE 65536
 
+#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
+
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
 static inline int queue_hardsect_size(struct request_queue *q)
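The seg_boundary_mask hunks above do two things: every queue now starts from BLK_SEG_BOUNDARY_MASK instead of a bare 0xffffffff (or nothing at all in blk_queue_make_request), and blk_queue_stack_limits() propagates the mask so a zero "unset" value from one level never wins. A minimal sketch of that stacking rule follows; min_not_zero() here is a local stand-in with the same zero-means-unset behavior, not the kernel macro.

#include <stdio.h>

#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL

/* Local stand-in for the kernel's min_not_zero(): zero means "unset". */
static unsigned long min_not_zero(unsigned long x, unsigned long y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	unsigned long top = BLK_SEG_BOUNDARY_MASK;	/* stacked (dm/md) queue   */
	unsigned long bottom = 0xFFFFUL;		/* more restrictive device */

	/* Mirrors the line added to blk_queue_stack_limits(). */
	top = min_not_zero(top, bottom);
	printf("stacked seg_boundary_mask = %#lx\n", top);
	return 0;
}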