Merge branch 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block: (65 commits)
  Documentation/iostats.txt: bit-size reference etc.
  cfq-iosched: removing unnecessary think time checking
  cfq-iosched: Don't clear queue stats when preempt.
  blk-throttle: Reset group slice when limits are changed
  blk-cgroup: Only give unaccounted_time under debug
  cfq-iosched: Don't set active queue in preempt
  block: fix non-atomic access to genhd inflight structures
  block: attempt to merge with existing requests on plug flush
  block: NULL dereference on error path in __blkdev_get()
  cfq-iosched: Don't update group weights when on service tree
  fs: assign sb->s_bdi to default_backing_dev_info if the bdi is going away
  block: Require subsystems to explicitly allocate bio_set integrity mempool
  jbd2: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  jbd: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  fs: make fsync_buffers_list() plug
  mm: make generic_writepages() use plugging
  blk-cgroup: Add unaccounted time to timeslice_used.
  block: fixup plugging stubs for !CONFIG_BLOCK
  block: remove obsolete comments for blkdev_issue_zeroout.
  blktrace: Use rq->cmd_flags directly in blk_add_trace_rq.
  ...

Fix up conflicts in fs/{aio.c,super.c}
commit 6c51038900
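As background for the plugging changes pulled in here, below is a minimal, illustrative sketch (not part of this merge) of how a submitter batches bios behind the on-stack, per-task plug that this series introduces. blk_start_plug(), submit_bio() and blk_finish_plug() are the interfaces actually added or used by the diff below; the submit_read_batch() helper and its arguments are made up for the example.

```c
#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Illustrative only: collect several bios behind an on-stack plug so the
 * block layer can merge and sort them (see attempt_plug_merge() and
 * flush_plug_list() in the diff) before they reach the driver.
 */
static void submit_read_batch(struct bio **bios, int nr)
{
    struct blk_plug plug;
    int i;

    blk_start_plug(&plug);      /* requests now collect on current->plug */
    for (i = 0; i < nr; i++)
        submit_bio(READ, bios[i]);
    blk_finish_plug(&plug);     /* sort, merge and run the affected queues */
}
```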
@@ -963,11 +963,6 @@ elevator_dispatch_fn*	fills the dispatch queue with ready requests.
 
 elevator_add_req_fn*	called to add a new request into the scheduler
 
-elevator_queue_empty_fn	returns true if the merge queue is empty.
-			Drivers shouldn't use this, but rather check
-			if elv_next_request is NULL (without losing the
-			request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn	These return the request before or after the
 			one specified in disk sort order. Used by the
@@ -140,7 +140,7 @@ Proportional weight policy files
 	- Specifies per cgroup weight. This is default weight of the group
 	  on all the devices until and unless overridden by per device rule.
 	  (See blkio.weight_device).
-	  Currently allowed range of weights is from 100 to 1000.
+	  Currently allowed range of weights is from 10 to 1000.
 
 - blkio.weight_device
 	- One can specify per cgroup per device rules using this interface.
@@ -343,34 +343,6 @@ Common files among various policies
 
 CFQ sysfs tunable
 =================
-/sys/block/<disk>/queue/iosched/group_isolation
------------------------------------------------
-
-If group_isolation=1, it provides stronger isolation between groups at the
-expense of throughput. By default group_isolation is 0. In general that
-means that if group_isolation=0, expect fairness for sequential workload
-only. Set group_isolation=1 to see fairness for random IO workload also.
-
-Generally CFQ will put random seeky workload in sync-noidle category. CFQ
-will disable idling on these queues and it does a collective idling on group
-of such queues. Generally these are slow moving queues and if there is a
-sync-noidle service tree in each group, that group gets exclusive access to
-disk for certain period. That means it will bring the throughput down if
-group does not have enough IO to drive deeper queue depths and utilize disk
-capacity to the fullest in the slice allocated to it. But the flip side is
-that even a random reader should get better latencies and overall throughput
-if there are lots of sequential readers/sync-idle workload running in the
-system.
-
-If group_isolation=0, then CFQ automatically moves all the random seeky queues
-in the root group. That means there will be no service differentiation for
-that kind of workload. This leads to better throughput as we do collective
-idling on root sync-noidle tree.
-
-By default one should run with group_isolation=0. If that is not sufficient
-and one wants stronger isolation between groups, then set group_isolation=1
-but this will come at cost of reduced throughput.
-
 /sys/block/<disk>/queue/iosched/slice_idle
 ------------------------------------------
 On a faster hardware CFQ can be slow, especially with sequential workload.
@@ -1,8 +1,6 @@
 I/O statistics fields
 ---------------
 
-Last modified Sep 30, 2003
-
 Since 2.4.20 (and some versions before, with patches), and 2.5.45,
 more extensive disk statistics have been introduced to help measure disk
 activity. Tools such as sar and iostat typically interpret these and do
@@ -46,11 +44,12 @@ the above example, the first field of statistics would be 446216.
 By contrast, in 2.6 if you look at /sys/block/hda/stat, you'll
 find just the eleven fields, beginning with 446216. If you look at
 /proc/diskstats, the eleven fields will be preceded by the major and
-minor device numbers, and device name. Each of these formats provide
+minor device numbers, and device name. Each of these formats provides
 eleven fields of statistics, each meaning exactly the same things.
 All fields except field 9 are cumulative since boot. Field 9 should
-go to zero as I/Os complete; all others only increase. Yes, these are
-32 bit unsigned numbers, and on a very busy or long-lived system they
+go to zero as I/Os complete; all others only increase (unless they
+overflow and wrap). Yes, these are (32-bit or 64-bit) unsigned long
+(native word size) numbers, and on a very busy or long-lived system they
 may wrap. Applications should be prepared to deal with that; unless
 your observations are measured in large numbers of minutes or hours,
 they should not wrap twice before you notice them.
@@ -96,11 +95,11 @@ introduced when changes collide, so (for instance) adding up all the
 read I/Os issued per partition should equal those made to the disks ...
 but due to the lack of locking it may only be very close.
 
-In 2.6, there are counters for each cpu, which made the lack of locking
-almost a non-issue. When the statistics are read, the per-cpu counters
-are summed (possibly overflowing the unsigned 32-bit variable they are
+In 2.6, there are counters for each CPU, which make the lack of locking
+almost a non-issue. When the statistics are read, the per-CPU counters
+are summed (possibly overflowing the unsigned long variable they are
 summed to) and the result given to the user. There is no convenient
-user interface for accessing the per-cpu counters themselves.
+user interface for accessing the per-CPU counters themselves.
 
 Disks vs Partitions
 -------------------
@@ -371,12 +371,14 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
+				unsigned long unaccounted_time)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	blkg->stats.time += time;
+	blkg->stats.unaccounted_time += unaccounted_time;
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
@@ -604,6 +606,9 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.sectors, cb, dev);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.unaccounted_time, cb, dev);
 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
 		uint64_t sum = blkg->stats.avg_queue_size_sum;
 		uint64_t samples = blkg->stats.avg_queue_size_samples;
@@ -1125,6 +1130,9 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
 			return blkio_read_blkg_stats(blkcg, cft, cb,
 						BLKIO_STAT_QUEUED, 1);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+		case BLKIO_PROP_unaccounted_time:
+			return blkio_read_blkg_stats(blkcg, cft, cb,
+						BLKIO_STAT_UNACCOUNTED_TIME, 0);
 		case BLKIO_PROP_dequeue:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
 						BLKIO_STAT_DEQUEUE, 0);
@@ -1382,6 +1390,12 @@ struct cftype blkio_files[] = {
 				BLKIO_PROP_dequeue),
 		.read_map = blkiocg_file_read_map,
 	},
+	{
+		.name = "unaccounted_time",
+		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+				BLKIO_PROP_unaccounted_time),
+		.read_map = blkiocg_file_read_map,
+	},
 #endif
 };
 
@@ -49,6 +49,8 @@ enum stat_type {
 	/* All the single valued stats go below this */
 	BLKIO_STAT_TIME,
 	BLKIO_STAT_SECTORS,
+	/* Time not charged to this cgroup */
+	BLKIO_STAT_UNACCOUNTED_TIME,
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	BLKIO_STAT_AVG_QUEUE_SIZE,
 	BLKIO_STAT_IDLE_TIME,
@@ -81,6 +83,7 @@ enum blkcg_file_name_prop {
 	BLKIO_PROP_io_serviced,
 	BLKIO_PROP_time,
 	BLKIO_PROP_sectors,
+	BLKIO_PROP_unaccounted_time,
 	BLKIO_PROP_io_service_time,
 	BLKIO_PROP_io_wait_time,
 	BLKIO_PROP_io_merged,
@@ -114,6 +117,8 @@ struct blkio_group_stats {
 	/* total disk time and nr sectors dispatched by this group */
 	uint64_t time;
 	uint64_t sectors;
+	/* Time not charged to this cgroup */
+	uint64_t unaccounted_time;
 	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Sum of number of IOs queued across all samples */
@@ -240,7 +245,7 @@ static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
 
 #endif
 
-#define BLKIO_WEIGHT_MIN	100
+#define BLKIO_WEIGHT_MIN	10
 #define BLKIO_WEIGHT_MAX	1000
 #define BLKIO_WEIGHT_DEFAULT	500
 
@@ -293,7 +298,8 @@ extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 						void *key);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-					unsigned long time);
+					unsigned long time,
+					unsigned long unaccounted_time);
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
 						bool direction, bool sync);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
@@ -319,7 +325,9 @@ blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
 static inline struct blkio_group *
 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-						unsigned long time) {}
+						unsigned long time,
+						unsigned long unaccounted_time)
+{}
 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 						uint64_t bytes, bool direction, bool sync) {}
 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
block/blk-core.c (652 changed lines)
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
+#include <linux/list_sort.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -149,39 +150,29 @@ EXPORT_SYMBOL(blk_rq_init);
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
 {
-	struct request_queue *q = rq->q;
-
-	if (&q->flush_rq != rq) {
-		if (error)
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
-		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-			error = -EIO;
-
-		if (unlikely(nbytes > bio->bi_size)) {
-			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-			       __func__, nbytes, bio->bi_size);
-			nbytes = bio->bi_size;
-		}
-
-		if (unlikely(rq->cmd_flags & REQ_QUIET))
-			set_bit(BIO_QUIET, &bio->bi_flags);
-
-		bio->bi_size -= nbytes;
-		bio->bi_sector += (nbytes >> 9);
-
-		if (bio_integrity(bio))
-			bio_integrity_advance(bio, nbytes);
-
-		if (bio->bi_size == 0)
-			bio_endio(bio, error);
-	} else {
-		/*
-		 * Okay, this is the sequenced flush request in
-		 * progress, just record the error;
-		 */
-		if (error && !q->flush_err)
-			q->flush_err = error;
-	}
+	if (error)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		error = -EIO;
+
+	if (unlikely(nbytes > bio->bi_size)) {
+		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
+		       __func__, nbytes, bio->bi_size);
+		nbytes = bio->bi_size;
+	}
+
+	if (unlikely(rq->cmd_flags & REQ_QUIET))
+		set_bit(BIO_QUIET, &bio->bi_flags);
+
+	bio->bi_size -= nbytes;
+	bio->bi_sector += (nbytes >> 9);
+
+	if (bio_integrity(bio))
+		bio_integrity_advance(bio, nbytes);
+
+	/* don't actually finish bio if it's part of flush sequence */
+	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+		bio_endio(bio, error);
 }
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -208,135 +199,43 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	/*
-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-	 * which will restart the queueing
-	 */
-	if (blk_queue_stopped(q))
-		return;
-
-	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		trace_block_plug(q);
-	}
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q:    The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_plug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-		return 0;
-
-	del_timer(&q->unplug_timer);
-	return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-		return;
-
-	q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-	if (blk_queue_plugged(q)) {
-		spin_lock_irq(q->queue_lock);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-				   struct page *page)
-{
-	struct request_queue *q = bdi->unplug_io_data;
-
-	blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, unplug_work);
-
-	trace_block_unplug_io(q);
-	q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-	struct request_queue *q = (struct request_queue *)data;
-
-	trace_block_unplug_timer(q);
-	kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		trace_block_unplug_io(q);
-		q->unplug_fn(q);
-	}
-}
-EXPORT_SYMBOL(blk_unplug);
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+ */
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+	/*
+	 * If the current process is plugged and has barriers submitted,
+	 * we will livelock if we don't unplug first.
+	 */
+	blk_flush_plug(current);
+}
+
+static void blk_delay_work(struct work_struct *work)
+{
+	struct request_queue *q;
+
+	q = container_of(work, struct request_queue, delay_work.work);
+	spin_lock_irq(q->queue_lock);
+	__blk_run_queue(q, false);
+	spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q:		The &struct request_queue in question
+ * @msecs:	Delay in msecs
+ *
+ * Description:
+ *   Sometimes queueing needs to be postponed for a little while, to allow
+ *   resources to come back. This function will make sure that queueing is
+ *   restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
 
 /**
  * blk_start_queue - restart a previously stopped queue
@@ -372,7 +271,7 @@ EXPORT_SYMBOL(blk_start_queue);
 **/
 void blk_stop_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -390,13 +289,16 @@ EXPORT_SYMBOL(blk_stop_queue);
 * that its ->make_request_fn will not re-add plugging prior to calling
 * this function.
 *
+ * This function does not cancel any asynchronous activity arising
+ * out of elevator or throttling code. That would require elevaotor_exit()
+ * and blk_throtl_exit() to be called with queue lock initialized.
+ *
 */
 void blk_sync_queue(struct request_queue *q)
 {
-	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
-	cancel_work_sync(&q->unplug_work);
-	throtl_shutdown_timer_wq(q);
+	cancel_delayed_work_sync(&q->delay_work);
+	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -412,14 +314,9 @@ EXPORT_SYMBOL(blk_sync_queue);
 */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
-	blk_remove_plug(q);
-
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	if (elv_queue_empty(q))
-		return;
-
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
@@ -427,10 +324,8 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
+	} else
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -457,6 +352,11 @@ void blk_put_queue(struct request_queue *q)
 	kobject_put(&q->kobj);
 }
 
+/*
+ * Note: If a driver supplied the queue lock, it should not zap that lock
+ * unexpectedly as some queue cleanup components like elevator_exit() and
+ * blk_throtl_exit() need queue lock.
+ */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	/*
@@ -475,6 +375,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	if (q->elevator)
 		elevator_exit(q->elevator);
 
+	blk_throtl_exit(q);
+
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
@@ -517,8 +419,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-	q->backing_dev_info.unplug_io_data = q;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -538,17 +438,24 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
-	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
-	INIT_LIST_HEAD(&q->pending_flushes);
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
+	INIT_LIST_HEAD(&q->flush_queue[0]);
+	INIT_LIST_HEAD(&q->flush_queue[1]);
+	INIT_LIST_HEAD(&q->flush_data_in_flight);
+	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
+	/*
+	 * By default initialize queue_lock to internal lock and driver can
+	 * override it later if need be.
+	 */
+	q->queue_lock = &q->__queue_lock;
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -631,9 +538,11 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
-	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
-	q->queue_lock		= lock;
+
+	/* Override internal queue lock with supplied lock pointer */
+	if (lock)
+		q->queue_lock	= lock;
 
 	/*
 	 * This also sets hw/phys segments, boundary and size
@@ -666,6 +575,8 @@ int blk_get_queue(struct request_queue *q)
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
@@ -761,6 +672,25 @@ static void freed_request(struct request_queue *q, int sync, int priv)
 		__freed_request(q, sync ^ 1);
 }
 
+/*
+ * Determine if elevator data should be initialized when allocating the
+ * request associated with @bio.
+ */
+static bool blk_rq_should_init_elevator(struct bio *bio)
+{
+	if (!bio)
+		return true;
+
+	/*
+	 * Flush requests do not use the elevator so skip initialization.
+	 * This allows a request to share the flush and elevator data.
+	 */
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+		return false;
+
+	return true;
+}
+
 /*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
@@ -773,7 +703,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	int may_queue, priv;
+	int may_queue, priv = 0;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -817,9 +747,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
-	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (priv)
-		rl->elvpriv++;
+	if (blk_rq_should_init_elevator(bio)) {
+		priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+		if (priv)
+			rl->elvpriv++;
+	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
@@ -866,8 +798,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
@@ -888,7 +820,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 		trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
 		io_schedule();
 
@@ -1010,6 +941,13 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_requeue_request);
 
+static void add_acct_request(struct request_queue *q, struct request *rq,
+			     int where)
+{
+	drive_stat_acct(rq, 1);
+	__elv_add_request(q, rq, where);
+}
+
 /**
 * blk_insert_request - insert a special request into a request queue
 * @q:	request queue where request should be inserted
@@ -1052,8 +990,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
-	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, where, 0);
+	add_acct_request(q, rq, where);
 	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1174,6 +1111,113 @@ void blk_add_request_payload(struct request *rq, struct page *page,
 }
 EXPORT_SYMBOL_GPL(blk_add_request_payload);
 
+static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+				   struct bio *bio)
+{
+	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+
+	/*
+	 * Debug stuff, kill later
+	 */
+	if (!rq_mergeable(req)) {
+		blk_dump_rq_flags(req, "back");
+		return false;
+	}
+
+	if (!ll_back_merge_fn(q, req, bio))
+		return false;
+
+	trace_block_bio_backmerge(q, bio);
+
+	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+		blk_rq_set_mixed_merge(req);
+
+	req->biotail->bi_next = bio;
+	req->biotail = bio;
+	req->__data_len += bio->bi_size;
+	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+	drive_stat_acct(req, 0);
+	return true;
+}
+
+static bool bio_attempt_front_merge(struct request_queue *q,
+				    struct request *req, struct bio *bio)
+{
+	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	sector_t sector;
+
+	/*
+	 * Debug stuff, kill later
+	 */
+	if (!rq_mergeable(req)) {
+		blk_dump_rq_flags(req, "front");
+		return false;
+	}
+
+	if (!ll_front_merge_fn(q, req, bio))
+		return false;
+
+	trace_block_bio_frontmerge(q, bio);
+
+	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+		blk_rq_set_mixed_merge(req);
+
+	sector = bio->bi_sector;
+
+	bio->bi_next = req->bio;
+	req->bio = bio;
+
+	/*
+	 * may not be valid. if the low level driver said
+	 * it didn't need a bounce buffer then it better
+	 * not touch req->buffer either...
+	 */
+	req->buffer = bio_data(bio);
+	req->__sector = bio->bi_sector;
+	req->__data_len += bio->bi_size;
+	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+	drive_stat_acct(req, 0);
+	return true;
+}
+
+/*
+ * Attempts to merge with the plugged list in the current process. Returns
+ * true if merge was succesful, otherwise false.
+ */
+static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
+			       struct bio *bio)
+{
+	struct blk_plug *plug;
+	struct request *rq;
+	bool ret = false;
+
+	plug = tsk->plug;
+	if (!plug)
+		goto out;
+
+	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
+		int el_ret;
+
+		if (rq->q != q)
+			continue;
+
+		el_ret = elv_try_merge(rq, bio);
+		if (el_ret == ELEVATOR_BACK_MERGE) {
+			ret = bio_attempt_back_merge(q, rq, bio);
+			if (ret)
+				break;
+		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
+			ret = bio_attempt_front_merge(q, rq, bio);
+			if (ret)
+				break;
+		}
+	}
+out:
+	return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cpu = bio->bi_comp_cpu;
@@ -1189,26 +1233,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-/*
- * Only disabling plugging for non-rotational devices if it does tagging
- * as well, otherwise we do need the proper merging
- */
-static inline bool queue_should_plug(struct request_queue *q)
-{
-	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
-}
-
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
-	struct request *req;
-	int el_ret;
-	unsigned int bytes = bio->bi_size;
-	const unsigned short prio = bio_prio(bio);
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
-	const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
-	const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
-	int where = ELEVATOR_INSERT_SORT;
-	int rw_flags;
+	struct blk_plug *plug;
+	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+	struct request *req;
 
 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -1217,78 +1247,36 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	spin_lock_irq(q->queue_lock);
-
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
-		where = ELEVATOR_INSERT_FRONT;
-		goto get_rq;
-	}
-
-	if (elv_queue_empty(q))
-		goto get_rq;
-
-	el_ret = elv_merge(q, &req, bio);
-	switch (el_ret) {
-	case ELEVATOR_BACK_MERGE:
-		BUG_ON(!rq_mergeable(req));
-
-		if (!ll_back_merge_fn(q, req, bio))
-			break;
-
-		trace_block_bio_backmerge(q, bio);
-
-		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
-			blk_rq_set_mixed_merge(req);
-
-		req->biotail->bi_next = bio;
-		req->biotail = bio;
-		req->__data_len += bytes;
-		req->ioprio = ioprio_best(req->ioprio, prio);
-		if (!blk_rq_cpu_valid(req))
-			req->cpu = bio->bi_comp_cpu;
-		drive_stat_acct(req, 0);
-		elv_bio_merged(q, req, bio);
-		if (!attempt_back_merge(q, req))
-			elv_merged_request(q, req, el_ret);
-		goto out;
-
-	case ELEVATOR_FRONT_MERGE:
-		BUG_ON(!rq_mergeable(req));
-
-		if (!ll_front_merge_fn(q, req, bio))
-			break;
-
-		trace_block_bio_frontmerge(q, bio);
-
-		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
-			blk_rq_set_mixed_merge(req);
-			req->cmd_flags &= ~REQ_FAILFAST_MASK;
-			req->cmd_flags |= ff;
-		}
-
-		bio->bi_next = req->bio;
-		req->bio = bio;
-
-		/*
-		 * may not be valid. if the low level driver said
-		 * it didn't need a bounce buffer then it better
-		 * not touch req->buffer either...
-		 */
-		req->buffer = bio_data(bio);
-		req->__sector = bio->bi_sector;
-		req->__data_len += bytes;
-		req->ioprio = ioprio_best(req->ioprio, prio);
-		if (!blk_rq_cpu_valid(req))
-			req->cpu = bio->bi_comp_cpu;
-		drive_stat_acct(req, 0);
-		elv_bio_merged(q, req, bio);
-		if (!attempt_front_merge(q, req))
-			elv_merged_request(q, req, el_ret);
-		goto out;
-
-	/* ELV_NO_MERGE: elevator says don't/can't merge. */
-	default:
-		;
-	}
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+		spin_lock_irq(q->queue_lock);
+		where = ELEVATOR_INSERT_FLUSH;
+		goto get_rq;
+	}
+
+	/*
+	 * Check if we can merge with the plugged list before grabbing
+	 * any locks.
+	 */
+	if (attempt_plug_merge(current, q, bio))
+		goto out;
+
+	spin_lock_irq(q->queue_lock);
+
+	el_ret = elv_merge(q, &req, bio);
+	if (el_ret == ELEVATOR_BACK_MERGE) {
+		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+		if (bio_attempt_back_merge(q, req, bio)) {
+			if (!attempt_back_merge(q, req))
+				elv_merged_request(q, req, el_ret);
+			goto out_unlock;
+		}
+	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
+		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+		if (bio_attempt_front_merge(q, req, bio)) {
+			if (!attempt_front_merge(q, req))
+				elv_merged_request(q, req, el_ret);
+			goto out_unlock;
+		}
+	}
 
 get_rq:
@@ -1315,20 +1303,35 @@ get_rq:
 	 */
 	init_request_from_bio(req, bio);
 
-	spin_lock_irq(q->queue_lock);
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-	    bio_flagged(bio, BIO_CPU_AFFINE))
-		req->cpu = blk_cpu_to_group(smp_processor_id());
-	if (queue_should_plug(q) && elv_queue_empty(q))
-		blk_plug_device(q);
+	    bio_flagged(bio, BIO_CPU_AFFINE)) {
+		req->cpu = blk_cpu_to_group(get_cpu());
+		put_cpu();
+	}
 
-	/* insert the request into the elevator */
-	drive_stat_acct(req, 1);
-	__elv_add_request(q, req, where, 0);
+	plug = current->plug;
+	if (plug) {
+		if (!plug->should_sort && !list_empty(&plug->list)) {
+			struct request *__rq;
+
+			__rq = list_entry_rq(plug->list.prev);
+			if (__rq->q != q)
+				plug->should_sort = 1;
+		}
+		/*
+		 * Debug flag, kill later
+		 */
+		req->cmd_flags |= REQ_ON_PLUG;
+		list_add_tail(&req->queuelist, &plug->list);
+		drive_stat_acct(req, 1);
+	} else {
+		spin_lock_irq(q->queue_lock);
+		add_acct_request(q, req, where);
+		__blk_run_queue(q, false);
+out_unlock:
+		spin_unlock_irq(q->queue_lock);
+	}
 out:
-	if (unplug || !queue_should_plug(q))
-		__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
 	return 0;
 }
 
@@ -1731,9 +1734,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-
+	add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	return 0;
@@ -1805,7 +1806,7 @@ static void blk_account_io_done(struct request *req)
 	 * normal IO on queueing nor completion.  Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
+	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
@@ -2628,6 +2629,113 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+			struct delayed_work *dwork, unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
+#define PLUG_MAGIC	0x91827364
+
+void blk_start_plug(struct blk_plug *plug)
+{
+	struct task_struct *tsk = current;
+
+	plug->magic = PLUG_MAGIC;
+	INIT_LIST_HEAD(&plug->list);
+	plug->should_sort = 0;
+
+	/*
+	 * If this is a nested plug, don't actually assign it. It will be
+	 * flushed on its own.
+	 */
+	if (!tsk->plug) {
+		/*
+		 * Store ordering should not be needed here, since a potential
+		 * preempt will imply a full memory barrier
+		 */
+		tsk->plug = plug;
+	}
+}
+EXPORT_SYMBOL(blk_start_plug);
+
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct request *rqa = container_of(a, struct request, queuelist);
+	struct request *rqb = container_of(b, struct request, queuelist);
+
+	return !(rqa->q == rqb->q);
+}
+
+static void flush_plug_list(struct blk_plug *plug)
+{
+	struct request_queue *q;
+	unsigned long flags;
+	struct request *rq;
+
+	BUG_ON(plug->magic != PLUG_MAGIC);
+
+	if (list_empty(&plug->list))
+		return;
+
+	if (plug->should_sort)
+		list_sort(NULL, &plug->list, plug_rq_cmp);
+
+	q = NULL;
+	local_irq_save(flags);
+	while (!list_empty(&plug->list)) {
+		rq = list_entry_rq(plug->list.next);
+		list_del_init(&rq->queuelist);
+		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
+		BUG_ON(!rq->q);
+		if (rq->q != q) {
+			if (q) {
+				__blk_run_queue(q, false);
+				spin_unlock(q->queue_lock);
+			}
+			q = rq->q;
+			spin_lock(q->queue_lock);
+		}
+		rq->cmd_flags &= ~REQ_ON_PLUG;
+
+		/*
+		 * rq is already accounted, so use raw insert
+		 */
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+	}
+
+	if (q) {
+		__blk_run_queue(q, false);
+		spin_unlock(q->queue_lock);
+	}
+
+	BUG_ON(!list_empty(&plug->list));
+	local_irq_restore(flags);
+}
+
+static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
+{
+	flush_plug_list(plug);
+
+	if (plug == tsk->plug)
+		tsk->plug = NULL;
+}
+
+void blk_finish_plug(struct blk_plug *plug)
+{
+	if (plug)
+		__blk_finish_plug(current, plug);
+}
+EXPORT_SYMBOL(blk_finish_plug);
+
+void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
+{
+	__blk_finish_plug(tsk, plug);
+	tsk->plug = plug;
+}
+EXPORT_SYMBOL(__blk_flush_plug);
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
-	__elv_add_request(q, rq, where, 1);
-	__generic_unplug_device(q);
+	__elv_add_request(q, rq, where);
+	__blk_run_queue(q, false);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
@@ -1,6 +1,69 @@
 /*
  * Functions to sequence FLUSH and FUA writes.
+ *
+ * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
+ * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three
+ * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
+ * properties and hardware capability.
+ *
+ * If a request doesn't have data, only REQ_FLUSH makes sense, which
+ * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
+ * that the device cache should be flushed before the data is executed, and
+ * REQ_FUA means that the data must be on non-volatile media on request
+ * completion.
+ *
+ * If the device doesn't have writeback cache, FLUSH and FUA don't make any
+ * difference.  The requests are either completed immediately if there's no
+ * data or executed as normal requests otherwise.
+ *
+ * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
+ *
+ * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
+ * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ *
+ * The actual execution of flush is double buffered.  Whenever a request
+ * needs to execute PRE or POSTFLUSH, it queues at
+ * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
+ * flush is issued and the pending_idx is toggled.  When the flush
+ * completes, all the requests which were pending are proceeded to the next
+ * step.  This allows arbitrary merging of different types of FLUSH/FUA
+ * requests.
+ *
+ * Currently, the following conditions are used to determine when to issue
+ * flush.
+ *
+ * C1. At any given time, only one flush shall be in progress.  This makes
+ *     double buffering sufficient.
+ *
+ * C2. Flush is deferred if any request is executing DATA of its sequence.
+ *     This avoids issuing separate POSTFLUSHes for requests which shared
+ *     PREFLUSH.
+ *
+ * C3. The second condition is ignored if there is a request which has
+ *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
+ *     starvation in the unlikely case where there are continuous stream of
+ *     FUA (without FLUSH) requests.
+ *
+ * For devices which support FUA, it isn't clear whether C2 (and thus C3)
+ * is beneficial.
+ *
+ * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
+ * Once while executing DATA and again after the whole sequence is
+ * complete.  The first completion updates the contained bio but doesn't
+ * finish it so that the bio submitter is notified only after the whole
+ * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
+ * req_bio_endio().
+ *
+ * The above peculiarity requires that each FLUSH/FUA request has only one
+ * bio attached to it, which is guaranteed as they aren't allowed to be
+ * merged in the usual way.
  */
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
@@ -11,58 +74,142 @@
 /* FLUSH/FUA sequences */
 enum {
-    QUEUE_FSEQ_STARTED  = (1 << 0), /* flushing in progress */
-    QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */
-    QUEUE_FSEQ_DATA     = (1 << 2), /* data write in progress */
-    QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
-    QUEUE_FSEQ_DONE     = (1 << 4),
-};
-
-static struct request *queue_next_fseq(struct request_queue *q);
-
-unsigned blk_flush_cur_seq(struct request_queue *q)
-{
-    if (!q->flush_seq)
-        return 0;
-    return 1 << ffz(q->flush_seq);
-}
-
-static struct request *blk_flush_complete_seq(struct request_queue *q,
-                                              unsigned seq, int error)
-{
-    struct request *next_rq = NULL;
-
-    if (error && !q->flush_err)
-        q->flush_err = error;
-
-    BUG_ON(q->flush_seq & seq);
-    q->flush_seq |= seq;
-
-    if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
-        /* not complete yet, queue the next flush sequence */
-        next_rq = queue_next_fseq(q);
-    } else {
-        /* complete this flush request */
-        __blk_end_request_all(q->orig_flush_rq, q->flush_err);
-        q->orig_flush_rq = NULL;
-        q->flush_seq = 0;
-
-        /* dispatch the next flush if there's one */
-        if (!list_empty(&q->pending_flushes)) {
-            next_rq = list_entry_rq(q->pending_flushes.next);
-            list_move(&next_rq->queuelist, &q->queue_head);
-        }
-    }
-    return next_rq;
-}
-
-static void blk_flush_complete_seq_end_io(struct request_queue *q,
-                                          unsigned seq, int error)
-{
-    bool was_empty = elv_queue_empty(q);
-    struct request *next_rq;
-
-    next_rq = blk_flush_complete_seq(q, seq, error);
-
+    REQ_FSEQ_PREFLUSH   = (1 << 0), /* pre-flushing in progress */
+    REQ_FSEQ_DATA       = (1 << 1), /* data write in progress */
+    REQ_FSEQ_POSTFLUSH  = (1 << 2), /* post-flushing in progress */
+    REQ_FSEQ_DONE       = (1 << 3),
+
+    REQ_FSEQ_ACTIONS    = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
+                          REQ_FSEQ_POSTFLUSH,
+
+    /*
+     * If flush has been pending longer than the following timeout,
+     * it's issued even if flush_data requests are still in flight.
+     */
+    FLUSH_PENDING_TIMEOUT = 5 * HZ,
+};
+
+static bool blk_kick_flush(struct request_queue *q);
+
+static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+{
+    unsigned int policy = 0;
+
+    if (fflags & REQ_FLUSH) {
+        if (rq->cmd_flags & REQ_FLUSH)
+            policy |= REQ_FSEQ_PREFLUSH;
+        if (blk_rq_sectors(rq))
+            policy |= REQ_FSEQ_DATA;
+        if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+            policy |= REQ_FSEQ_POSTFLUSH;
+    }
+    return policy;
+}
+
+static unsigned int blk_flush_cur_seq(struct request *rq)
+{
+    return 1 << ffz(rq->flush.seq);
+}
+
+static void blk_flush_restore_request(struct request *rq)
+{
+    /*
+     * After flush data completion, @rq->bio is %NULL but we need to
+     * complete the bio again.  @rq->biotail is guaranteed to equal the
+     * original @rq->bio.  Restore it.
+     */
+    rq->bio = rq->biotail;
+
+    /* make @rq a normal request */
+    rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+    rq->end_io = NULL;
+}
+
+/**
+ * blk_flush_complete_seq - complete flush sequence
+ * @rq: FLUSH/FUA request being sequenced
+ * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
+ * @error: whether an error occurred
+ *
+ * @rq just completed @seq part of its flush sequence, record the
+ * completion and trigger the next step.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if requests were added to the dispatch queue, %false otherwise.
+ */
+static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
+                                   int error)
+{
+    struct request_queue *q = rq->q;
+    struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+    bool queued = false;
+
+    BUG_ON(rq->flush.seq & seq);
+    rq->flush.seq |= seq;
+
+    if (likely(!error))
+        seq = blk_flush_cur_seq(rq);
+    else
+        seq = REQ_FSEQ_DONE;
+
+    switch (seq) {
+    case REQ_FSEQ_PREFLUSH:
+    case REQ_FSEQ_POSTFLUSH:
+        /* queue for flush */
+        if (list_empty(pending))
+            q->flush_pending_since = jiffies;
+        list_move_tail(&rq->flush.list, pending);
+        break;
+
+    case REQ_FSEQ_DATA:
+        list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
+        list_add(&rq->queuelist, &q->queue_head);
+        queued = true;
+        break;
+
+    case REQ_FSEQ_DONE:
+        /*
+         * @rq was previously adjusted by blk_flush_issue() for
+         * flush sequencing and may already have gone through the
+         * flush data request completion path.  Restore @rq for
+         * normal completion and end it.
+         */
+        BUG_ON(!list_empty(&rq->queuelist));
+        list_del_init(&rq->flush.list);
+        blk_flush_restore_request(rq);
+        __blk_end_request_all(rq, error);
+        break;
+
+    default:
+        BUG();
+    }
+
+    return blk_kick_flush(q) | queued;
+}
+
+static void flush_end_io(struct request *flush_rq, int error)
+{
+    struct request_queue *q = flush_rq->q;
+    struct list_head *running = &q->flush_queue[q->flush_running_idx];
+    bool queued = false;
+    struct request *rq, *n;
+
+    BUG_ON(q->flush_pending_idx == q->flush_running_idx);
+
+    /* account completion of the flush request */
+    q->flush_running_idx ^= 1;
+    elv_completed_request(q, flush_rq);
+
+    /* and push the waiting requests to the next stage */
+    list_for_each_entry_safe(rq, n, running, flush.list) {
+        unsigned int seq = blk_flush_cur_seq(rq);
+
+        BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+        queued |= blk_flush_complete_seq(rq, seq, error);
+    }
+
     /*
      * Moving a request silently to empty queue_head may stall the
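As a side note (not part of this patch), the blk_flush_policy() decision tree added above can be exercised in a trivial standalone C program. The flag values below are local stand-ins, not the kernel's REQ_* definitions; only the branching mirrors the hunk.

#include <stdio.h>

enum {
    REQ_FLUSH = 1 << 0,  /* queue/request asks for a cache flush */
    REQ_FUA   = 1 << 1,  /* queue supports / request asks for FUA */
};

enum {
    FSEQ_PREFLUSH  = 1 << 0,
    FSEQ_DATA      = 1 << 1,
    FSEQ_POSTFLUSH = 1 << 2,
};

/* Same decision tree as blk_flush_policy() in the hunk above. */
static unsigned flush_policy(unsigned queue_flags, unsigned req_flags,
                             unsigned sectors)
{
    unsigned policy = 0;

    if (queue_flags & REQ_FLUSH) {
        if (req_flags & REQ_FLUSH)
            policy |= FSEQ_PREFLUSH;
        if (sectors)
            policy |= FSEQ_DATA;
        if (!(queue_flags & REQ_FUA) && (req_flags & REQ_FUA))
            policy |= FSEQ_POSTFLUSH;
    }
    return policy;
}

int main(void)
{
    /* writeback cache, no FUA: FLUSH|FUA write with data -> PRE+DATA+POST (0x7) */
    printf("%x\n", flush_policy(REQ_FLUSH, REQ_FLUSH | REQ_FUA, 8));
    /* queue advertises FUA too -> no POSTFLUSH is needed (0x3) */
    printf("%x\n", flush_policy(REQ_FLUSH | REQ_FUA, REQ_FLUSH | REQ_FUA, 8));
    /* write-through queue with no flush flags -> nothing to sequence (0x0) */
    printf("%x\n", flush_policy(0, REQ_FLUSH | REQ_FUA, 8));
    return 0;
}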
@@ -70,127 +217,153 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
      * from request completion path and calling directly into
      * request_fn may confuse the driver.  Always use kblockd.
      */
-    if (was_empty && next_rq)
+    if (queued)
         __blk_run_queue(q, true);
 }
 
-static void pre_flush_end_io(struct request *rq, int error)
+/**
+ * blk_kick_flush - consider issuing flush request
+ * @q: request_queue being kicked
+ *
+ * Flush related states of @q have changed, consider issuing flush request.
+ * Please read the comment at the top of this file for more info.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if flush was issued, %false otherwise.
+ */
+static bool blk_kick_flush(struct request_queue *q)
 {
-    elv_completed_request(rq->q, rq);
-    blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
+    struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+    struct request *first_rq =
+        list_first_entry(pending, struct request, flush.list);
+
+    /* C1 described at the top of this file */
+    if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
+        return false;
+
+    /* C2 and C3 */
+    if (!list_empty(&q->flush_data_in_flight) &&
+        time_before(jiffies,
+                    q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
+        return false;
+
+    /*
+     * Issue flush and toggle pending_idx.  This makes pending_idx
+     * different from running_idx, which means flush is in flight.
+     */
+    blk_rq_init(q, &q->flush_rq);
+    q->flush_rq.cmd_type = REQ_TYPE_FS;
+    q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+    q->flush_rq.rq_disk = first_rq->rq_disk;
+    q->flush_rq.end_io = flush_end_io;
+
+    q->flush_pending_idx ^= 1;
+    elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
+    return true;
 }
 
 static void flush_data_end_io(struct request *rq, int error)
 {
-    elv_completed_request(rq->q, rq);
-    blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
-}
-
-static void post_flush_end_io(struct request *rq, int error)
-{
-    elv_completed_request(rq->q, rq);
-    blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
-}
-
-static void init_flush_request(struct request *rq, struct gendisk *disk)
-{
-    rq->cmd_type = REQ_TYPE_FS;
-    rq->cmd_flags = WRITE_FLUSH;
-    rq->rq_disk = disk;
-}
-
-static struct request *queue_next_fseq(struct request_queue *q)
-{
-    struct request *orig_rq = q->orig_flush_rq;
-    struct request *rq = &q->flush_rq;
-
-    blk_rq_init(q, rq);
-
-    switch (blk_flush_cur_seq(q)) {
-    case QUEUE_FSEQ_PREFLUSH:
-        init_flush_request(rq, orig_rq->rq_disk);
-        rq->end_io = pre_flush_end_io;
-        break;
-    case QUEUE_FSEQ_DATA:
-        init_request_from_bio(rq, orig_rq->bio);
-        /*
-         * orig_rq->rq_disk may be different from
-         * bio->bi_bdev->bd_disk if orig_rq got here through
-         * remapping drivers.  Make sure rq->rq_disk points
-         * to the same one as orig_rq.
-         */
-        rq->rq_disk = orig_rq->rq_disk;
-        rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
-        rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
-        rq->end_io = flush_data_end_io;
-        break;
-    case QUEUE_FSEQ_POSTFLUSH:
-        init_flush_request(rq, orig_rq->rq_disk);
-        rq->end_io = post_flush_end_io;
-        break;
-    default:
-        BUG();
-    }
-
-    elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
-    return rq;
-}
-
-struct request *blk_do_flush(struct request_queue *q, struct request *rq)
-{
-    unsigned int fflags = q->flush_flags; /* may change, cache it */
-    bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
-    bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
-    bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
-    unsigned skip = 0;
+    struct request_queue *q = rq->q;
 
     /*
-     * Special case.  If there's data but flush is not necessary,
-     * the request can be issued directly.
-     *
-     * Flush w/o data should be able to be issued directly too but
-     * currently some drivers assume that rq->bio contains
-     * non-zero data if it isn't NULL and empty FLUSH requests
-     * getting here usually have bio's without data.
+     * After populating an empty queue, kick it to avoid stall.  Read
+     * the comment in flush_end_io().
      */
-    if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
-        rq->cmd_flags &= ~REQ_FLUSH;
-        if (!has_fua)
-            rq->cmd_flags &= ~REQ_FUA;
-        return rq;
-    }
+    if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+        __blk_run_queue(q, true);
+}
+
+/**
+ * blk_insert_flush - insert a new FLUSH/FUA request
+ * @rq: request to insert
+ *
+ * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
+ * @rq is being submitted.  Analyze what needs to be done and put it on the
+ * right queue.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_insert_flush(struct request *rq)
+{
+    struct request_queue *q = rq->q;
+    unsigned int fflags = q->flush_flags; /* may change, cache */
+    unsigned int policy = blk_flush_policy(fflags, rq);
+
+    BUG_ON(rq->end_io);
+    BUG_ON(!rq->bio || rq->bio != rq->biotail);
 
     /*
-     * Sequenced flushes can't be processed in parallel.  If
-     * another one is already in progress, queue for later
-     * processing.
-     */
-    if (q->flush_seq) {
-        list_move_tail(&rq->queuelist, &q->pending_flushes);
-        return NULL;
-    }
-
-    /*
-     * Start a new flush sequence
-     */
-    q->flush_err = 0;
-    q->flush_seq |= QUEUE_FSEQ_STARTED;
-
-    /* adjust FLUSH/FUA of the original request and stash it away */
+     * @policy now records what operations need to be done.  Adjust
+     * REQ_FLUSH and FUA for the driver.
+     */
     rq->cmd_flags &= ~REQ_FLUSH;
-    if (!has_fua)
+    if (!(fflags & REQ_FUA))
         rq->cmd_flags &= ~REQ_FUA;
-    blk_dequeue_request(rq);
-    q->orig_flush_rq = rq;
 
-    /* skip unneded sequences and return the first one */
-    if (!do_preflush)
-        skip |= QUEUE_FSEQ_PREFLUSH;
-    if (!blk_rq_sectors(rq))
-        skip |= QUEUE_FSEQ_DATA;
-    if (!do_postflush)
-        skip |= QUEUE_FSEQ_POSTFLUSH;
-    return blk_flush_complete_seq(q, skip, 0);
+    /*
+     * If there's data but flush is not necessary, the request can be
+     * processed directly without going through flush machinery.  Queue
+     * for normal execution.
+     */
+    if ((policy & REQ_FSEQ_DATA) &&
+        !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+        list_add(&rq->queuelist, &q->queue_head);
+        return;
+    }
+
+    /*
+     * @rq should go through flush machinery.  Mark it part of flush
+     * sequence and submit for further processing.
+     */
+    memset(&rq->flush, 0, sizeof(rq->flush));
+    INIT_LIST_HEAD(&rq->flush.list);
+    rq->cmd_flags |= REQ_FLUSH_SEQ;
+    rq->end_io = flush_data_end_io;
+
+    blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+}
+
+/**
+ * blk_abort_flushes - @q is being aborted, abort flush requests
+ * @q: request_queue being aborted
+ *
+ * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
+ * FLUSH/FUA requests for abortion.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ */
+void blk_abort_flushes(struct request_queue *q)
+{
+    struct request *rq, *n;
+    int i;
+
+    /*
+     * Requests in flight for data are already owned by the dispatch
+     * queue or the device driver.  Just restore for normal completion.
+     */
+    list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
+        list_del_init(&rq->flush.list);
+        blk_flush_restore_request(rq);
+    }
+
+    /*
+     * We need to give away requests on flush queues.  Restore for
+     * normal completion and put them on the dispatch queue.
+     */
+    for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
+        list_for_each_entry_safe(rq, n, &q->flush_queue[i],
+                                 flush.list) {
+            list_del_init(&rq->flush.list);
+            blk_flush_restore_request(rq);
+            list_add_tail(&rq->queuelist, &q->queue_head);
+        }
+    }
 }
 
 static void bio_end_flush(struct bio *bio, int err)
@@ -136,8 +136,6 @@ static void bio_batch_end_io(struct bio *bio, int err)
  *
  * Description:
  *  Generate and issue number of bios with zerofiled pages.
- *  Send barrier at the beginning and at the end if requested. This guarantie
- *  correct request ordering. Empty barrier allow us to avoid post queue flush.
  */
 
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -465,3 +465,9 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 
     return 0;
 }
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+                          struct request *next)
+{
+    return attempt_merge(q, rq, next);
+}
@@ -164,24 +164,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
     blk_queue_congestion_threshold(q);
     q->nr_batching = BLK_BATCH_REQ;
 
-    q->unplug_thresh = 4;               /* hmm */
-    q->unplug_delay = msecs_to_jiffies(3);      /* 3 milliseconds */
-    if (q->unplug_delay == 0)
-        q->unplug_delay = 1;
-
-    q->unplug_timer.function = blk_unplug_timeout;
-    q->unplug_timer.data = (unsigned long)q;
-
     blk_set_default_limits(&q->limits);
     blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
-    /*
-     * If the caller didn't supply a lock, fall back to our embedded
-     * per-queue locks
-     */
-    if (!q->queue_lock)
-        q->queue_lock = &q->__queue_lock;
-
     /*
      * by default assume old behaviour and bounce for any highmem page
      */
@@ -471,8 +471,6 @@ static void blk_release_queue(struct kobject *kobj)
 
     blk_sync_queue(q);
 
-    blk_throtl_exit(q);
-
     if (rl->rq_pool)
         mempool_destroy(rl->rq_pool);
 
@@ -102,7 +102,7 @@ struct throtl_data
     /* Work for dispatching throttled bios */
     struct delayed_work throtl_work;
 
-    atomic_t limits_changed;
+    bool limits_changed;
 };
 
 enum tg_state_flags {
@@ -201,6 +201,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
     RB_CLEAR_NODE(&tg->rb_node);
     bio_list_init(&tg->bio_lists[0]);
     bio_list_init(&tg->bio_lists[1]);
+    td->limits_changed = false;
 
     /*
      * Take the initial reference that will be released on destroy
@@ -737,34 +738,36 @@ static void throtl_process_limit_change(struct throtl_data *td)
     struct throtl_grp *tg;
     struct hlist_node *pos, *n;
 
-    if (!atomic_read(&td->limits_changed))
+    if (!td->limits_changed)
         return;
 
-    throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
+    xchg(&td->limits_changed, false);
 
-    /*
-     * Make sure updates from throtl_update_blkio_group_read_bps() group
-     * of functions to tg->limits_changed are visible. We do not
-     * want update td->limits_changed to be visible but update to
-     * tg->limits_changed not being visible yet on this cpu. Hence
-     * the read barrier.
-     */
-    smp_rmb();
+    throtl_log(td, "limits changed");
 
     hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-        if (throtl_tg_on_rr(tg) && tg->limits_changed) {
-            throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
-                " riops=%u wiops=%u", tg->bps[READ],
-                tg->bps[WRITE], tg->iops[READ],
-                tg->iops[WRITE]);
-            tg_update_disptime(td, tg);
-            tg->limits_changed = false;
-        }
-    }
+        if (!tg->limits_changed)
+            continue;
 
-    smp_mb__before_atomic_dec();
-    atomic_dec(&td->limits_changed);
-    smp_mb__after_atomic_dec();
+        if (!xchg(&tg->limits_changed, false))
+            continue;
+
+        throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
+            " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
+            tg->iops[READ], tg->iops[WRITE]);
+
+        /*
+         * Restart the slices for both READ and WRITES. It
+         * might happen that a group's limit are dropped
+         * suddenly and we don't want to account recently
+         * dispatched IO with new low rate
+         */
+        throtl_start_new_slice(td, tg, 0);
+        throtl_start_new_slice(td, tg, 1);
+
+        if (throtl_tg_on_rr(tg))
+            tg_update_disptime(td, tg);
+    }
 }
 
 /* Dispatch throttled bios. Should be called without queue lock held. */
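For illustration only (not part of this patch): the hunk above replaces the atomic counter and explicit memory barriers with a plain flag that the worker consumes via xchg(), i.e. "read and clear in one atomic step". A minimal C11 userspace sketch of the same handshake, with made-up function names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool limits_changed;

static void updater_changes_a_limit(void)
{
    /* publish the new limit values first, then raise the flag */
    atomic_store(&limits_changed, true);
}

static void worker_processes_changes(void)
{
    /* fetch and clear atomically, the role xchg(&flag, false) plays above */
    if (!atomic_exchange(&limits_changed, false))
        return;
    printf("recomputing dispatch times\n");
}

int main(void)
{
    updater_changes_a_limit();
    worker_processes_changes();  /* sees the flag and clears it */
    worker_processes_changes();  /* nothing left to do */
    return 0;
}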
@@ -774,6 +777,7 @@ static int throtl_dispatch(struct request_queue *q)
     unsigned int nr_disp = 0;
     struct bio_list bio_list_on_stack;
     struct bio *bio;
+    struct blk_plug plug;
 
     spin_lock_irq(q->queue_lock);
 
@@ -802,9 +806,10 @@ out:
      * immediate dispatch
      */
     if (nr_disp) {
+        blk_start_plug(&plug);
         while((bio = bio_list_pop(&bio_list_on_stack)))
             generic_make_request(bio);
-        blk_unplug(q);
+        blk_finish_plug(&plug);
     }
     return nr_disp;
 }
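As a rough sketch of the pattern this hunk switches to (not copied from any file in the series): instead of calling blk_unplug() on the queue afterwards, a submitter batches its bios inside an on-stack plug. The helper name below is invented for the illustration.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Sketch: submit every bio on @list inside one on-stack plug. */
static void submit_bio_list_plugged(struct bio_list *list)
{
    struct blk_plug plug;
    struct bio *bio;

    blk_start_plug(&plug);
    while ((bio = bio_list_pop(list)))
        generic_make_request(bio);
    blk_finish_plug(&plug);  /* flushes whatever was batched */
}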
@@ -825,7 +830,8 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
     struct delayed_work *dwork = &td->throtl_work;
 
-    if (total_nr_queued(td) > 0) {
+    /* schedule work if limits changed even if no bio is queued */
+    if (total_nr_queued(td) > 0 || td->limits_changed) {
         /*
          * We might have a work scheduled to be executed in future.
          * Cancel that and schedule a new one.
@@ -898,6 +904,15 @@ void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
     spin_unlock_irqrestore(td->queue->queue_lock, flags);
 }
 
+static void throtl_update_blkio_group_common(struct throtl_data *td,
+                struct throtl_grp *tg)
+{
+    xchg(&tg->limits_changed, true);
+    xchg(&td->limits_changed, true);
+    /* Schedule a work now to process the limit change */
+    throtl_schedule_delayed_work(td, 0);
+}
+
 /*
  * For all update functions, key should be a valid pointer because these
  * update functions are called under blkcg_lock, that means, blkg is
@@ -911,64 +926,43 @@ static void throtl_update_blkio_group_read_bps(void *key,
         struct blkio_group *blkg, u64 read_bps)
 {
     struct throtl_data *td = key;
+    struct throtl_grp *tg = tg_of_blkg(blkg);
 
-    tg_of_blkg(blkg)->bps[READ] = read_bps;
-    /* Make sure read_bps is updated before setting limits_changed */
-    smp_wmb();
-    tg_of_blkg(blkg)->limits_changed = true;
-
-    /* Make sure tg->limits_changed is updated before td->limits_changed */
-    smp_mb__before_atomic_inc();
-    atomic_inc(&td->limits_changed);
-    smp_mb__after_atomic_inc();
-
-    /* Schedule a work now to process the limit change */
-    throtl_schedule_delayed_work(td, 0);
+    tg->bps[READ] = read_bps;
+    throtl_update_blkio_group_common(td, tg);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
         struct blkio_group *blkg, u64 write_bps)
 {
     struct throtl_data *td = key;
+    struct throtl_grp *tg = tg_of_blkg(blkg);
 
-    tg_of_blkg(blkg)->bps[WRITE] = write_bps;
-    smp_wmb();
-    tg_of_blkg(blkg)->limits_changed = true;
-    smp_mb__before_atomic_inc();
-    atomic_inc(&td->limits_changed);
-    smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td, 0);
+    tg->bps[WRITE] = write_bps;
+    throtl_update_blkio_group_common(td, tg);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
         struct blkio_group *blkg, unsigned int read_iops)
 {
     struct throtl_data *td = key;
+    struct throtl_grp *tg = tg_of_blkg(blkg);
 
-    tg_of_blkg(blkg)->iops[READ] = read_iops;
-    smp_wmb();
-    tg_of_blkg(blkg)->limits_changed = true;
-    smp_mb__before_atomic_inc();
-    atomic_inc(&td->limits_changed);
-    smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td, 0);
+    tg->iops[READ] = read_iops;
+    throtl_update_blkio_group_common(td, tg);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
         struct blkio_group *blkg, unsigned int write_iops)
 {
     struct throtl_data *td = key;
+    struct throtl_grp *tg = tg_of_blkg(blkg);
 
-    tg_of_blkg(blkg)->iops[WRITE] = write_iops;
-    smp_wmb();
-    tg_of_blkg(blkg)->limits_changed = true;
-    smp_mb__before_atomic_inc();
-    atomic_inc(&td->limits_changed);
-    smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td, 0);
+    tg->iops[WRITE] = write_iops;
+    throtl_update_blkio_group_common(td, tg);
 }
 
-void throtl_shutdown_timer_wq(struct request_queue *q)
+static void throtl_shutdown_wq(struct request_queue *q)
 {
     struct throtl_data *td = q->td;
 
@@ -1009,20 +1003,28 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
         /*
          * There is already another bio queued in same dir. No
          * need to update dispatch time.
-         * Still update the disptime if rate limits on this group
-         * were changed.
          */
-        if (!tg->limits_changed)
-            update_disptime = false;
-        else
-            tg->limits_changed = false;
-
+        update_disptime = false;
         goto queue_bio;
 
     }
 
     /* Bio is with-in rate limit of group */
     if (tg_may_dispatch(td, tg, bio, NULL)) {
         throtl_charge_bio(tg, bio);
+
+        /*
+         * We need to trim slice even when bios are not being queued
+         * otherwise it might happen that a bio is not queued for
+         * a long time and slice keeps on extending and trim is not
+         * called for a long time. Now if limits are reduced suddenly
+         * we take into account all the IO dispatched so far at new
+         * low rate and * newly queued IO gets a really long dispatch
+         * time.
+         *
+         * So keep on trimming slice even if bio is not queued.
+         */
+        throtl_trim_slice(td, tg, rw);
         goto out;
     }
 
@@ -1058,7 +1060,7 @@ int blk_throtl_init(struct request_queue *q)
 
     INIT_HLIST_HEAD(&td->tg_list);
     td->tg_service_tree = THROTL_RB_ROOT;
-    atomic_set(&td->limits_changed, 0);
+    td->limits_changed = false;
 
     /* Init root group */
     tg = &td->root_tg;
@@ -1070,6 +1072,7 @@ int blk_throtl_init(struct request_queue *q)
     /* Practically unlimited BW */
     tg->bps[0] = tg->bps[1] = -1;
     tg->iops[0] = tg->iops[1] = -1;
+    td->limits_changed = false;
 
     /*
      * Set root group reference to 2. One reference will be dropped when
@@ -1102,7 +1105,7 @@ void blk_throtl_exit(struct request_queue *q)
 
     BUG_ON(!td);
 
-    throtl_shutdown_timer_wq(q);
+    throtl_shutdown_wq(q);
 
     spin_lock_irq(q->queue_lock);
     throtl_release_tgs(td);
@@ -1132,7 +1135,7 @@ void blk_throtl_exit(struct request_queue *q)
     * update limits through cgroup and another work got queued, cancel
     * it.
     */
-    throtl_shutdown_timer_wq(q);
+    throtl_shutdown_wq(q);
     throtl_td_free(td);
 }
block/blk.h | 16
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
@@ -51,21 +49,17 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)     (!hlist_unhashed(&(rq)->hash))
 
-struct request *blk_do_flush(struct request_queue *q, struct request *rq);
+void blk_insert_flush(struct request *rq);
+void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
     struct request *rq;
 
     while (1) {
-        while (!list_empty(&q->queue_head)) {
+        if (!list_empty(&q->queue_head)) {
             rq = list_entry_rq(q->queue_head.next);
-            if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
-                rq == &q->flush_rq)
-                return rq;
-            rq = blk_do_flush(q, rq);
-            if (rq)
-                return rq;
+            return rq;
         }
 
         if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
@@ -109,6 +103,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
               struct bio *bio);
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+                          struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
 #define CFQQ_SEEKY(cfqq)    (hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)          \
-    ((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_CFQQ(rq)         (struct cfq_queue *) ((rq)->elevator_private2)
-#define RQ_CFQG(rq)         (struct cfq_group *) ((rq)->elevator_private3)
+    ((struct cfq_io_context *) (rq)->elevator_private[0])
+#define RQ_CFQQ(rq)         (struct cfq_queue *) ((rq)->elevator_private[1])
+#define RQ_CFQG(rq)         (struct cfq_group *) ((rq)->elevator_private[2])
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
@@ -146,7 +146,6 @@ struct cfq_queue {
     struct cfq_rb_root *service_tree;
     struct cfq_queue *new_cfqq;
     struct cfq_group *cfqg;
-    struct cfq_group *orig_cfqg;
     /* Number of sectors dispatched from queue in single dispatch round */
     unsigned long nr_sectors;
 };
@@ -179,6 +178,8 @@ struct cfq_group {
     /* group service_tree key */
     u64 vdisktime;
     unsigned int weight;
+    unsigned int new_weight;
+    bool needs_update;
 
     /* number of cfqq currently on this group */
     int nr_cfqq;
@@ -238,6 +239,7 @@ struct cfq_data {
     struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
     unsigned int busy_queues;
+    unsigned int busy_sync_queues;
 
     int rq_in_driver;
     int rq_in_flight[2];
@@ -285,7 +287,6 @@ struct cfq_data {
     unsigned int cfq_slice_idle;
     unsigned int cfq_group_idle;
     unsigned int cfq_latency;
-    unsigned int cfq_group_isolation;
 
     unsigned int cic_index;
     struct list_head cic_list;
@@ -501,13 +502,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
     }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-    struct cfq_data *cfqd = q->elevator->elevator_data;
-
-    return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -558,15 +552,13 @@ static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
-    u64 vdisktime = st->min_vdisktime;
     struct cfq_group *cfqg;
 
     if (st->left) {
         cfqg = rb_entry_cfqg(st->left);
-        vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
+        st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+                                          cfqg->vdisktime);
     }
-
-    st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
 }
 
 /*
@@ -863,7 +855,27 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 }
 
 static void
-cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+cfq_update_group_weight(struct cfq_group *cfqg)
+{
+    BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+    if (cfqg->needs_update) {
+        cfqg->weight = cfqg->new_weight;
+        cfqg->needs_update = false;
+    }
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+    BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
+    cfq_update_group_weight(cfqg);
+    __cfq_group_service_tree_add(st, cfqg);
+    st->total_weight += cfqg->weight;
+}
+
+static void
+cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
     struct cfq_rb_root *st = &cfqd->grp_service_tree;
     struct cfq_group *__cfqg;
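For illustration only (not part of this patch): the new cfq_update_group_weight()/needs_update pair stages a weight change and applies it only while the group is off the service tree, so the tree's accumulated total weight never goes stale. The same "stage the value, apply at a safe point" idiom in a standalone C sketch with invented names:

#include <stdbool.h>
#include <stdio.h>

struct group {
    unsigned int weight;
    unsigned int new_weight;
    bool needs_update;
    bool on_tree;
};

/* May be called at any time: only record the request. */
static void request_weight_change(struct group *g, unsigned int weight)
{
    g->new_weight = weight;
    g->needs_update = true;
}

/* Called only while the group is off the tree: safe to apply. */
static void apply_weight_update(struct group *g)
{
    if (!g->on_tree && g->needs_update) {
        g->weight = g->new_weight;
        g->needs_update = false;
    }
}

int main(void)
{
    struct group g = { .weight = 500, .on_tree = true };

    request_weight_change(&g, 300);
    apply_weight_update(&g);         /* still on the tree: deferred */
    printf("weight=%u\n", g.weight); /* 500 */

    g.on_tree = false;               /* e.g. removed for service */
    apply_weight_update(&g);
    printf("weight=%u\n", g.weight); /* 300 */
    return 0;
}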
@@ -884,13 +896,19 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
         cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
     } else
         cfqg->vdisktime = st->min_vdisktime;
-
-    __cfq_group_service_tree_add(st, cfqg);
-    st->total_weight += cfqg->weight;
+    cfq_group_service_tree_add(st, cfqg);
 }
 
 static void
-cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+    st->total_weight -= cfqg->weight;
+    if (!RB_EMPTY_NODE(&cfqg->rb_node))
+        cfq_rb_erase(&cfqg->rb_node, st);
+}
+
+static void
+cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
     struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
@@ -902,14 +920,13 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
         return;
 
     cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-    st->total_weight -= cfqg->weight;
-    if (!RB_EMPTY_NODE(&cfqg->rb_node))
-        cfq_rb_erase(&cfqg->rb_node, st);
+    cfq_group_service_tree_del(st, cfqg);
     cfqg->saved_workload_slice = 0;
     cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+                                                unsigned int *unaccounted_time)
 {
     unsigned int slice_used;
 
@@ -928,8 +945,13 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                 1);
     } else {
         slice_used = jiffies - cfqq->slice_start;
-        if (slice_used > cfqq->allocated_slice)
+        if (slice_used > cfqq->allocated_slice) {
+            *unaccounted_time = slice_used - cfqq->allocated_slice;
             slice_used = cfqq->allocated_slice;
+        }
+        if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+            *unaccounted_time += cfqq->slice_start -
+                    cfqq->dispatch_start;
     }
 
     return slice_used;
@@ -939,12 +961,12 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                              struct cfq_queue *cfqq)
 {
     struct cfq_rb_root *st = &cfqd->grp_service_tree;
-    unsigned int used_sl, charge;
+    unsigned int used_sl, charge, unaccounted_sl = 0;
     int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
             - cfqg->service_tree_idle.count;
 
     BUG_ON(nr_sync < 0);
-    used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+    used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
     if (iops_mode(cfqd))
         charge = cfqq->slice_dispatch;
@@ -952,9 +974,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
         charge = cfqq->allocated_slice;
 
     /* Can't update vdisktime while group is on service tree */
-    cfq_rb_erase(&cfqg->rb_node, st);
+    cfq_group_service_tree_del(st, cfqg);
     cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
-    __cfq_group_service_tree_add(st, cfqg);
+    /* If a new weight was requested, update now, off tree */
+    cfq_group_service_tree_add(st, cfqg);
 
     /* This group is being expired. Save the context */
     if (time_after(cfqd->workload_expires, jiffies)) {
@@ -970,7 +993,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
     cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
             " sect=%u", used_sl, cfqq->slice_dispatch, charge,
             iops_mode(cfqd), cfqq->nr_sectors);
-    cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+    cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+                                      unaccounted_sl);
     cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
@@ -985,7 +1009,9 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
                     unsigned int weight)
 {
-    cfqg_of_blkg(blkg)->weight = weight;
+    struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+    cfqg->new_weight = weight;
+    cfqg->needs_update = true;
 }
 
 static struct cfq_group *
@@ -1187,32 +1213,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
     int new_cfqq = 1;
     int group_changed = 0;
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-    if (!cfqd->cfq_group_isolation
-        && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
-        && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
-        /* Move this cfq to root group */
-        cfq_log_cfqq(cfqd, cfqq, "moving to root group");
-        if (!RB_EMPTY_NODE(&cfqq->rb_node))
-            cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-        cfqq->orig_cfqg = cfqq->cfqg;
-        cfqq->cfqg = &cfqd->root_group;
-        cfqd->root_group.ref++;
-        group_changed = 1;
-    } else if (!cfqd->cfq_group_isolation
-           && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
-        /* cfqq is sequential now needs to go to its original group */
-        BUG_ON(cfqq->cfqg != &cfqd->root_group);
-        if (!RB_EMPTY_NODE(&cfqq->rb_node))
-            cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-        cfq_put_cfqg(cfqq->cfqg);
-        cfqq->cfqg = cfqq->orig_cfqg;
-        cfqq->orig_cfqg = NULL;
-        group_changed = 1;
-        cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
-    }
-#endif
-
     service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
                         cfqq_type(cfqq));
     if (cfq_class_idle(cfqq)) {
@@ -1284,7 +1284,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
     service_tree->count++;
     if ((add_front || !new_cfqq) && !group_changed)
         return;
-    cfq_group_service_tree_add(cfqd, cfqq->cfqg);
+    cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
 }
 
 static struct cfq_queue *
@@ -1372,6 +1372,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
     BUG_ON(cfq_cfqq_on_rr(cfqq));
     cfq_mark_cfqq_on_rr(cfqq);
     cfqd->busy_queues++;
+    if (cfq_cfqq_sync(cfqq))
+        cfqd->busy_sync_queues++;
 
     cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -1395,9 +1397,11 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         cfqq->p_root = NULL;
     }
 
-    cfq_group_service_tree_del(cfqd, cfqq->cfqg);
+    cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
     BUG_ON(!cfqd->busy_queues);
     cfqd->busy_queues--;
+    if (cfq_cfqq_sync(cfqq))
+        cfqd->busy_sync_queues--;
 }
 
 /*
@@ -2405,22 +2409,34 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
      * Does this cfqq already have too much IO in flight?
      */
     if (cfqq->dispatched >= max_dispatch) {
+        bool promote_sync = false;
         /*
          * idle queue must always only have a single IO in flight
          */
         if (cfq_class_idle(cfqq))
             return false;
 
+        /*
+         * If there is only one sync queue
+         * we can ignore async queue here and give the sync
+         * queue no dispatch limit. The reason is a sync queue can
+         * preempt async queue, limiting the sync queue doesn't make
+         * sense. This is useful for aiostress test.
+         */
+        if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
+            promote_sync = true;
+
         /*
          * We have other queues, don't allow more IO from this one
          */
-        if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+        if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+            !promote_sync)
             return false;
 
         /*
         * Sole queue user, no limit
         */
-        if (cfqd->busy_queues == 1)
+        if (cfqd->busy_queues == 1 || promote_sync)
             max_dispatch = -1;
         else
             /*
@@ -2542,7 +2558,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
     struct cfq_data *cfqd = cfqq->cfqd;
-    struct cfq_group *cfqg, *orig_cfqg;
+    struct cfq_group *cfqg;
 
     BUG_ON(cfqq->ref <= 0);
 
@@ -2554,7 +2570,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
     BUG_ON(rb_first(&cfqq->sort_list));
     BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
     cfqg = cfqq->cfqg;
-    orig_cfqg = cfqq->orig_cfqg;
 
     if (unlikely(cfqd->active_queue == cfqq)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
@@ -2564,8 +2579,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
     BUG_ON(cfq_cfqq_on_rr(cfqq));
     kmem_cache_free(cfq_pool, cfqq);
     cfq_put_cfqg(cfqg);
-    if (orig_cfqg)
-        cfq_put_cfqg(orig_cfqg);
 }
 
 /*
@@ -3613,12 +3626,12 @@ static void cfq_put_request(struct request *rq)
 
     put_io_context(RQ_CIC(rq)->ioc);
 
-    rq->elevator_private = NULL;
-    rq->elevator_private2 = NULL;
+    rq->elevator_private[0] = NULL;
+    rq->elevator_private[1] = NULL;
 
     /* Put down rq reference on cfqg */
     cfq_put_cfqg(RQ_CFQG(rq));
-    rq->elevator_private3 = NULL;
+    rq->elevator_private[2] = NULL;
 
     cfq_put_queue(cfqq);
 }
@@ -3705,13 +3718,12 @@ new_queue:
     }
 
     cfqq->allocated[rw]++;
 
     cfqq->ref++;
-    rq->elevator_private = cic;
-    rq->elevator_private2 = cfqq;
-    rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+    rq->elevator_private[0] = cic;
+    rq->elevator_private[1] = cfqq;
+    rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
 
     spin_unlock_irqrestore(q->queue_lock, flags);
 
     return 0;
 
 queue_fail:
@@ -3953,7 +3965,6 @@ static void *cfq_init_queue(struct request_queue *q)
     cfqd->cfq_slice_idle = cfq_slice_idle;
     cfqd->cfq_group_idle = cfq_group_idle;
     cfqd->cfq_latency = 1;
-    cfqd->cfq_group_isolation = 0;
     cfqd->hw_tag = -1;
     /*
      * we optimistically start assuming sync ops weren't delayed in last
@@ -4029,7 +4040,6 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
-SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -4063,7 +4073,6 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
         UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
-STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -4081,7 +4090,6 @@ static struct elv_fs_entry cfq_attrs[] = {
     CFQ_ATTR(slice_idle),
     CFQ_ATTR(group_idle),
     CFQ_ATTR(low_latency),
-    CFQ_ATTR(group_isolation),
     __ATTR_NULL
 };
 
@@ -4096,7 +4104,6 @@ static struct elevator_type iosched_cfq = {
     .elevator_add_req_fn =      cfq_insert_request,
     .elevator_activate_req_fn = cfq_activate_request,
     .elevator_deactivate_req_fn = cfq_deactivate_request,
-    .elevator_queue_empty_fn =  cfq_queue_empty,
     .elevator_completed_req_fn = cfq_completed_request,
     .elevator_former_req_fn =   elv_rb_former_request,
     .elevator_latter_req_fn =   elv_rb_latter_request,
@@ -16,9 +16,9 @@ static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-            unsigned long time)
+            unsigned long time, unsigned long unaccounted_time)
 {
-    blkiocg_update_timeslice_used(blkg, time);
+    blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
 }
 
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
@@ -85,7 +85,7 @@ static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
             unsigned long dequeue) {}
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-            unsigned long time) {}
+            unsigned long time, unsigned long unaccounted_time) {}
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
             bool direction, bool sync) {}
@@ -326,14 +326,6 @@ dispatch_request:
 return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-struct deadline_data *dd = q->elevator->elevator_data;
-
-return list_empty(&dd->fifo_list[WRITE])
-&& list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
 struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
 .elevator_merge_req_fn = deadline_merged_requests,
 .elevator_dispatch_fn = deadline_dispatch_requests,
 .elevator_add_req_fn = deadline_add_request,
-.elevator_queue_empty_fn = deadline_queue_empty,
 .elevator_former_req_fn = elv_rb_former_request,
 .elevator_latter_req_fn = elv_rb_latter_request,
 .elevator_init_fn = deadline_init_queue,

block/elevator.c: 108 lines changed

@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
 {
 int ret = ELEVATOR_NO_MERGE;
 
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 struct list_head *entry;
 int stop_flags;
 
+BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 if (q->last_merge == rq)
 q->last_merge = NULL;
 
@@ -519,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 return ELEVATOR_NO_MERGE;
 }
 
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+struct request *rq)
+{
+struct request *__rq;
+
+if (blk_queue_nomerges(q))
+return false;
+
+/*
+ * First try one-hit cache.
+ */
+if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+return true;
+
+if (blk_queue_noxmerges(q))
+return false;
+
+/*
+ * See if our hash lookup can find a potential backmerge.
+ */
+__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+if (__rq && blk_attempt_req_merge(q, __rq, rq))
+return true;
+
+return false;
+}
+
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 struct elevator_queue *e = q->elevator;
@@ -536,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 struct request *next)
 {
 struct elevator_queue *e = q->elevator;
+const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-if (e->ops->elevator_merge_req_fn)
+if (next_sorted && e->ops->elevator_merge_req_fn)
 e->ops->elevator_merge_req_fn(q, rq, next);
 
 elv_rqhash_reposition(q, rq);
-elv_rqhash_del(q, next);
 
-q->nr_sorted--;
+if (next_sorted) {
+elv_rqhash_del(q, next);
+q->nr_sorted--;
+}
+
 q->last_merge = rq;
 }
 
@@ -617,21 +657,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-int unplug_it = 1;
-
 trace_block_rq_insert(q, rq);
 
 rq->q = q;
 
 switch (where) {
 case ELEVATOR_INSERT_REQUEUE:
-/*
- * Most requeues happen because of a busy condition,
- * don't force unplug of the queue for that case.
- * Clear unplug_it and fall through.
- */
-unplug_it = 0;
-
 case ELEVATOR_INSERT_FRONT:
 rq->cmd_flags |= REQ_SOFTBARRIER;
 list_add(&rq->queuelist, &q->queue_head);
@@ -654,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 __blk_run_queue(q, false);
 break;
 
+case ELEVATOR_INSERT_SORT_MERGE:
+/*
+ * If we succeed in merging this request with one in the
+ * queue already, we are done - rq has now been freed,
+ * so no need to do anything further.
+ */
+if (elv_attempt_insert_merge(q, rq))
+break;
 case ELEVATOR_INSERT_SORT:
 BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 !(rq->cmd_flags & REQ_DISCARD));
@@ -673,24 +712,21 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 q->elevator->ops->elevator_add_req_fn(q, rq);
 break;
 
+case ELEVATOR_INSERT_FLUSH:
+rq->cmd_flags |= REQ_SOFTBARRIER;
+blk_insert_flush(rq);
+break;
 default:
 printk(KERN_ERR "%s: bad insertion point %d\n",
 __func__, where);
 BUG();
 }
-
-if (unplug_it && blk_queue_plugged(q)) {
-int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-- queue_in_flight(q);
-
-if (nrq >= q->unplug_thresh)
-__generic_unplug_device(q);
-}
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
+BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 if (rq->cmd_flags & REQ_SOFTBARRIER) {
 /* barriers are scheduling boundary, update end_sector */
 if (rq->cmd_type == REQ_TYPE_FS ||
@@ -702,38 +738,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 where == ELEVATOR_INSERT_SORT)
 where = ELEVATOR_INSERT_BACK;
 
-if (plug)
-blk_plug_device(q);
-
 elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 unsigned long flags;
 
 spin_lock_irqsave(q->queue_lock, flags);
-__elv_add_request(q, rq, where, plug);
+__elv_add_request(q, rq, where);
 spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-struct elevator_queue *e = q->elevator;
-
-if (!list_empty(&q->queue_head))
-return 0;
-
-if (e->ops->elevator_queue_empty_fn)
-return e->ops->elevator_queue_empty_fn(q);
-
-return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 struct elevator_queue *e = q->elevator;
@@ -759,7 +777,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 if (e->ops->elevator_set_req_fn)
 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
 
-rq->elevator_private = NULL;
+rq->elevator_private[0] = NULL;
 return 0;
 }
 
@@ -785,6 +803,8 @@ void elv_abort_queue(struct request_queue *q)
 {
 struct request *rq;
 
+blk_abort_flushes(q);
+
 while (!list_empty(&q->queue_head)) {
 rq = list_entry_rq(q->queue_head.next);
 rq->cmd_flags |= REQ_QUIET;

@@ -1158,14 +1158,14 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 "%u %lu %lu %llu %u %u %u %u\n",
 MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
 disk_name(gp, hd->partno, buf),
-part_stat_read(hd, ios[0]),
-part_stat_read(hd, merges[0]),
-(unsigned long long)part_stat_read(hd, sectors[0]),
-jiffies_to_msecs(part_stat_read(hd, ticks[0])),
-part_stat_read(hd, ios[1]),
-part_stat_read(hd, merges[1]),
-(unsigned long long)part_stat_read(hd, sectors[1]),
-jiffies_to_msecs(part_stat_read(hd, ticks[1])),
+part_stat_read(hd, ios[READ]),
+part_stat_read(hd, merges[READ]),
+(unsigned long long)part_stat_read(hd, sectors[READ]),
+jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
+part_stat_read(hd, ios[WRITE]),
+part_stat_read(hd, merges[WRITE]),
+(unsigned long long)part_stat_read(hd, sectors[WRITE]),
+jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
 part_in_flight(hd),
 jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 jiffies_to_msecs(part_stat_read(hd, time_in_queue))
@@ -1494,7 +1494,7 @@ void disk_block_events(struct gendisk *disk)
 void disk_unblock_events(struct gendisk *disk)
 {
 if (disk->ev)
-__disk_unblock_events(disk, true);
+__disk_unblock_events(disk, false);
 }
 
 /**

@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
 list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-struct noop_data *nd = q->elevator->elevator_data;
-
-return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
 .elevator_merge_req_fn = noop_merged_requests,
 .elevator_dispatch_fn = noop_dispatch,
 .elevator_add_req_fn = noop_add_request,
-.elevator_queue_empty_fn = noop_queue_empty,
 .elevator_former_req_fn = noop_former_request,
 .elevator_latter_req_fn = noop_latter_request,
 .elevator_init_fn = noop_init_queue,

@@ -140,13 +140,14 @@ static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 return 0;
 }
 
-static int DAC960_media_changed(struct gendisk *disk)
+static unsigned int DAC960_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 DAC960_Controller_T *p = disk->queue->queuedata;
 int drive_nr = (long)disk->private_data;
 
 if (!p->LogicalDriveInitiallyAccessible[drive_nr])
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 return 0;
 }
 
@@ -163,7 +164,7 @@ static const struct block_device_operations DAC960_BlockDeviceOperations = {
 .owner = THIS_MODULE,
 .open = DAC960_open,
 .getgeo = DAC960_getgeo,
-.media_changed = DAC960_media_changed,
+.check_events = DAC960_check_events,
 .revalidate_disk = DAC960_revalidate_disk,
 };
 
@@ -2546,6 +2547,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 disk->major = MajorNumber;
 disk->first_minor = n << DAC960_MaxPartitionsBits;
 disk->fops = &DAC960_BlockDeviceOperations;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 }
 /*
 Indicate the Block Device Registration completed successfully,

@@ -1658,12 +1658,12 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 }
 
 /*
-* floppy-change is never called from an interrupt, so we can relax a bit
+* check_events is never called from an interrupt, so we can relax a bit
 * here, sleep etc. Note that floppy-on tries to set current_DOR to point
 * to the desired drive, but it will probably not survive the sleep if
 * several floppies are used at the same time: thus the loop.
 */
-static int amiga_floppy_change(struct gendisk *disk)
+static unsigned amiga_check_events(struct gendisk *disk, unsigned int clearing)
 {
 struct amiga_floppy_struct *p = disk->private_data;
 int drive = p - unit;
@@ -1686,7 +1686,7 @@ static int amiga_floppy_change(struct gendisk *disk)
 p->dirty = 0;
 writepending = 0; /* if this was true before, too bad! */
 writefromint = 0;
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 return 0;
 }
@@ -1697,7 +1697,7 @@ static const struct block_device_operations floppy_fops = {
 .release = floppy_release,
 .ioctl = fd_ioctl,
 .getgeo = fd_getgeo,
-.media_changed = amiga_floppy_change,
+.check_events = amiga_check_events,
 };
 
 static int __init fd_probe_drives(void)
@@ -1736,6 +1736,7 @@ static int __init fd_probe_drives(void)
 disk->major = FLOPPY_MAJOR;
 disk->first_minor = drive;
 disk->fops = &floppy_fops;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 sprintf(disk->disk_name, "fd%d", drive);
 disk->private_data = &unit[drive];
 set_capacity(disk, 880*2);

@@ -1324,23 +1324,24 @@ static void finish_fdc_done( int dummy )
 * due to unrecognised disk changes.
 */
 
-static int check_floppy_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct atari_floppy_struct *p = disk->private_data;
 unsigned int drive = p - unit;
 if (test_bit (drive, &fake_change)) {
 /* simulated change (e.g. after formatting) */
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 if (test_bit (drive, &changed_floppies)) {
 /* surely changed (the WP signal changed at least once) */
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 if (UD.wpstat) {
 /* WP is on -> could be changed: to be sure, buffers should be
 * invalidated...
 */
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 
 return 0;
@@ -1570,7 +1571,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 * or the next access will revalidate - and clear UDT :-(
 */
 
-if (check_floppy_change(disk))
+if (floppy_check_events(disk, 0))
 floppy_revalidate(disk);
 
 if (UD.flags & FTD_MSG)
@@ -1904,7 +1905,7 @@ static const struct block_device_operations floppy_fops = {
 .open = floppy_unlocked_open,
 .release = floppy_release,
 .ioctl = fd_ioctl,
-.media_changed = check_floppy_change,
+.check_events = floppy_check_events,
 .revalidate_disk= floppy_revalidate,
 };
 
@@ -1963,6 +1964,7 @@ static int __init atari_floppy_init (void)
 unit[i].disk->first_minor = i;
 sprintf(unit[i].disk->disk_name, "fd%d", i);
 unit[i].disk->fops = &floppy_fops;
+unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
 unit[i].disk->private_data = &unit[i];
 unit[i].disk->queue = blk_init_queue(do_fd_request,
 &ataflop_lock);

@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
 int sg_index = 0;
 int chained = 0;
 
-/* We call start_io here in case there is a command waiting on the
- * queue that has not been sent.
- */
-if (blk_queue_plugged(q))
-goto startio;
-
 queue:
 creq = blk_peek_request(q);
 if (!creq)

@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
 struct scatterlist tmp_sg[SG_MAX];
 int i, dir, seg;
 
-if (blk_queue_plugged(q))
-goto startio;
-
 queue_next:
 creq = blk_peek_request(q);
 if (!creq)

@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 rw |= REQ_FUA;
-rw |= REQ_UNPLUG | REQ_SYNC;
+rw |= REQ_SYNC;
 
 bio = bio_alloc(GFP_NOIO, 1);
 bio->bi_bdev = bdev->md_bdev;
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
 }
 }
 
-drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
 /* always (try to) flush bitmap to stable storage */
 drbd_md_flush(mdev);

@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 for (i = 0; i < num_pages; i++)
 bm_page_io_async(mdev, b, i, rw);
 
-drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
 wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
 if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {

@@ -377,7 +377,7 @@ union p_header {
 #define DP_HARDBARRIER 1 /* depricated */
 #define DP_RW_SYNC 2 /* equals REQ_SYNC */
 #define DP_MAY_SET_IN_SYNC 4
-#define DP_UNPLUG 8 /* equals REQ_UNPLUG */
+#define DP_UNPLUG 8 /* not used anymore */
 #define DP_FUA 16 /* equals REQ_FUA */
 #define DP_FLUSH 32 /* equals REQ_FLUSH */
 #define DP_DISCARD 64 /* equals REQ_DISCARD */
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
 return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-if (q && q->unplug_fn)
-q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-if (get_ldev(mdev)) {
-drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-put_ldev(mdev);
-}
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 int r;

@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 {
 if (mdev->agreed_pro_version >= 95)
 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 (bi_rw & REQ_FUA ? DP_FUA : 0) |
 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 else
-return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write requests
@@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-struct drbd_conf *mdev = q->queuedata;
-
-/* unplug FIRST */
-spin_lock_irq(q->queue_lock);
-blk_remove_plug(q);
-spin_unlock_irq(q->queue_lock);
-
-/* only if connected */
-spin_lock_irq(&mdev->req_lock);
-if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-D_ASSERT(mdev->state.role == R_PRIMARY);
-if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-/* add to the data.work queue,
- * unless already queued.
- * XXX this might be a good addition to drbd_queue_work
- * anyways, to detect "double queuing" ... */
-if (list_empty(&mdev->unplug_work.list))
-drbd_queue_work(&mdev->data.work,
-&mdev->unplug_work);
-}
-}
-spin_unlock_irq(&mdev->req_lock);
-
-if (mdev->state.disk >= D_INCONSISTENT)
-drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 /* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 blk_queue_merge_bvec(q, drbd_merge_bvec);
-q->queue_lock = &mdev->req_lock; /* needed since we use */
-/* plugging on a queue, that actually has no requests! */
-q->unplug_fn = drbd_unplug_fn;
+q->queue_lock = &mdev->req_lock;
 
 mdev->md_io_page = alloc_page(GFP_KERNEL);
 if (!mdev->md_io_page)

@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
 struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 LIST_HEAD(reclaimed);
 struct drbd_epoch_entry *e, *t;
 
-maybe_kick_lo(mdev);
 spin_lock_irq(&mdev->req_lock);
 reclaim_net_ee(mdev, &reclaimed);
 spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 while (!list_empty(head)) {
 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 spin_unlock_irq(&mdev->req_lock);
-drbd_kick_lo(mdev);
-schedule();
+io_schedule();
 finish_wait(&mdev->ee_wait, &wait);
 spin_lock_irq(&mdev->req_lock);
 }
@@ -1111,8 +1100,6 @@ next_bio:
 /* > e->sector, unless this is the first bio */
 bio->bi_sector = sector;
 bio->bi_bdev = mdev->ldev->backing_bdev;
-/* we special case some flags in the multi-bio case, see below
- * (REQ_UNPLUG) */
 bio->bi_rw = rw;
 bio->bi_private = e;
 bio->bi_end_io = drbd_endio_sec;
@@ -1141,13 +1128,8 @@ next_bio:
 bios = bios->bi_next;
 bio->bi_next = NULL;
 
-/* strip off REQ_UNPLUG unless it is the last bio */
-if (bios)
-bio->bi_rw &= ~REQ_UNPLUG;
-
 drbd_generic_make_request(mdev, fault_type, bio);
 } while (bios);
-maybe_kick_lo(mdev);
 return 0;
 
 fail:
@@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
 inc_unacked(mdev);
 
-if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-drbd_kick_lo(mdev);
-
 mdev->current_epoch->barrier_nr = p->barrier;
 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
 if (mdev->agreed_pro_version >= 95)
 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 (dpf & DP_FUA ? REQ_FUA : 0) |
 (dpf & DP_FLUSH ? REQ_FUA : 0) |
 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
 else
-return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }
 
 /* mirrored write */
@@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-if (mdev->state.disk >= D_INCONSISTENT)
-drbd_kick_lo(mdev);
-
 /* Make sure we've acked all the TCP data associated
 * with the data requests being unplugged */
 drbd_tcp_quickack(mdev->data.socket);

@@ -960,10 +960,6 @@ allocate_barrier:
 bio_endio(req->private_bio, -EIO);
 }
 
-/* we need to plug ALWAYS since we possibly need to kick lo_dev.
- * we plug after submit, so we won't miss an unplug event */
-drbd_plug_device(mdev);
-
 return 0;
 
 fail_conflicting:

@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 * queue (or even the read operations for those packets
 * is not finished by now). Retry in 100ms. */
 
-drbd_kick_lo(mdev);
 __set_current_state(TASK_INTERRUPTIBLE);
 schedule_timeout(HZ / 10);
 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);

@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-struct request_queue *q;
-q = bdev_get_queue(mdev->this_bdev);
-
-spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
-if (!blk_queue_plugged(q)) {
-blk_plug_device(q);
-del_timer(&q->unplug_timer);
-/* unplugging should not happen automatically... */
-}
-spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
 return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)

@@ -3770,13 +3770,14 @@ out2:
 /*
 * Check if the disk has been changed or if a change has been faked.
 */
-static int check_floppy_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 int drive = (long)disk->private_data;
 
 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 test_bit(FD_VERIFY_BIT, &UDRS->flags))
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 
 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 lock_fdc(drive, false);
@@ -3788,7 +3789,7 @@ static int check_floppy_change(struct gendisk *disk)
 test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
 test_bit(drive, &fake_change) ||
 drive_no_geom(drive))
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 return 0;
 }
 
@@ -3837,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
 bio.bi_end_io = floppy_rb0_complete;
 
 submit_bio(READ, &bio);
-generic_unplug_device(bdev_get_queue(bdev));
 process_fd_request();
 wait_for_completion(&complete);
 
@@ -3898,7 +3898,7 @@ static const struct block_device_operations floppy_fops = {
 .release = floppy_release,
 .ioctl = fd_ioctl,
 .getgeo = fd_getgeo,
-.media_changed = check_floppy_change,
+.check_events = floppy_check_events,
 .revalidate_disk = floppy_revalidate,
 };
 
@@ -4205,6 +4205,7 @@ static int __init floppy_init(void)
 disks[dr]->major = FLOPPY_MAJOR;
 disks[dr]->first_minor = TOMINOR(dr);
 disks[dr]->fops = &floppy_fops;
+disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
 sprintf(disks[dr]->disk_name, "fd%d", dr);
 
 init_timer(&motor_off_timer[dr]);

@@ -540,17 +540,6 @@ out:
 return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-struct loop_device *lo = q->queuedata;
-
-queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
 struct file *file;
 struct completion wait;
@@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 */
 blk_queue_make_request(lo->lo_queue, loop_make_request);
 lo->lo_queue->queuedata = lo;
-lo->lo_queue->unplug_fn = loop_unplug;
 
 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
 kthread_stop(lo->lo_thread);
 
-lo->lo_queue->unplug_fn = NULL;
 lo->lo_backing_file = NULL;
 
 loop_release_xfer(lo);
@@ -1636,9 +1623,6 @@ out:
 
 static void loop_free(struct loop_device *lo)
 {
-if (!lo->lo_queue->queue_lock)
-lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
-
 blk_cleanup_queue(lo->lo_queue);
 put_disk(lo->lo_disk);
 list_del(&lo->lo_list);

@@ -172,7 +172,8 @@ module_param_array(drive3, int, NULL, 0);
 static int pcd_open(struct cdrom_device_info *cdi, int purpose);
 static void pcd_release(struct cdrom_device_info *cdi);
 static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
-static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr);
+static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
+unsigned int clearing, int slot_nr);
 static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
 static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
 static int pcd_drive_reset(struct cdrom_device_info *cdi);
@@ -257,10 +258,11 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
 return ret;
 }
 
-static int pcd_block_media_changed(struct gendisk *disk)
+static unsigned int pcd_block_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct pcd_unit *cd = disk->private_data;
-return cdrom_media_changed(&cd->info);
+return cdrom_check_events(&cd->info, clearing);
 }
 
 static const struct block_device_operations pcd_bdops = {
@@ -268,14 +270,14 @@ static const struct block_device_operations pcd_bdops = {
 .open = pcd_block_open,
 .release = pcd_block_release,
 .ioctl = pcd_block_ioctl,
-.media_changed = pcd_block_media_changed,
+.check_events = pcd_block_check_events,
 };
 
 static struct cdrom_device_ops pcd_dops = {
 .open = pcd_open,
 .release = pcd_release,
 .drive_status = pcd_drive_status,
-.media_changed = pcd_media_changed,
+.check_events = pcd_check_events,
 .tray_move = pcd_tray_move,
 .lock_door = pcd_lock_door,
 .get_mcn = pcd_get_mcn,
@@ -318,6 +320,7 @@ static void pcd_init_units(void)
 disk->first_minor = unit;
 strcpy(disk->disk_name, cd->name); /* umm... */
 disk->fops = &pcd_bdops;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 }
 }
 
@@ -502,13 +505,14 @@ static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
 
 #define DBMSG(msg) ((verbose>1)?(msg):NULL)
 
-static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr)
+static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
+unsigned int clearing, int slot_nr)
 {
 struct pcd_unit *cd = cdi->handle;
 int res = cd->changed;
 if (res)
 cd->changed = 0;
-return res;
+return res ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)

@@ -794,7 +794,7 @@ static int pd_release(struct gendisk *p, fmode_t mode)
 return 0;
 }
 
-static int pd_check_media(struct gendisk *p)
+static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
 {
 struct pd_unit *disk = p->private_data;
 int r;
@@ -803,7 +803,7 @@ static int pd_check_media(struct gendisk *p)
 pd_special_command(disk, pd_media_check);
 r = disk->changed;
 disk->changed = 0;
-return r;
+return r ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int pd_revalidate(struct gendisk *p)
@@ -822,7 +822,7 @@ static const struct block_device_operations pd_fops = {
 .release = pd_release,
 .ioctl = pd_ioctl,
 .getgeo = pd_getgeo,
-.media_changed = pd_check_media,
+.check_events = pd_check_events,
 .revalidate_disk= pd_revalidate
 };
 
@@ -837,6 +837,7 @@ static void pd_probe_drive(struct pd_unit *disk)
 p->fops = &pd_fops;
 p->major = major;
 p->first_minor = (disk - pd) << PD_BITS;
+p->events = DISK_EVENT_MEDIA_CHANGE;
 disk->gd = p;
 p->private_data = disk;
 p->queue = pd_queue;

@@ -243,7 +243,8 @@ static struct pf_unit units[PF_UNITS];
 static int pf_identify(struct pf_unit *pf);
 static void pf_lock(struct pf_unit *pf, int func);
 static void pf_eject(struct pf_unit *pf);
-static int pf_check_media(struct gendisk *disk);
+static unsigned int pf_check_events(struct gendisk *disk,
+unsigned int clearing);
 
 static char pf_scratch[512]; /* scratch block buffer */
 
@@ -270,7 +271,7 @@ static const struct block_device_operations pf_fops = {
 .release = pf_release,
 .ioctl = pf_ioctl,
 .getgeo = pf_getgeo,
-.media_changed = pf_check_media,
+.check_events = pf_check_events,
 };
 
 static void __init pf_init_units(void)
@@ -293,6 +294,7 @@ static void __init pf_init_units(void)
 disk->first_minor = unit;
 strcpy(disk->disk_name, pf->name);
 disk->fops = &pf_fops;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 if (!(*drives[unit])[D_PRT])
 pf_drive_count++;
 }
@@ -377,9 +379,9 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
 
 }
 
-static int pf_check_media(struct gendisk *disk)
+static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
 {
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 
 static inline int status_reg(struct pf_unit *pf)

@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
 min_sleep_time = pkt->sleep_time;
 }
 
-generic_unplug_device(bdev_get_queue(pd->bdev));
-
 VPRINTK("kcdrwd: sleeping\n");
 residue = schedule_timeout(min_sleep_time);
 VPRINTK("kcdrwd: wake up\n");
@@ -2796,7 +2794,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 return ret;
 }
 
-static int pkt_media_changed(struct gendisk *disk)
+static unsigned int pkt_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct pktcdvd_device *pd = disk->private_data;
 struct gendisk *attached_disk;
@@ -2806,9 +2805,9 @@ static int pkt_media_changed(struct gendisk *disk)
 if (!pd->bdev)
 return 0;
 attached_disk = pd->bdev->bd_disk;
-if (!attached_disk)
+if (!attached_disk || !attached_disk->fops->check_events)
 return 0;
-return attached_disk->fops->media_changed(attached_disk);
+return attached_disk->fops->check_events(attached_disk, clearing);
 }
 
 static const struct block_device_operations pktcdvd_ops = {
@@ -2816,7 +2815,7 @@ static const struct block_device_operations pktcdvd_ops = {
 .open = pkt_open,
 .release = pkt_close,
 .ioctl = pkt_ioctl,
-.media_changed = pkt_media_changed,
+.check_events = pkt_check_events,
 };
 
 static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
@@ -2889,6 +2888,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
 if (ret)
 goto out_new_dev;
 
+/* inherit events of the host device */
+disk->events = pd->bdev->bd_disk->events;
+disk->async_events = pd->bdev->bd_disk->async_events;
+
 add_disk(disk);
 
 pkt_sysfs_dev_new(pd);

@@ -741,11 +741,12 @@ static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 return 0;
 }
 
-static int floppy_check_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct floppy_state *fs = disk->private_data;
 
-return fs->ejected;
+return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int floppy_revalidate(struct gendisk *disk)
@@ -772,7 +773,7 @@ static const struct block_device_operations floppy_fops = {
 .release = floppy_release,
 .ioctl = floppy_ioctl,
 .getgeo = floppy_getgeo,
-.media_changed = floppy_check_change,
+.check_events = floppy_check_events,
 .revalidate_disk = floppy_revalidate,
 };
 
@@ -857,6 +858,7 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
 swd->unit[drive].disk->first_minor = drive;
 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
 swd->unit[drive].disk->fops = &floppy_fops;
+swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
 swd->unit[drive].disk->private_data = &swd->unit[drive];
 swd->unit[drive].disk->queue = swd->queue;
 set_capacity(swd->unit[drive].disk, 2880);

@@ -250,7 +250,8 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 unsigned int cmd, unsigned long param);
 static int floppy_open(struct block_device *bdev, fmode_t mode);
 static int floppy_release(struct gendisk *disk, fmode_t mode);
-static int floppy_check_change(struct gendisk *disk);
+static unsigned int floppy_check_events(struct gendisk *disk,
+unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
 static bool swim3_end_request(int err, unsigned int nr_bytes)
@@ -975,10 +976,11 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 return 0;
 }
 
-static int floppy_check_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct floppy_state *fs = disk->private_data;
-return fs->ejected;
+return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int floppy_revalidate(struct gendisk *disk)
@@ -1025,7 +1027,7 @@ static const struct block_device_operations floppy_fops = {
 .open = floppy_unlocked_open,
 .release = floppy_release,
 .ioctl = floppy_ioctl,
-.media_changed = floppy_check_change,
+.check_events = floppy_check_events,
 .revalidate_disk= floppy_revalidate,
 };
 
@@ -1161,6 +1163,7 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
 disk->major = FLOPPY_MAJOR;
 disk->first_minor = i;
 disk->fops = &floppy_fops;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 disk->private_data = &floppy_states[i];
 disk->queue = swim3_queue;
 disk->flags |= GENHD_FL_REMOVABLE;

@@ -1788,7 +1788,8 @@ static int ub_bd_revalidate(struct gendisk *disk)
 *
 * The return code is bool!
 */
-static int ub_bd_media_changed(struct gendisk *disk)
+static unsigned int ub_bd_check_events(struct gendisk *disk,
+unsigned int clearing)
 {
 struct ub_lun *lun = disk->private_data;
 
@@ -1806,10 +1807,10 @@ static int ub_bd_media_changed(struct gendisk *disk)
 */
 if (ub_sync_tur(lun->udev, lun) != 0) {
 lun->changed = 1;
-return 1;
+return DISK_EVENT_MEDIA_CHANGE;
 }
 
-return lun->changed;
+return lun->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static const struct block_device_operations ub_bd_fops = {
@@ -1817,7 +1818,7 @@ static const struct block_device_operations ub_bd_fops = {
 .open = ub_bd_unlocked_open,
 .release = ub_bd_release,
 .ioctl = ub_bd_ioctl,
-.media_changed = ub_bd_media_changed,
+.check_events = ub_bd_check_events,
 .revalidate_disk = ub_bd_revalidate,
 };
 
@@ -2333,6 +2334,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 disk->major = UB_MAJOR;
 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
 disk->fops = &ub_bd_fops;
+disk->events = DISK_EVENT_MEDIA_CHANGE;
 disk->private_data = lun;
 disk->driverfs_dev = &sc->intf->dev;

@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.

@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-	struct cardinfo *card = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (blk_remove_plug(q))
-		activate(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.

@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	blk_plug_device(q);
 	spin_unlock_irq(&card->lock);
 
 	return 0;

@@ -779,20 +766,10 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-/*
- * Future support for removable devices
- */
-static int mm_check_change(struct gendisk *disk)
-{
-/*	struct cardinfo *dev = disk->private_data; */
-	return 0;
-}
-
 static const struct block_device_operations mm_fops = {
 	.owner		= THIS_MODULE,
 	.getgeo		= mm_getgeo,
 	.revalidate_disk = mm_revalidate,
-	.media_changed	= mm_check_change,
 };
 
 static int __devinit mm_pci_probe(struct pci_dev *dev,

@@ -907,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 	blk_queue_make_request(card->queue, mm_make_request);
 	card->queue->queue_lock = &card->lock;
 	card->queue->queuedata = card;
-	card->queue->unplug_fn = mm_unplug_device;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
@@ -867,12 +867,12 @@ static void ace_request(struct request_queue * q)
 	}
 }
 
-static int ace_media_changed(struct gendisk *gd)
+static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
 {
 	struct ace_device *ace = gd->private_data;
-	dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);
 
-	return ace->media_change;
+	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int ace_revalidate_disk(struct gendisk *gd)

@@ -953,7 +953,7 @@ static const struct block_device_operations ace_fops = {
 	.owner = THIS_MODULE,
 	.open = ace_open,
 	.release = ace_release,
-	.media_changed = ace_media_changed,
+	.check_events = ace_check_events,
 	.revalidate_disk = ace_revalidate_disk,
 	.getgeo = ace_getgeo,
 };

@@ -1005,6 +1005,7 @@ static int __devinit ace_setup(struct ace_device *ace)
 	ace->gd->major = ace_major;
 	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
 	ace->gd->fops = &ace_fops;
+	ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
 	ace->gd->queue = ace->queue;
 	ace->gd->private_data = ace;
 	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
@@ -395,10 +395,12 @@ static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
 	return CDS_NO_INFO;
 }
 
-static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
+static unsigned int gdrom_check_events(struct cdrom_device_info *cd_info,
+				       unsigned int clearing, int ignore)
 {
 	/* check the sense key */
-	return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60;
+	return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60 ?
+		DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 /* reset the G1 bus */

@@ -483,7 +485,7 @@ static struct cdrom_device_ops gdrom_ops = {
 	.open			= gdrom_open,
 	.release		= gdrom_release,
 	.drive_status		= gdrom_drivestatus,
-	.media_changed		= gdrom_mediachanged,
+	.check_events		= gdrom_check_events,
 	.get_last_session	= gdrom_get_last_session,
 	.reset			= gdrom_hardreset,
 	.audio_ioctl		= gdrom_audio_ioctl,

@@ -509,9 +511,10 @@ static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
 	return 0;
 }
 
-static int gdrom_bdops_mediachanged(struct gendisk *disk)
+static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
+					     unsigned int clearing)
 {
-	return cdrom_media_changed(gd.cd_info);
+	return cdrom_check_events(gd.cd_info, clearing);
 }
 
 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,

@@ -530,7 +533,7 @@ static const struct block_device_operations gdrom_bdops = {
 	.owner			= THIS_MODULE,
 	.open			= gdrom_bdops_open,
 	.release		= gdrom_bdops_release,
-	.media_changed		= gdrom_bdops_mediachanged,
+	.check_events		= gdrom_bdops_check_events,
 	.ioctl			= gdrom_bdops_ioctl,
 };
 

@@ -800,6 +803,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 		goto probe_fail_cdrom_register;
 	}
 	gd.disk->fops = &gdrom_bdops;
+	gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
 	/* latch on to the interrupt */
 	err = gdrom_set_interrupt_handlers();
 	if (err)
@@ -186,10 +186,11 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	return ret;
 }
 
-static int viocd_blk_media_changed(struct gendisk *disk)
+static unsigned int viocd_blk_check_events(struct gendisk *disk,
+					   unsigned int clearing)
 {
 	struct disk_info *di = disk->private_data;
-	return cdrom_media_changed(&di->viocd_info);
+	return cdrom_check_events(&di->viocd_info, clearing);
 }
 
 static const struct block_device_operations viocd_fops = {

@@ -197,7 +198,7 @@ static const struct block_device_operations viocd_fops = {
 	.open =			viocd_blk_open,
 	.release =		viocd_blk_release,
 	.ioctl =		viocd_blk_ioctl,
-	.media_changed =	viocd_blk_media_changed,
+	.check_events =		viocd_blk_check_events,
 };
 
 static int viocd_open(struct cdrom_device_info *cdi, int purpose)

@@ -320,7 +321,8 @@ static void do_viocd_request(struct request_queue *q)
 	}
 }
 
-static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
+static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
+				       unsigned int clearing, int disc_nr)
 {
 	struct viocd_waitevent we;
 	HvLpEvent_Rc hvrc;

@@ -340,7 +342,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 	if (hvrc != 0) {
 		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
 			   (int)hvrc);
-		return -EIO;
+		return 0;
 	}
 
 	wait_for_completion(&we.com);

@@ -354,7 +356,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 		return 0;
 	}
 
-	return we.changed;
+	return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)

@@ -550,7 +552,7 @@ static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
 static struct cdrom_device_ops viocd_dops = {
 	.open = viocd_open,
 	.release = viocd_release,
-	.media_changed = viocd_media_changed,
+	.check_events = viocd_check_events,
 	.lock_door = viocd_lock_door,
 	.generic_packet = viocd_packet,
 	.audio_ioctl = viocd_audio_ioctl,

@@ -624,6 +626,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	gendisk->queue = q;
 	gendisk->fops = &viocd_fops;
 	gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
+	gendisk->events = DISK_EVENT_MEDIA_CHANGE;
 	set_capacity(gendisk, 0);
 	gendisk->private_data = d;
 	d->viocd_disk = gendisk;
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 
 	drive->hwif->rq = NULL;
 
-	elv_add_request(drive->queue, &drive->sense_rq,
-			ELEVATOR_INSERT_FRONT, 0);
+	elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -258,17 +258,10 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
 	if (time_after(jiffies, info->write_timeout))
 		return 0;
 	else {
-		struct request_queue *q = drive->queue;
-		unsigned long flags;
-
 		/*
-		 * take a breather relying on the unplug timer to kick us again
+		 * take a breather
 		 */
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_plug_device(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
+		blk_delay_queue(drive->queue, 1);
 		return 1;
 	}
 }

@@ -1177,7 +1170,7 @@ static struct cdrom_device_ops ide_cdrom_dops = {
 	.open			= ide_cdrom_open_real,
 	.release		= ide_cdrom_release_real,
 	.drive_status		= ide_cdrom_drive_status,
-	.media_changed		= ide_cdrom_check_media_change_real,
+	.check_events		= ide_cdrom_check_events_real,
 	.tray_move		= ide_cdrom_tray_move,
 	.lock_door		= ide_cdrom_lock_door,
 	.select_speed		= ide_cdrom_select_speed,

@@ -1514,8 +1507,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 	blk_queue_dma_alignment(q, 31);
 	blk_queue_update_dma_pad(q, 15);
 
-	q->unplug_delay = max((1 * HZ) / 1000, 1);
-
 	drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
 	drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
 

@@ -1702,10 +1693,11 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 
-static int idecd_media_changed(struct gendisk *disk)
+static unsigned int idecd_check_events(struct gendisk *disk,
+				       unsigned int clearing)
 {
 	struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
-	return cdrom_media_changed(&info->devinfo);
+	return cdrom_check_events(&info->devinfo, clearing);
 }
 
 static int idecd_revalidate_disk(struct gendisk *disk)

@@ -1723,7 +1715,7 @@ static const struct block_device_operations idecd_ops = {
 	.open			= idecd_open,
 	.release		= idecd_release,
 	.ioctl			= idecd_ioctl,
-	.media_changed		= idecd_media_changed,
+	.check_events		= idecd_check_events,
 	.revalidate_disk	= idecd_revalidate_disk
 };
 

@@ -1790,6 +1782,7 @@ static int ide_cd_probe(ide_drive_t *drive)
 	ide_cd_read_toc(drive, &sense);
 	g->fops = &idecd_ops;
 	g->flags |= GENHD_FL_REMOVABLE;
+	g->events = DISK_EVENT_MEDIA_CHANGE;
 	add_disk(g);
 	return 0;
 
@@ -111,7 +111,8 @@ int cdrom_check_status(ide_drive_t *, struct request_sense *);
 int ide_cdrom_open_real(struct cdrom_device_info *, int);
 void ide_cdrom_release_real(struct cdrom_device_info *);
 int ide_cdrom_drive_status(struct cdrom_device_info *, int);
-int ide_cdrom_check_media_change_real(struct cdrom_device_info *, int);
+unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *,
+					 unsigned int clearing, int slot_nr);
 int ide_cdrom_tray_move(struct cdrom_device_info *, int);
 int ide_cdrom_lock_door(struct cdrom_device_info *, int);
 int ide_cdrom_select_speed(struct cdrom_device_info *, int);
@@ -79,8 +79,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
 		return CDS_DRIVE_NOT_READY;
 }
 
-int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
-				      int slot_nr)
+unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
+					 unsigned int clearing, int slot_nr)
 {
 	ide_drive_t *drive = cdi->handle;
 	int retval;

@@ -89,9 +89,9 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
 		(void) cdrom_check_status(drive, NULL);
 		retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
 		drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
-		return retval;
+		return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
 	} else {
-		return -EINVAL;
+		return 0;
 	}
 }
 
@@ -285,11 +285,12 @@ static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-static int ide_gd_media_changed(struct gendisk *disk)
+static unsigned int ide_gd_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
 	ide_drive_t *drive = idkp->drive;
-	int ret;
+	bool ret;
 
 	/* do not scan partitions twice if this is a removable device */
 	if (drive->dev_flags & IDE_DFLAG_ATTACH) {

@@ -297,10 +298,10 @@ static int ide_gd_media_changed(struct gendisk *disk)
 		return 0;
 	}
 
-	ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED);
+	ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
 	drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
 
-	return ret;
+	return ret ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static void ide_gd_unlock_native_capacity(struct gendisk *disk)

@@ -318,7 +319,7 @@ static int ide_gd_revalidate_disk(struct gendisk *disk)
 	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
 	ide_drive_t *drive = idkp->drive;
 
-	if (ide_gd_media_changed(disk))
+	if (ide_gd_check_events(disk, 0))
 		drive->disk_ops->get_capacity(drive);
 
 	set_capacity(disk, ide_gd_capacity(drive));

@@ -340,7 +341,7 @@ static const struct block_device_operations ide_gd_ops = {
 	.release		= ide_gd_release,
 	.ioctl			= ide_gd_ioctl,
 	.getgeo			= ide_gd_getgeo,
-	.media_changed		= ide_gd_media_changed,
+	.check_events		= ide_gd_check_events,
 	.unlock_native_capacity	= ide_gd_unlock_native_capacity,
 	.revalidate_disk	= ide_gd_revalidate_disk
 };

@@ -412,6 +413,7 @@ static int ide_gd_probe(ide_drive_t *drive)
 	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
 		g->flags = GENHD_FL_REMOVABLE;
 	g->fops = &ide_gd_ops;
+	g->events = DISK_EVENT_MEDIA_CHANGE;
 	add_disk(g);
 	return 0;
 
@@ -549,8 +549,6 @@ plug_device_2:
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 }
 
 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)

@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
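With per-queue plugging gone, drivers no longer plug the queue when they put a request back; they simply requeue it, and when the device needs a pause they ask the block layer to retry after a delay (the ide-cd hunk above and the dm hunks below use blk_delay_queue() for this). A sketch of the post-plugging requeue pattern under the 2.6.39-era request API, with a hypothetical mydrv_requeue() helper:

#include <linux/blkdev.h>

/*
 * Old code plugged the queue here:
 *	if (!elv_queue_empty(q))
 *		blk_plug_device(q);
 * New code just requeues and, if the device is busy, delays the queue.
 */
static void mydrv_requeue(struct request_queue *q, struct request *rq,
			  bool device_busy)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (rq)
		blk_requeue_request(q, rq);	/* put it back at the head */
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (device_busy)
		blk_delay_queue(q, 100);	/* re-run the queue in ~100 ms */
}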
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	rq->cmd[0] = REQ_UNPARK_HEADS;
 	rq->cmd_len = 1;
 	rq->cmd_type = REQ_TYPE_SPECIAL;
-	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 
 out:
 	return;
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 			atomic_inc(&bitmap->pending_writes);
 			set_buffer_locked(bh);
 			set_buffer_mapped(bh);
-			submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
+			submit_bh(WRITE | REQ_SYNC, bh);
 			bh = bh->b_this_page;
 		}
 

@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			md_unplug(bitmap->mddev);
-			schedule();
+			io_schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_unplug(struct crypt_config *cc)
-{
-	blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;

@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
 	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-	if (!clone) {
-		kcryptd_unplug(cc);
+	if (!clone)
 		return 1;
-	}
 
 	crypt_inc_pending(io);
 
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
-	/*
-	 * Block devices to unplug.
-	 * Non-NULL pointer means that a block device has some pending requests
-	 * and needs to be unplugged.
-	 */
-	struct block_device *unplug[2];
-
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;

@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-	if (kc->unplug[rw] != NULL) {
-		blk_unplug(bdev_get_queue(kc->unplug[rw]));
-		kc->unplug[rw] = NULL;
-	}
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-			   struct block_device *bdev)
-{
-	if (likely(kc->unplug[rw] == bdev))
-		return;
-	unplug(kc, rw);
-	kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;

@@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ) {
+	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-		prepare_unplug(job->kc, READ, job->source.bdev);
-	} else {
-		if (job->num_dests > 1)
-			io_req.bi_rw |= REQ_UNPLUG;
+	else
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-		if (!(io_req.bi_rw & REQ_UNPLUG))
-			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
-	}
 
 	return r;
 }

@@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
 {
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
+	struct blk_plug plug;
 
 	/*
 	 * The order that these are called is *very* important.

@@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
-	 *
-	 * Note that io_jobs add block devices to the unplug array,
-	 * this array is cleared with "unplug" calls. It is thus
-	 * forbidden to run complete_jobs after io_jobs and before
-	 * unplug because the block device could be destroyed in
-	 * job completion callback.
 	 */
+	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
-	unplug(kc, READ);
-	unplug(kc, WRITE);
+	blk_finish_plug(&plug);
 }
 
 /*

@@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	memset(kc->unplug, 0, sizeof(kc->unplug));
-
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
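The per-client unplug[] bookkeeping removed above is replaced by the new per-task plug: I/O submitted between blk_start_plug() and blk_finish_plug() is batched on the caller's stack and flushed when the plug is finished (or when the task sleeps). A minimal sketch of that usage; submit_jobs() is a hypothetical stand-in for the process_jobs() calls:

#include <linux/blkdev.h>

static void submit_jobs(void);	/* hypothetical: queues bios internally */

static void mydrv_do_work(void)
{
	struct blk_plug plug;

	/*
	 * Everything this task submits until the plug is finished is
	 * batched; no per-device unplug bookkeeping is needed any more.
	 */
	blk_start_plug(&plug);
	submit_jobs();
	blk_finish_plug(&plug);	/* flush the batched requests */
}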
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-	md_raid5_unplug_device(rs->md.private);
+	md_raid5_kick_device(rs->md.private);
 }
 
 /*
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
-
-	dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
@@ -55,6 +55,7 @@ struct dm_table {
 	struct dm_target *targets;
 
 	unsigned discards_supported:1;
+	unsigned integrity_supported:1;
 
 	/*
 	 * Indicates the rw permissions for the new logical

@@ -859,7 +860,7 @@ int dm_table_alloc_md_mempools(struct dm_table *t)
 		return -EINVAL;
 	}
 
-	t->mempools = dm_alloc_md_mempools(type);
+	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
 	if (!t->mempools)
 		return -ENOMEM;
 

@@ -935,8 +936,10 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
 	struct dm_dev_internal *dd;
 
 	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev))
+		if (bdev_get_integrity(dd->dm_dev.bdev)) {
+			t->integrity_supported = 1;
 			return blk_integrity_register(dm_disk(md), NULL);
+		}
 
 	return 0;
 }

@@ -1275,29 +1278,6 @@ int dm_table_any_busy_target(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			blk_unplug(q);
-		else
-			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->unplug_fn)
-			cb->unplug_fn(cb);
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;

@@ -1345,4 +1325,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
@@ -477,7 +477,8 @@ static void start_io_acct(struct dm_io *io)
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw],
+		atomic_inc_return(&md->pending[rw]));
 }
 
 static void end_io_acct(struct dm_io *io)

@@ -497,8 +498,8 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a flush.
 	 */
-	dm_disk(md)->part0.in_flight[rw] = pending =
-		atomic_dec_return(&md->pending[rw]);
+	pending = atomic_dec_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */

@@ -807,8 +808,6 @@ void dm_requeue_unmapped_request(struct request *clone)
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 

@@ -1613,10 +1612,10 @@ static void dm_request_fn(struct request_queue *q)
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;

@@ -1627,7 +1626,7 @@ static void dm_request_fn(struct request_queue *q)
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;

@@ -1647,11 +1646,8 @@ requeued:
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 

@@ -1680,20 +1676,6 @@ static int dm_lld_busy(struct request_queue *q)
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;

@@ -1817,7 +1799,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }

@@ -2263,8 +2244,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {

@@ -2539,7 +2518,6 @@ int dm_resume(struct mapped_device *md)
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);

@@ -2643,9 +2621,10 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
 
 	if (!pools)
 		return NULL;

@@ -2662,13 +2641,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
 	if (!pools->tio_pool)
 		goto free_io_pool_and_out;
 
-	pools->bs = (type == DM_TYPE_BIO_BASED) ?
-		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+	pools->bs = bioset_create(pool_size, 0);
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
 
+	if (integrity && bioset_integrity_create(pools->bs, pool_size))
+		goto free_bioset_and_out;
+
 	return pools;
 
+free_bioset_and_out:
+	bioset_free(pools->bs);
+
 free_tio_pool_and_out:
 	mempool_destroy(pools->tio_pool);
 
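Both the dm hunks above and the md_integrity_register() hunk below pick up the new rule that subsystems must allocate their bio_set's integrity mempool explicitly with bioset_integrity_create(). A sketch of the allocate/teardown pairing, using a hypothetical mydrv helper:

#include <linux/bio.h>

/* Sketch: a private bio_set that optionally carries an integrity pool. */
static struct bio_set *mydrv_create_bioset(unsigned int pool_size,
					   bool want_integrity)
{
	struct bio_set *bs = bioset_create(pool_size, 0);

	if (!bs)
		return NULL;

	/* Integrity mempools are no longer allocated implicitly. */
	if (want_integrity && bioset_integrity_create(bs, pool_size)) {
		bioset_free(bs);
		return NULL;
	}
	return bs;
}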
@@ -149,7 +149,7 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 #endif
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	return maxsectors << 9;
 }
 
-static void linear_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf;
-	int i;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i=0; i < mddev->raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		blk_unplug(r_queue);
-	}
-	rcu_read_unlock();
-}
-
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;

@@ -224,11 +208,9 @@ static int linear_run (mddev_t *mddev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-	mddev->queue->unplug_fn = linear_unplug;
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
-	md_integrity_register(mddev);
-	return 0;
+	return md_integrity_register(mddev);
 }
 
 static void free_conf(struct rcu_head *head)
@@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	bio->bi_end_io = super_written;
 
 	atomic_inc(&mddev->pending_writes);
-	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
-		   bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)

@@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= REQ_SYNC | REQ_UNPLUG;
+	rw |= REQ_SYNC;
 
 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;

@@ -1804,8 +1803,12 @@ int md_integrity_register(mddev_t *mddev)
 			mdname(mddev));
 		return -EINVAL;
 	}
-	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
-		mdname(mddev));
+	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
+		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+		       mdname(mddev));
+		return -EINVAL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL(md_integrity_register);

@@ -4817,7 +4820,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		__md_stop_writes(mddev);
 		md_stop(mddev);
 		mddev->queue->merge_bvec_fn = NULL;
-		mddev->queue->unplug_fn = NULL;
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */

@@ -6692,8 +6694,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 void md_unplug(mddev_t *mddev)
 {
-	if (mddev->queue)
-		blk_unplug(mddev->queue);
 	if (mddev->plug)
 		mddev->plug->unplug_fn(mddev->plug);
 }

@@ -6876,7 +6876,6 @@ void md_do_sync(mddev_t *mddev)
 			    >= mddev->resync_max - mddev->curr_resync_completed
 				    )) {
 			/* time to update curr_resync_completed */
-			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
 			mddev->curr_resync_completed = j;

@@ -6952,7 +6951,6 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		md_unplug(mddev);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2

@@ -6971,8 +6969,6 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	md_unplug(mddev);
-
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	multipath_conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)
-		    && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
-	unplug_slaves(q->queuedata);
-}
-
-
 static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;

@@ -345,7 +315,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 			p->rdev = rdev;
 			goto abort;
 		}
-		md_integrity_register(mddev);
+		err = md_integrity_register(mddev);
 	}
 abort:
 

@@ -517,10 +487,12 @@ static int multipath_run (mddev_t *mddev)
 	 */
 	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = multipath_unplug;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
-	md_integrity_register(mddev);
+
+	if (md_integrity_register(mddev))
+		goto out_free_conf;
+
 	return 0;
 
 out_free_conf:
@@ -25,21 +25,6 @@
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i;
-
-	for (i=0; i < raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-		blk_unplug(r_queue);
-	}
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;

@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			mdname(mddev),
 			(unsigned long long)smallest->sectors);
 	}
-	mddev->queue->unplug_fn = raid0_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 

@@ -395,8 +379,7 @@ static int raid0_run(mddev_t *mddev)
 
 	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
 	dump_zones(mddev);
-	md_integrity_register(mddev);
-	return 0;
+	return md_integrity_register(mddev);
 }
 
 static int raid0_stop(mddev_t *mddev)
@@ -52,23 +52,16 @@
 #define NR_RAID1_BIOS 256
 
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	r1bio_t *r1_bio;
 	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
-	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio && pi->mddev)
-		unplug_slaves(pi->mddev);
-
-	return r1_bio;
+	return kzalloc(size, gfp_flags);
 }
 
 static void r1bio_pool_free(void *r1_bio, void *data)

@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int i, j;
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
-	if (!r1_bio) {
-		unplug_slaves(pi->mddev);
+	if (!r1_bio)
 		return NULL;
-	}
 
 	/*
 	 * Allocate bios : 1 for reading, n-1 for writing

@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	return new_disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(mddev);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid1_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;

@@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits)
 }
 
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);
 
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		/* Only take the spinlock to quiet a warning */
-		spin_lock(conf->mddev->queue->queue_lock);
-		blk_remove_plug(conf->mddev->queue);
-		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */

@@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
 }
 
 /* Barriers....

@@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	/* block any new IO from starting */
 	conf->barrier++;

@@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }

@@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid1_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;

@@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid1_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)

@@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);

@@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
|
||||||
|
|
||||||
if (do_sync)
|
if (do_sync || !bitmap)
|
||||||
md_wakeup_thread(mddev->thread);
|
md_wakeup_thread(mddev->thread);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1178,7 +1132,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
|
||||||
p->rdev = rdev;
|
p->rdev = rdev;
|
||||||
goto abort;
|
goto abort;
|
||||||
}
|
}
|
||||||
md_integrity_register(mddev);
|
err = md_integrity_register(mddev);
|
||||||
}
|
}
|
||||||
abort:
|
abort:
|
||||||
|
|
||||||
|
@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
conf_t *conf = mddev->private;
|
conf_t *conf = mddev->private;
|
||||||
struct list_head *head = &conf->retry_list;
|
struct list_head *head = &conf->retry_list;
|
||||||
int unplug=0;
|
|
||||||
mdk_rdev_t *rdev;
|
mdk_rdev_t *rdev;
|
||||||
|
|
||||||
md_check_recovery(mddev);
|
md_check_recovery(mddev);
|
||||||
|
@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
|
||||||
for (;;) {
|
for (;;) {
|
||||||
char b[BDEVNAME_SIZE];
|
char b[BDEVNAME_SIZE];
|
||||||
|
|
||||||
unplug += flush_pending_writes(conf);
|
flush_pending_writes(conf);
|
||||||
|
|
||||||
spin_lock_irqsave(&conf->device_lock, flags);
|
spin_lock_irqsave(&conf->device_lock, flags);
|
||||||
if (list_empty(head)) {
|
if (list_empty(head)) {
|
||||||
|
@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
|
||||||
|
|
||||||
mddev = r1_bio->mddev;
|
mddev = r1_bio->mddev;
|
||||||
conf = mddev->private;
|
conf = mddev->private;
|
||||||
if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
|
if (test_bit(R1BIO_IsSync, &r1_bio->state))
|
||||||
sync_request_write(mddev, r1_bio);
|
sync_request_write(mddev, r1_bio);
|
||||||
unplug = 1;
|
else {
|
||||||
} else {
|
|
||||||
int disk;
|
int disk;
|
||||||
|
|
||||||
/* we got a read error. Maybe the drive is bad. Maybe just
|
/* we got a read error. Maybe the drive is bad. Maybe just
|
||||||
|
@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
|
||||||
bio->bi_end_io = raid1_end_read_request;
|
bio->bi_end_io = raid1_end_read_request;
|
||||||
bio->bi_rw = READ | do_sync;
|
bio->bi_rw = READ | do_sync;
|
||||||
bio->bi_private = r1_bio;
|
bio->bi_private = r1_bio;
|
||||||
unplug = 1;
|
|
||||||
generic_make_request(bio);
|
generic_make_request(bio);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
cond_resched();
|
cond_resched();
|
||||||
}
|
}
|
||||||
if (unplug)
|
|
||||||
unplug_slaves(mddev);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -2066,11 +2015,9 @@ static int run(mddev_t *mddev)
|
||||||
|
|
||||||
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
|
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
|
||||||
|
|
||||||
mddev->queue->unplug_fn = raid1_unplug;
|
|
||||||
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
|
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
|
||||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||||
md_integrity_register(mddev);
|
return md_integrity_register(mddev);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int stop(mddev_t *mddev)
|
static int stop(mddev_t *mddev)
|
||||||
|
|
|
@@ -57,23 +57,16 @@
  */
 #define NR_RAID10_BIOS 256

-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);

 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	conf_t *conf = data;
-	r10bio_t *r10_bio;
 	int size = offsetof(struct r10bio_s, devs[conf->copies]);

 	/* allocate a r10bio with room for raid_disks entries in the bios array */
-	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio && conf->mddev)
-		unplug_slaves(conf->mddev);
-
-	return r10_bio;
+	return kzalloc(size, gfp_flags);
 }

 static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int nalloc;

 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
-	if (!r10_bio) {
-		unplug_slaves(conf->mddev);
+	if (!r10_bio)
 		return NULL;
-	}

 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 		nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
 	return disk;
 }

-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(q->queuedata);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits)
 	return ret;
 }

-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);

 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		/* Spinlock only taken to quiet a warning */
-		spin_lock(conf->mddev->queue->queue_lock);
-		blk_remove_plug(conf->mddev->queue);
-		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
 }
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)

 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));

 	/* block any new IO from starting */
 	conf->barrier++;
@@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
 	/* No wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));

 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid10_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid10_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }

@@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}

@@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);

-	if (do_sync)
+	if (do_sync || !mddev->bitmap)
 		md_wakeup_thread(mddev->thread);

 	return 0;
@@ -1233,7 +1188,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
 			p->rdev = rdev;
 			goto abort;
 		}
-		md_integrity_register(mddev);
+		err = md_integrity_register(mddev);
 	}
 abort:

@@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;

 	md_check_recovery(mddev);
@@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];

-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);

 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev)

 		mddev = r10_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+		if (test_bit(R10BIO_IsSync, &r10_bio->state))
 			sync_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
 			recovery_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else {
+		else {
 			int mirror;
 			/* we got a read error. Maybe the drive is bad. Maybe just
 			 * the block and we can fix it.
@@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = raid10_end_read_request;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }


@@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;

-	mddev->queue->unplug_fn = raid10_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;

@@ -2395,7 +2343,10 @@ static int run(mddev_t *mddev)

 	if (conf->near_copies < conf->raid_disks)
 		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
-	md_integrity_register(mddev);
+
+	if (md_integrity_register(mddev))
+		goto out_free_conf;
+
 	return 0;

 out_free_conf:
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
 	return 0;
 }

-static void unplug_slaves(mddev_t *mddev);
-
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
 		  int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 				     < (conf->max_nr_stripes *3/4)
 				    || !conf->inactive_blocked),
 				    conf->device_lock,
-				    md_raid5_unplug_device(conf)
-	);
+				    md_raid5_kick_device(conf));
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
 				    conf->device_lock,
-				    unplug_slaves(conf->mddev)
-			);
+				    blk_flush_plug(current));
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
 	}
 }

-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
 {
-	raid5_conf_t *conf = mddev->private;
-	int i;
-	int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
-	rcu_read_lock();
-	for (i = 0; i < devs; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&conf->device_lock, flags);
-
-	if (plugger_remove_plug(&conf->plug)) {
-		conf->seq_flush++;
-		raid5_activate_delayed(conf);
-	}
+	blk_flush_plug(current);
+	raid5_activate_delayed(conf);
 	md_wakeup_thread(conf->mddev->thread);
-
-	spin_unlock_irqrestore(&conf->device_lock, flags);
-
-	unplug_slaves(conf->mddev);
 }
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);

 static void raid5_unplug(struct plug_handle *plug)
 {
 	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-	md_raid5_unplug_device(conf);
-}
-
-static void raid5_unplug_queue(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	md_raid5_unplug_device(mddev->private);
+
+	md_raid5_kick_device(conf);
 }

 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 				 * add failed due to overlap. Flush everything
 				 * and wait a while
 				 */
-				md_raid5_unplug_device(conf);
+				md_raid5_kick_device(conf);
 				release_stripe(sh);
 				schedule();
 				goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski

 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
-		unplug_slaves(mddev);

 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
 	spin_unlock_irq(&conf->device_lock);

 	async_tx_issue_pending_all();
-	unplug_slaves(mddev);

 	pr_debug("--- raid5d inactive\n");
 }
@@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev)

 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->unplug_fn = raid5_unplug_queue;
+		mddev->queue->queue_lock = &conf->device_lock;

 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
 }

 extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
 extern int raid5_set_cache_size(mddev_t *mddev, int size);
 #endif
@@ -695,20 +695,22 @@ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
 };

 /**
- *	i2o_block_media_changed - Have we seen a media change?
+ *	i2o_block_check_events - Have we seen a media change?
  *	@disk: gendisk which should be verified
+ *	@clearing: events being cleared
  *
  *	Verifies if the media has changed.
  *
  *	Returns 1 if the media was changed or 0 otherwise.
  */
-static int i2o_block_media_changed(struct gendisk *disk)
+static unsigned int i2o_block_check_events(struct gendisk *disk,
+					   unsigned int clearing)
 {
 	struct i2o_block_device *p = disk->private_data;

 	if (p->media_change_flag) {
 		p->media_change_flag = 0;
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 	return 0;
 }
@@ -895,11 +897,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 {
 	struct request *req;

-	while (!blk_queue_plugged(q)) {
-		req = blk_peek_request(q);
-		if (!req)
-			break;
-
+	while ((req = blk_peek_request(q)) != NULL) {
 		if (req->cmd_type == REQ_TYPE_FS) {
 			struct i2o_block_delayed_request *dreq;
 			struct i2o_block_request *ireq = req->special;
@@ -950,7 +948,7 @@ static const struct block_device_operations i2o_block_fops = {
 	.ioctl = i2o_block_ioctl,
 	.compat_ioctl = i2o_block_ioctl,
 	.getgeo = i2o_block_getgeo,
-	.media_changed = i2o_block_media_changed
+	.check_events = i2o_block_check_events,
 };

 /**
@@ -1002,6 +1000,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
 	gd->major = I2O_MAJOR;
 	gd->queue = queue;
 	gd->fops = &i2o_block_fops;
+	gd->events = DISK_EVENT_MEDIA_CHANGE;
 	gd->private_data = dev;

 	dev->gd = gd;
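The i2o conversion above follows the generic pattern this merge applies to several drivers: replace the old .media_changed method with .check_events returning a DISK_EVENT_* mask, and declare the supported events on the gendisk. A minimal sketch of that pattern follows; "mydisk" and its per-device structure are placeholders, not a real driver.

#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>

struct mydisk_device {			/* hypothetical per-device state */
	int media_change_flag;
};

static unsigned int mydisk_check_events(struct gendisk *gd,
					unsigned int clearing)
{
	struct mydisk_device *p = gd->private_data;

	/* Report (and clear) a pending media change, if any. */
	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return DISK_EVENT_MEDIA_CHANGE;
	}
	return 0;
}

static const struct block_device_operations mydisk_fops = {
	.owner		= THIS_MODULE,
	.check_events	= mydisk_check_events,
};

At probe time the driver also has to announce which events it reports, as the hunks above do with gd->events = DISK_EVENT_MEDIA_CHANGE; otherwise the core will not poll for them.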
@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
|
||||||
|
|
||||||
spin_lock_irq(q->queue_lock);
|
spin_lock_irq(q->queue_lock);
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
if (!blk_queue_plugged(q))
|
req = blk_fetch_request(q);
|
||||||
req = blk_fetch_request(q);
|
|
||||||
mq->req = req;
|
mq->req = req;
|
||||||
spin_unlock_irq(q->queue_lock);
|
spin_unlock_irq(q->queue_lock);
|
||||||
|
|
||||||
|
|
|
@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
/* Now we try to fetch requests from the request queue */
|
/* Now we try to fetch requests from the request queue */
|
||||||
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
|
while ((req = blk_peek_request(queue))) {
|
||||||
if (basedev->features & DASD_FEATURE_READONLY &&
|
if (basedev->features & DASD_FEATURE_READONLY &&
|
||||||
rq_data_dir(req) == WRITE) {
|
rq_data_dir(req) == WRITE) {
|
||||||
DBF_DEV_EVENT(DBF_ERR, basedev,
|
DBF_DEV_EVENT(DBF_ERR, basedev,
|
||||||
|
|
|
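The mmc and dasd hunks above, like the i2o, tape and SAS ones elsewhere in this merge, simply drop the !blk_queue_plugged(q) guard from their request-fetch loops, since per-queue plug state no longer exists. A minimal sketch of the resulting loop shape, with mydev_* as placeholder names for whatever the driver actually does with a request:

static void mydev_request_fn(struct request_queue *q)
{
	struct request *req;

	/* No blk_queue_plugged() gate any more: just drain what is queued. */
	while ((req = blk_fetch_request(q)) != NULL)
		mydev_handle_request(req);	/* hypothetical helper */
}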
@@ -48,14 +48,14 @@
 static DEFINE_MUTEX(tape_block_mutex);
 static int tapeblock_open(struct block_device *, fmode_t);
 static int tapeblock_release(struct gendisk *, fmode_t);
-static int tapeblock_medium_changed(struct gendisk *);
+static unsigned int tapeblock_check_events(struct gendisk *, unsigned int);
 static int tapeblock_revalidate_disk(struct gendisk *);

 static const struct block_device_operations tapeblock_fops = {
 	.owner		 = THIS_MODULE,
 	.open		 = tapeblock_open,
 	.release	 = tapeblock_release,
-	.media_changed   = tapeblock_medium_changed,
+	.check_events	 = tapeblock_check_events,
 	.revalidate_disk = tapeblock_revalidate_disk,
 };

@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {

 	spin_lock_irq(&device->blk_data.request_queue_lock);
 	while (
-		!blk_queue_plugged(queue) &&
 		blk_peek_request(queue) &&
 		nr_queued < TAPEBLOCK_MIN_REQUEUE
 	) {
@@ -237,6 +236,7 @@ tapeblock_setup_device(struct tape_device * device)
 	disk->major = tapeblock_major;
 	disk->first_minor = device->first_minor;
 	disk->fops = &tapeblock_fops;
+	disk->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->private_data = tape_get_device(device);
 	disk->queue = blkdat->request_queue;
 	set_capacity(disk, 0);
@@ -340,8 +340,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
 	return 0;
 }

-static int
-tapeblock_medium_changed(struct gendisk *disk)
+static unsigned int
+tapeblock_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	struct tape_device *device;

@@ -349,7 +349,7 @@ tapeblock_medium_changed(struct gendisk *disk)
 	DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
 		device, device->blk_data.medium_changed);

-	return device->blk_data.medium_changed;
+	return device->blk_data.medium_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 }

 /*
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {

 struct kmem_cache *scsi_sdb_cache;

+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY	3
+
 static void scsi_run_queue(struct request_queue *q);

 /*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 	/*
 	 * Requeue this command.  It will go before all other commands
 	 * that are already in the queue.
-	 *
-	 * NOTE: there is magic here about the way the queue is plugged if
-	 * we have no outstanding commands.
-	 *
-	 * Although we *don't* plug the queue, we call the request
-	 * function.  The SCSI request function detects the blocked condition
-	 * and plugs the queue appropriately.
 	 */
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1226,11 +1226,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 	case BLKPREP_DEFER:
 		/*
 		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we plug here if no returning
-		 * command will automatically do that.
+		 * queue must be restarted, so we schedule a callback to happen
+		 * shortly.
 		 */
 		if (sdev->device_busy == 0)
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
 		req->cmd_flags |= REQ_DONTPREP;
@@ -1269,7 +1269,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 				   sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 		} else {
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			return 0;
 		}
 	}
@@ -1499,7 +1499,7 @@ static void scsi_request_fn(struct request_queue *q)
 	 * the host is no longer able to accept any more requests.
 	 */
 	shost = sdev->host;
-	while (!blk_queue_plugged(q)) {
+	for (;;) {
 		int rtn;
 		/*
 		 * get next queueable request.  We do this early to make sure
@@ -1578,15 +1578,8 @@ static void scsi_request_fn(struct request_queue *q)
 		 */
 		rtn = scsi_dispatch_cmd(cmd);
 		spin_lock_irq(q->queue_lock);
-		if(rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if(sdev->device_busy == 0)
-				blk_plug_device(q);
-
-			break;
-		}
+		if (rtn)
+			goto out_delay;
 	}

 	goto out;
@@ -1605,9 +1598,10 @@ static void scsi_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if(sdev->device_busy == 0)
-		blk_plug_device(q);
- out:
+out_delay:
+	if (sdev->device_busy == 0)
+		blk_delay_queue(q, SCSI_QUEUE_DELAY);
+out:
 	/* must be careful here...if we trigger the ->remove() function
 	 * we cannot be holding the q lock */
 	spin_unlock_irq(q->queue_lock);
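The SCSI hunks above show the replacement idiom for the old "plug the queue and wait for an unplug" trick: when the device is temporarily out of resources, ask the block layer to re-run the queue after a short delay with blk_delay_queue(). A hedged sketch of that idiom in a generic request_fn; MYDEV_QUEUE_DELAY mirrors the SCSI_QUEUE_DELAY value chosen above, and mydev_busy()/mydev_dispatch() are placeholders.

#include <linux/blkdev.h>

#define MYDEV_QUEUE_DELAY	3	/* msecs, same value SCSI uses above */

static void mydev_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (mydev_busy()) {
			/* Previously: blk_plug_device(q). Now ask the block
			 * layer to re-run this queue a little later. */
			blk_delay_queue(q, MYDEV_QUEUE_DELAY);
			return;
		}
		blk_start_request(req);
		mydev_dispatch(req);
	}
}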
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
 	if (!get_device(dev))
 		return;

-	while (!blk_queue_plugged(q)) {
+	while (1) {
 		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
 		    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
 			break;
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
 	int ret;
 	int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);

-	while (!blk_queue_plugged(q)) {
-		req = blk_fetch_request(q);
-		if (!req)
-			break;
-
+	while ((req = blk_fetch_request(q)) != NULL) {
 		spin_unlock_irq(q->queue_lock);

 		handler = to_sas_internal(shost->transportt)->f->smp_handler;
@@ -124,7 +124,8 @@ static void blkvsc_shutdown(struct device *device);

 static int blkvsc_open(struct block_device *bdev,  fmode_t mode);
 static int blkvsc_release(struct gendisk *disk, fmode_t mode);
-static int blkvsc_media_changed(struct gendisk *gd);
+static unsigned int blkvsc_check_events(struct gendisk *gd,
+					unsigned int clearing);
 static int blkvsc_revalidate_disk(struct gendisk *gd);
 static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
 static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
@@ -155,7 +156,7 @@ static const struct block_device_operations block_ops = {
 	.owner = THIS_MODULE,
 	.open = blkvsc_open,
 	.release = blkvsc_release,
-	.media_changed = blkvsc_media_changed,
+	.check_events = blkvsc_check_events,
 	.revalidate_disk = blkvsc_revalidate_disk,
 	.getgeo = blkvsc_getgeo,
 	.ioctl =  blkvsc_ioctl,
@@ -357,6 +358,7 @@ static int blkvsc_probe(struct device *device)
 	else
 		blkdev->gd->first_minor = 0;
 	blkdev->gd->fops = &block_ops;
+	blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
 	blkdev->gd->private_data = blkdev;
 	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
 	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
@@ -1337,10 +1339,11 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode)
 	return 0;
 }

-static int blkvsc_media_changed(struct gendisk *gd)
+static unsigned int blkvsc_check_events(struct gendisk *gd,
+					unsigned int clearing)
 {
 	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
-	return 1;
+	return DISK_EVENT_MEDIA_CHANGE;
 }

 static int blkvsc_revalidate_disk(struct gendisk *gd)
@@ -381,10 +381,10 @@ static int cyasblkdev_blk_ioctl(
 	return -ENOTTY;
 }

-/* Media_changed block_device opp
+/* check_events block_device opp
  * this one is called by kernel to confirm if the media really changed
  * as we indicated by issuing check_disk_change() call */
-int cyasblkdev_media_changed(struct gendisk *gd)
+unsigned int cyasblkdev_check_events(struct gendisk *gd, unsigned int clearing)
 {
 	struct cyasblkdev_blk_data *bd;

@@ -402,7 +402,7 @@ int cyasblkdev_media_changed(struct gendisk *gd)
 #endif
 	}

-	/* return media change state "1" yes, 0 no */
+	/* return media change state - DISK_EVENT_MEDIA_CHANGE yes, 0 no */
 	return 0;
 }

@@ -432,7 +432,7 @@ static struct block_device_operations cyasblkdev_bdops = {
 	.ioctl = cyasblkdev_blk_ioctl,
 	/* .getgeo = cyasblkdev_blk_getgeo, */
 	/* added to support media removal( real and simulated) media */
-	.media_changed = cyasblkdev_media_changed,
+	.check_events = cyasblkdev_check_events,
 	/* added to support media removal( real and simulated) media */
 	.revalidate_disk = cyasblkdev_revalidate_disk,
 	.owner = THIS_MODULE,
@@ -1090,6 +1090,7 @@ static int cyasblkdev_add_disks(int bus_num,
 		bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT;
 		bd->user_disk_0->minors = 8;
 		bd->user_disk_0->fops = &cyasblkdev_bdops;
+		bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
 		bd->user_disk_0->private_data = bd;
 		bd->user_disk_0->queue = bd->queue.queue;
 		bd->dbgprn_flags = DBGPRN_RD_RQ;
@@ -1190,6 +1191,7 @@ static int cyasblkdev_add_disks(int bus_num,
 		bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT;
 		bd->user_disk_1->minors = 8;
 		bd->user_disk_1->fops = &cyasblkdev_bdops;
+		bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
 		bd->user_disk_1->private_data = bd;
 		bd->user_disk_1->queue = bd->queue.queue;
 		bd->dbgprn_flags = DBGPRN_RD_RQ;
@@ -1278,6 +1280,7 @@ static int cyasblkdev_add_disks(int bus_num,
 			(devidx + 2) << CYASBLKDEV_SHIFT;
 		bd->system_disk->minors = 8;
 		bd->system_disk->fops = &cyasblkdev_bdops;
+		bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE;
 		bd->system_disk->private_data = bd;
 		bd->system_disk->queue = bd->queue.queue;
 		/* don't search for vfat
@@ -391,9 +391,8 @@ static int iblock_do_task(struct se_task *task)
 {
 	struct se_device *dev = task->task_se_cmd->se_dev;
 	struct iblock_req *req = IBLOCK_REQ(task);
-	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
-	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
 	struct bio *bio = req->ib_bio, *nbio = NULL;
+	struct blk_plug plug;
 	int rw;

 	if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -411,6 +410,7 @@ static int iblock_do_task(struct se_task *task)
 		rw = READ;
 	}

+	blk_start_plug(&plug);
 	while (bio) {
 		nbio = bio->bi_next;
 		bio->bi_next = NULL;
@@ -420,9 +420,8 @@ static int iblock_do_task(struct se_task *task)
 		submit_bio(rw, bio);
 		bio = nbio;
 	}
+	blk_finish_plug(&plug);

-	if (q->unplug_fn)
-		q->unplug_fn(q);
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }

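The iblock_do_task change above is the canonical user of the new on-stack plugging API: instead of poking the queue's unplug_fn after submission, the submitter batches its bios behind a per-task plug. A minimal sketch of the same pattern in isolation; submit_bio_chain() is a hypothetical helper, not a kernel API.

#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_bio_chain(int rw, struct bio *bio)
{
	struct blk_plug plug;
	struct bio *next;

	blk_start_plug(&plug);
	while (bio) {
		next = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio(rw, bio);		/* queued behind the plug */
		bio = next;
	}
	blk_finish_plug(&plug);		/* flushes the batched requests */
}

The plug lives on the caller's stack, so no per-queue state or timer is involved; anything left in the plug list is also flushed automatically if the task sleeps.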
@@ -72,7 +72,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
 	.readpage	= adfs_readpage,
 	.writepage	= adfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= adfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= _adfs_bmap
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
 	.readpage = affs_readpage,
 	.writepage = affs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = affs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
 const struct address_space_operations affs_aops_ofs = {
 	.readpage = affs_readpage_ofs,
 	//.writepage = affs_writepage_ofs,
-	//.sync_page = affs_sync_page_ofs,
 	.write_begin = affs_write_begin_ofs,
 	.write_end = affs_write_end_ofs
 };

 fs/aio.c | 77
@@ -34,8 +34,6 @@
 #include <linux/security.h>
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
 #include <linux/compat.h>

 #include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);

-#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
-	struct hlist_node list;
-	struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);

@@ -86,8 +76,7 @@ static int __init aio_setup(void)
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

 	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
-	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-	BUG_ON(!aio_wq || !abe_pool);
+	BUG_ON(!aio_wq);

 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

@@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 	return 0;
 }

-static void aio_batch_add(struct address_space *mapping,
-			  struct hlist_head *batch_hash)
-{
-	struct aio_batch_entry *abe;
-	struct hlist_node *pos;
-	unsigned bucket;
-
-	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
-	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
-		if (abe->mapping == mapping)
-			return;
-	}
-
-	abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
-	/*
-	 * we should be using igrab here, but
-	 * we don't want to hammer on the global
-	 * inode spinlock just to take an extra
-	 * reference on a file that we must already
-	 * have a reference to.
-	 *
-	 * When we're called, we always have a reference
-	 * on the file, so we must always have a reference
-	 * on the inode, so ihold() is safe here.
-	 */
-	ihold(mapping->host);
-	abe->mapping = mapping;
-	hlist_add_head(&abe->list, &batch_hash[bucket]);
-	return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
-	struct aio_batch_entry *abe;
-	struct hlist_node *pos, *n;
-	int i;
-
-	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
-		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
-			blk_run_address_space(abe->mapping);
-			iput(abe->mapping->host);
-			hlist_del(&abe->list);
-			mempool_free(abe, abe_pool);
-		}
-	}
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct hlist_head *batch_hash,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		;
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
-	if (req->ki_opcode == IOCB_CMD_PREAD ||
-	    req->ki_opcode == IOCB_CMD_PREADV ||
-	    req->ki_opcode == IOCB_CMD_PWRITE ||
-	    req->ki_opcode == IOCB_CMD_PWRITEV)
-		aio_batch_add(file->f_mapping, batch_hash);

 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
@@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 	struct kioctx *ctx;
 	long ret = 0;
 	int i;
-	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
+	struct blk_plug plug;

 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 		return -EINVAL;
 	}

+	blk_start_plug(&plug);
+
 	/*
 	 * AKPM: should this return a partial result if some of the IOs were
 	 * successfully submitted?
@@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}

-		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
-	aio_batch_free(batch_hash);
+	blk_finish_plug(&plug);

 	put_ioctx(ctx);
 	return i ? i : ret;
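With the fs/aio.c change above, every io_submit() system call runs under a single blk_start_plug()/blk_finish_plug() pair, replacing the old per-mapping batch hash. The user-visible consequence is that submitting many iocbs in one io_submit() call gives the block layer one batching window for all of them. A hedged userspace sketch of that usage, assuming libaio (link with -laio); the device path and sizes are arbitrary examples.

#define _GNU_SOURCE
#include <libaio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>

#define NR_IOS	64
#define IO_SZ	4096

int main(int argc, char **argv)
{
	io_context_t ctx;
	struct iocb iocbs[NR_IOS], *iocbps[NR_IOS];
	struct io_event events[NR_IOS];
	int fd, i;

	fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;

	memset(&ctx, 0, sizeof(ctx));
	if (io_setup(NR_IOS, &ctx))
		return 1;

	for (i = 0; i < NR_IOS; i++) {
		void *buf;

		if (posix_memalign(&buf, IO_SZ, IO_SZ))
			return 1;
		io_prep_pread(&iocbs[i], fd, buf, IO_SZ, (long long)i * IO_SZ);
		iocbps[i] = &iocbs[i];
	}

	/* One syscall: all 64 reads are queued under a single on-stack plug,
	 * giving the block layer a chance to merge and batch them. */
	if (io_submit(ctx, NR_IOS, iocbps) != NR_IOS)
		return 1;
	if (io_getevents(ctx, NR_IOS, NR_IOS, events, NULL) != NR_IOS)
		return 1;

	io_destroy(ctx);
	return 0;
}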
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {

 static const struct address_space_operations befs_aops = {
 	.readpage	= befs_readpage,
-	.sync_page	= block_sync_page,
 	.bmap		= befs_bmap,
 };

@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
 	.readpage	= bfs_readpage,
 	.writepage	= bfs_writepage,
-	.sync_page	= block_sync_page,
 	.write_begin	= bfs_write_begin,
 	.write_end	= generic_write_end,
 	.bmap		= bfs_bmap,
@@ -761,6 +761,9 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
 	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);

+	if (bs->bio_integrity_pool)
+		return 0;
+
 	bs->bio_integrity_pool =
 		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);

 fs/bio.c | 10
@@ -43,7 +43,7 @@ static mempool_t *bio_split_pool __read_mostly;
  * unsigned short
  */
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
     BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -1636,9 +1636,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
     if (!bs->bio_pool)
         goto bad;
 
-    if (bioset_integrity_create(bs, pool_size))
-        goto bad;
-
     if (!biovec_create_pools(bs, pool_size))
         return bs;
 
@@ -1656,12 +1653,10 @@ static void __init biovec_init_slabs(void)
         int size;
         struct biovec_slab *bvs = bvec_slabs + i;
 
-#ifndef CONFIG_BLK_DEV_INTEGRITY
         if (bvs->nr_vecs <= BIO_INLINE_VECS) {
             bvs->slab = NULL;
             continue;
         }
-#endif
 
         size = bvs->nr_vecs * sizeof(struct bio_vec);
         bvs->slab = kmem_cache_create(bvs->name, size, 0,
@@ -1684,6 +1679,9 @@ static int __init init_bio(void)
     if (!fs_bio_set)
         panic("bio: can't allocate bios\n");
 
+    if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+        panic("bio: can't create integrity pool\n");
+
     bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                              sizeof(struct bio_pair));
     if (!bio_split_pool)

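The bioset_create()/init_bio() hunks above stop allocating the integrity mempool implicitly: bioset_create() no longer calls bioset_integrity_create(), and fs_bio_set now sets it up explicitly in init_bio(). Below is a hedged sketch of what a subsystem with its own bio_set would do after this change; my_bioset_init() and the pool size of 64 are illustrative, while bioset_create(), bioset_integrity_create() and bioset_free() are the real interfaces.

#include <linux/bio.h>

/*
 * Sketch: with the integrity pool no longer created inside
 * bioset_create(), a subsystem that wants bio-integrity support on its
 * private bio_set must request it explicitly.  Pool size is illustrative.
 */
static struct bio_set *my_bioset_init(void)
{
        struct bio_set *bs;

        bs = bioset_create(64, 0);              /* pool_size, front_pad */
        if (!bs)
                return NULL;

        if (bioset_integrity_create(bs, 64)) {  /* now an explicit step */
                bioset_free(bs);
                return NULL;
        }
        return bs;
}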
@@ -1087,6 +1087,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
     if (!disk)
         goto out;
 
+    disk_block_events(disk);
     mutex_lock_nested(&bdev->bd_mutex, for_part);
     if (!bdev->bd_openers) {
         bdev->bd_disk = disk;
@@ -1108,10 +1109,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                  */
                 disk_put_part(bdev->bd_part);
                 bdev->bd_part = NULL;
-                module_put(disk->fops->owner);
-                put_disk(disk);
                 bdev->bd_disk = NULL;
                 mutex_unlock(&bdev->bd_mutex);
+                disk_unblock_events(disk);
+                module_put(disk->fops->owner);
+                put_disk(disk);
                 goto restart;
             }
             if (ret)
@@ -1148,9 +1150,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
             }
         } else {
-            module_put(disk->fops->owner);
-            put_disk(disk);
-            disk = NULL;
             if (bdev->bd_contains == bdev) {
                 if (bdev->bd_disk->fops->open) {
                     ret = bdev->bd_disk->fops->open(bdev, mode);
@@ -1160,11 +1159,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                 if (bdev->bd_invalidated)
                     rescan_partitions(bdev->bd_disk, bdev);
             }
+            /* only one opener holds refs to the module and disk */
+            module_put(disk->fops->owner);
+            put_disk(disk);
         }
         bdev->bd_openers++;
         if (for_part)
             bdev->bd_part_count++;
         mutex_unlock(&bdev->bd_mutex);
+        disk_unblock_events(disk);
         return 0;
 
  out_clear:
@@ -1177,10 +1180,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
     bdev->bd_contains = NULL;
  out_unlock_bdev:
     mutex_unlock(&bdev->bd_mutex);
- out:
-    if (disk)
-        module_put(disk->fops->owner);
+    disk_unblock_events(disk);
+    module_put(disk->fops->owner);
     put_disk(disk);
+ out:
     bdput(bdev);
 
     return ret;
@@ -1446,14 +1449,13 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
         if (bdev_free) {
             if (bdev->bd_write_holder) {
                 disk_unblock_events(bdev->bd_disk);
-                bdev->bd_write_holder = false;
-            } else
                 disk_check_events(bdev->bd_disk);
+                bdev->bd_write_holder = false;
+            }
         }
 
         mutex_unlock(&bdev->bd_mutex);
-    } else
-        disk_check_events(bdev->bd_disk);
+    }
 
     return __blkdev_put(bdev, mode, 0);
 }
@@ -1527,7 +1529,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 static const struct address_space_operations def_blk_aops = {
     .readpage       = blkdev_readpage,
     .writepage      = blkdev_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = blkdev_write_begin,
     .write_end      = blkdev_write_end,
     .writepages     = generic_writepages,

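The __blkdev_get() hunks above bracket the whole open path with disk_block_events()/disk_unblock_events(), and rearrange the restart and error paths so the unblock (and the module/disk references) are dropped on every exit. A rough sketch of that pairing follows; my_open_disk() is a hypothetical stand-in for the real open logic, while disk_block_events() and disk_unblock_events() are the interfaces shown in the diff.

#include <linux/genhd.h>

/* Hypothetical open logic, standing in for what __blkdev_get() really does. */
static int my_open_disk(struct gendisk *disk)
{
        return 0;
}

/*
 * Sketch of the block/unblock pairing used above: media-event polling is
 * suspended for the duration of the open and re-enabled on every exit
 * path, whether the open succeeded or not.
 */
static int open_with_events_blocked(struct gendisk *disk)
{
        int ret;

        disk_block_events(disk);
        ret = my_open_disk(disk);
        disk_unblock_events(disk);

        return ret;
}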
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
     .writepages     = btree_writepages,
     .releasepage    = btree_releasepage,
     .invalidatepage = btree_invalidatepage,
-    .sync_page      = block_sync_page,
 #ifdef CONFIG_MIGRATION
     .migratepage    = btree_migratepage,
 #endif
@@ -1330,82 +1329,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
     return ret;
 }
 
-/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-    struct btrfs_device *device;
-    struct btrfs_fs_info *info;
-
-    info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-    list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-        if (!device->bdev)
-            continue;
-
-        bdi = blk_get_backing_dev_info(device->bdev);
-        if (bdi->unplug_io_fn)
-            bdi->unplug_io_fn(bdi, page);
-    }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-    struct inode *inode;
-    struct extent_map_tree *em_tree;
-    struct extent_map *em;
-    struct address_space *mapping;
-    u64 offset;
-
-    /* the generic O_DIRECT read code does this */
-    if (1 || !page) {
-        __unplug_io_fn(bdi, page);
-        return;
-    }
-
-    /*
-     * page->mapping may change at any time. Get a consistent copy
-     * and use that for everything below
-     */
-    smp_mb();
-    mapping = page->mapping;
-    if (!mapping)
-        return;
-
-    inode = mapping->host;
-
-    /*
-     * don't do the expensive searching for a small number of
-     * devices
-     */
-    if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-        __unplug_io_fn(bdi, page);
-        return;
-    }
-
-    offset = page_offset(page);
-
-    em_tree = &BTRFS_I(inode)->extent_tree;
-    read_lock(&em_tree->lock);
-    em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-    read_unlock(&em_tree->lock);
-    if (!em) {
-        __unplug_io_fn(bdi, page);
-        return;
-    }
-
-    if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-        free_extent_map(em);
-        __unplug_io_fn(bdi, page);
-        return;
-    }
-    offset = offset - em->start;
-    btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-              em->block_start + offset, page);
-    free_extent_map(em);
-}
-
 /*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
         return err;
 
     bdi->ra_pages       = default_backing_dev_info.ra_pages;
-    bdi->unplug_io_fn   = btrfs_unplug_io_fn;
-    bdi->unplug_io_data = info;
     bdi->congested_fn   = btrfs_congested_fn;
     bdi->congested_data = info;
     return 0;

@@ -2188,7 +2188,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
     unsigned long nr_written = 0;
 
     if (wbc->sync_mode == WB_SYNC_ALL)
-        write_flags = WRITE_SYNC_PLUG;
+        write_flags = WRITE_SYNC;
     else
         write_flags = WRITE;
 

@@ -7340,7 +7340,6 @@ static const struct address_space_operations btrfs_aops = {
     .writepage      = btrfs_writepage,
     .writepages     = btrfs_writepages,
     .readpages      = btrfs_readpages,
-    .sync_page      = block_sync_page,
     .direct_IO      = btrfs_direct_IO,
     .invalidatepage = btrfs_invalidatepage,
     .releasepage    = btrfs_releasepage,

@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
     struct bio *cur;
     int again = 0;
     unsigned long num_run;
-    unsigned long num_sync_run;
     unsigned long batch_run = 0;
     unsigned long limit;
     unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
     limit = btrfs_async_submit_limit(fs_info);
     limit = limit * 2 / 3;
 
-    /* we want to make sure that every time we switch from the sync
-     * list to the normal list, we unplug
-     */
-    num_sync_run = 0;
-
 loop:
     spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
     spin_unlock(&device->io_lock);
 
-    /*
-     * if we're doing the regular priority list, make sure we unplug
-     * for any high prio bios we've sent down
-     */
-    if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-        num_sync_run = 0;
-        blk_run_backing_dev(bdi, NULL);
-    }
-
     while (pending) {
 
         rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
         BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-        if (cur->bi_rw & REQ_SYNC)
-            num_sync_run++;
-
         submit_bio(cur->bi_rw, cur);
         num_run++;
         batch_run++;
-        if (need_resched()) {
-            if (num_sync_run) {
-                blk_run_backing_dev(bdi, NULL);
-                num_sync_run = 0;
-            }
+        if (need_resched())
             cond_resched();
-        }
 
         /*
          * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
              * against it before looping
              */
             last_waited = ioc->last_waited;
-            if (need_resched()) {
-                if (num_sync_run) {
-                    blk_run_backing_dev(bdi, NULL);
-                    num_sync_run = 0;
-                }
+            if (need_resched())
                 cond_resched();
-            }
             continue;
         }
         spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
         }
     }
 
-    if (num_sync_run) {
-        num_sync_run = 0;
-        blk_run_backing_dev(bdi, NULL);
-    }
-    /*
-     * IO has already been through a long path to get here. Checksumming,
-     * async helper threads, perhaps compression. We've done a pretty
-     * good job of collecting a batch of IO and should just unplug
-     * the device right away.
-     *
-     * This will help anyone who is waiting on the IO, they might have
-     * already unplugged, but managed to do so before the bio they
-     * cared about found its way down here.
-     */
-    blk_run_backing_dev(bdi, NULL);
-
     cond_resched();
     if (again)
         goto loop;
@@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                  u64 logical, u64 *length,
                  struct btrfs_multi_bio **multi_ret,
-                 int mirror_num, struct page *unplug_page)
+                 int mirror_num)
 {
     struct extent_map *em;
     struct map_lookup *map;
@@ -2987,11 +2943,6 @@ again:
     em = lookup_extent_mapping(em_tree, logical, *length);
     read_unlock(&em_tree->lock);
 
-    if (!em && unplug_page) {
-        kfree(multi);
-        return 0;
-    }
-
     if (!em) {
         printk(KERN_CRIT "unable to find logical %llu len %llu\n",
                (unsigned long long)logical,
@@ -3047,13 +2998,13 @@ again:
         *length = em->len - offset;
     }
 
-    if (!multi_ret && !unplug_page)
+    if (!multi_ret)
         goto out;
 
     num_stripes = 1;
     stripe_index = 0;
     if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-        if (unplug_page || (rw & REQ_WRITE))
+        if (rw & REQ_WRITE)
             num_stripes = map->num_stripes;
         else if (mirror_num)
             stripe_index = mirror_num - 1;
@@ -3075,7 +3026,7 @@ again:
         stripe_index = do_div(stripe_nr, factor);
         stripe_index *= map->sub_stripes;
 
-        if (unplug_page || (rw & REQ_WRITE))
+        if (rw & REQ_WRITE)
             num_stripes = map->sub_stripes;
         else if (mirror_num)
             stripe_index += mirror_num - 1;
@@ -3095,22 +3046,10 @@ again:
     BUG_ON(stripe_index >= map->num_stripes);
 
     for (i = 0; i < num_stripes; i++) {
-        if (unplug_page) {
-            struct btrfs_device *device;
-            struct backing_dev_info *bdi;
-
-            device = map->stripes[stripe_index].dev;
-            if (device->bdev) {
-                bdi = blk_get_backing_dev_info(device->bdev);
-                if (bdi->unplug_io_fn)
-                    bdi->unplug_io_fn(bdi, unplug_page);
-            }
-        } else {
-            multi->stripes[i].physical =
-                map->stripes[stripe_index].physical +
-                stripe_offset + stripe_nr * map->stripe_len;
-            multi->stripes[i].dev = map->stripes[stripe_index].dev;
-        }
+        multi->stripes[i].physical =
+            map->stripes[stripe_index].physical +
+            stripe_offset + stripe_nr * map->stripe_len;
+        multi->stripes[i].dev = map->stripes[stripe_index].dev;
         stripe_index++;
     }
     if (multi_ret) {
@@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
               struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
     return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-                 mirror_num, NULL);
+                 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
     return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-              u64 logical, struct page *page)
-{
-    u64 length = PAGE_CACHE_SIZE;
-    return __btrfs_map_block(map_tree, READ, logical, &length,
-                 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
     struct btrfs_multi_bio *multi = bio->bi_private;

fs/buffer.c (51 changed lines)
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-    struct block_device *bd;
-    struct buffer_head *bh
-        = container_of(word, struct buffer_head, b_state);
-
-    smp_mb();
-    bd = bh->b_bdev;
-    if (bd)
-        blk_run_address_space(bd->bd_inode->i_mapping);
     io_schedule();
     return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-    wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+    wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                             TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-    wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+    wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
     struct buffer_head *bh;
     struct list_head tmp;
-    struct address_space *mapping, *prev_mapping = NULL;
+    struct address_space *mapping;
     int err = 0, err2;
+    struct blk_plug plug;
 
     INIT_LIST_HEAD(&tmp);
+    blk_start_plug(&plug);
 
     spin_lock(lock);
     while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                  * still in flight on potentially older
                  * contents.
                  */
-                write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+                write_dirty_buffer(bh, WRITE_SYNC);
 
                 /*
                  * Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                  * wait_on_buffer() will do that for us
                  * through sync_buffer().
                  */
-                if (prev_mapping && prev_mapping != mapping)
-                    blk_run_address_space(prev_mapping);
-                prev_mapping = mapping;
-
                 brelse(bh);
                 spin_lock(lock);
             }
         }
     }
 
+    spin_unlock(lock);
+    blk_finish_plug(&plug);
+    spin_lock(lock);
+
     while (!list_empty(&tmp)) {
         bh = BH_ENTRY(tmp.prev);
         get_bh(bh);
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
             get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
     const unsigned blocksize = 1 << inode->i_blkbits;
     int nr_underway = 0;
     int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-            WRITE_SYNC_PLUG : WRITE);
+            WRITE_SYNC : WRITE);
 
     BUG_ON(!PageLocked(page));
 
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-    struct address_space *mapping;
-
-    smp_mb();
-    mapping = page_mapping(page);
-    if (mapping)
-        blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left. But distributions are
  * still running obsolete flush daemons, so we terminate them here.

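The fsync_buffers_list() hunks above are the other half of the plugging conversion: the hand-rolled blk_run_address_space() kick for the previous mapping goes away, the submission loop runs under an on-stack plug, and the plug is flushed with the spinlock dropped before the wait loop starts. The sketch below reduces that choreography to its shape; flush_list_plugged() and submit_one() are invented names, while the plug API and the lock handling mirror the hunk.

#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical per-entry submit, standing in for write_dirty_buffer(). */
static void submit_one(struct list_head *entry)
{
}

/*
 * Sketch of the fsync_buffers_list() pattern: queue IO under a plug,
 * dropping the lock around each (possibly sleeping) submission, then
 * flush the plug outside the lock so dispatch never happens under it.
 */
static void flush_list_plugged(spinlock_t *lock, struct list_head *list)
{
        struct blk_plug plug;

        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                struct list_head *entry = list->next;

                list_del_init(entry);
                spin_unlock(lock);
                submit_one(entry);      /* may sleep; lock is dropped */
                spin_lock(lock);
        }
        spin_unlock(lock);

        blk_finish_plug(&plug);         /* dispatch the batched IO */
}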
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
     return rc;
 }
 
-/* static void cifs_sync_page(struct page *page)
-{
-    struct address_space *mapping;
-    struct inode *inode;
-    unsigned long index = page->index;
-    unsigned int rpages = 0;
-    int rc = 0;
-
-    cFYI(1, "sync page %p", page);
-    mapping = page->mapping;
-    if (!mapping)
-        return 0;
-    inode = mapping->host;
-    if (!inode)
-        return; */
-
-/*  fill in rpages then
-    result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/*  cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
-    if (rc < 0)
-        return rc;
-    return 0;
-#endif
-} */
-
 /*
  * As file closes, flush all cached write data for this inode checking
  * for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
     .set_page_dirty = __set_page_dirty_nobuffers,
     .releasepage = cifs_release_page,
     .invalidatepage = cifs_invalidate_page,
-    /* .sync_page = cifs_sync_page, */
     /* .direct_IO = */
 };
 
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
     .set_page_dirty = __set_page_dirty_nobuffers,
     .releasepage = cifs_release_page,
     .invalidatepage = cifs_invalidate_page,
-    /* .sync_page = cifs_sync_page, */
     /* .direct_IO = */
 };

@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
         ((rw & READ) || (dio->result == dio->size)))
         ret = -EIOCBQUEUED;
 
-    if (ret != -EIOCBQUEUED) {
-        /* All IO is now issued, send it on its way */
-        blk_run_address_space(inode->i_mapping);
+    if (ret != -EIOCBQUEUED)
         dio_await_completion(dio);
-    }
 
     /*
      * Sync will always be dropping the final ref and completing the
@@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
     struct dio *dio;
 
     if (rw & WRITE)
-        rw = WRITE_ODIRECT_PLUG;
+        rw = WRITE_ODIRECT;
 
     if (bdev)
         bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));

@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
 }
 static const struct address_space_operations efs_aops = {
     .readpage = efs_readpage,
-    .sync_page = block_sync_page,
     .bmap = _efs_bmap
 };
 

@@ -823,7 +823,6 @@ const struct address_space_operations exofs_aops = {
     .direct_IO      = NULL, /* TODO: Should be trivial to do */
 
     /* With these NULL has special meaning or default is not exported */
-    .sync_page      = NULL,
     .get_xip_mem    = NULL,
     .migratepage    = NULL,
     .launder_page   = NULL,

@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
     .readpage       = ext2_readpage,
     .readpages      = ext2_readpages,
     .writepage      = ext2_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext2_write_begin,
     .write_end      = ext2_write_end,
     .bmap           = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
     .readpage       = ext2_readpage,
     .readpages      = ext2_readpages,
     .writepage      = ext2_nobh_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext2_nobh_write_begin,
     .write_end      = nobh_write_end,
     .bmap           = ext2_bmap,

@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
     .readpage       = ext3_readpage,
     .readpages      = ext3_readpages,
     .writepage      = ext3_ordered_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext3_write_begin,
     .write_end      = ext3_ordered_write_end,
     .bmap           = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
     .readpage       = ext3_readpage,
     .readpages      = ext3_readpages,
     .writepage      = ext3_writeback_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext3_write_begin,
     .write_end      = ext3_writeback_write_end,
     .bmap           = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
     .readpage       = ext3_readpage,
     .readpages      = ext3_readpages,
     .writepage      = ext3_journalled_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext3_write_begin,
     .write_end      = ext3_journalled_write_end,
     .set_page_dirty = ext3_journalled_set_page_dirty,

@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
     .readpage       = ext4_readpage,
     .readpages      = ext4_readpages,
     .writepage      = ext4_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext4_write_begin,
     .write_end      = ext4_ordered_write_end,
     .bmap           = ext4_bmap,
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
     .readpage       = ext4_readpage,
     .readpages      = ext4_readpages,
     .writepage      = ext4_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext4_write_begin,
     .write_end      = ext4_writeback_write_end,
     .bmap           = ext4_bmap,
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
     .readpage       = ext4_readpage,
     .readpages      = ext4_readpages,
     .writepage      = ext4_writepage,
-    .sync_page      = block_sync_page,
     .write_begin    = ext4_write_begin,
     .write_end      = ext4_journalled_write_end,
     .set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
     .readpages      = ext4_readpages,
     .writepage      = ext4_writepage,
     .writepages     = ext4_da_writepages,
-    .sync_page      = block_sync_page,
     .write_begin    = ext4_da_write_begin,
     .write_end      = ext4_da_write_end,
     .bmap           = ext4_bmap,

Some files were not shown because too many files have changed in this diff.