Merge remote-tracking branch 'jens/for-4.8/core' into dm-4.8
DM's DAX support depends on block core's newly added QUEUE_FLAG_DAX.
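For orientation, below is a minimal sketch of how the new flag is meant to be used, matching the hunks that follow: a driver advertises DAX by setting QUEUE_FLAG_DAX on its request queue, and bdev_direct_access() now refuses DAX unless blk_queue_dax() reports the flag. The helpers my_setup_queue() and my_check_dax() are hypothetical names used only for illustration, not part of this merge.

  /* Minimal sketch of the QUEUE_FLAG_DAX usage introduced by this merge.
   * my_setup_queue() and my_check_dax() are hypothetical illustration
   * helpers, not functions from this diff.
   */
  #include <linux/blkdev.h>

  static void my_setup_queue(struct request_queue *q)
  {
          /* Driver side: advertise DAX support on the queue. */
          queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
  }

  static long my_check_dax(struct block_device *bdev,
                           const struct block_device_operations *ops)
  {
          /* Caller side: direct access is only allowed when the queue flag
           * is set in addition to a ->direct_access method being present. */
          if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                  return -EOPNOTSUPP;
          return 0;
  }

Gating on the queue flag rather than only on the presence of ->direct_access presumably lets stacked drivers such as DM decide per-device whether DAX is supported, which matches the commit description above.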
commit e9ccb945c4
@@ -53,7 +53,7 @@ disk.

 logical_block_size (RO)
 -----------------------
-This is the logcal block size of the device, in bytes.
+This is the logical block size of the device, in bytes.

 max_hw_sectors_kb (RO)
 ----------------------
@@ -26,6 +26,7 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include "blk.h"

 #define BIP_INLINE_VECS 4

@@ -905,7 +905,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
         return 0;
 }

-struct cftype blkcg_files[] = {
+static struct cftype blkcg_files[] = {
         {
                 .name = "stat",
                 .flags = CFTYPE_NOT_ON_ROOT,
@@ -914,7 +914,7 @@ struct cftype blkcg_files[] = {
         { } /* terminate */
 };

-struct cftype blkcg_legacy_files[] = {
+static struct cftype blkcg_legacy_files[] = {
         {
                 .name = "reset_stats",
                 .write_u64 = blkcg_reset_stats,
@@ -62,7 +62,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,

         /*
          * don't check dying flag for MQ because the request won't
-         * be resued after dying flag is set
+         * be reused after dying flag is set
          */
         if (q->mq_ops) {
                 blk_mq_insert_request(rq, at_head, true, false);
@@ -744,6 +744,12 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                           struct request *next)
 {
+        struct elevator_queue *e = q->elevator;
+
+        if (e->type->ops.elevator_allow_rq_merge_fn)
+                if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
+                        return 0;
+
         return attempt_merge(q, rq, next);
 }

@@ -379,6 +379,11 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
         return count;
 }

+static ssize_t queue_dax_show(struct request_queue *q, char *page)
+{
+        return queue_var_show(blk_queue_dax(q), page);
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
         .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
         .show = queue_requests_show,
@@ -516,6 +521,11 @@ static struct queue_sysfs_entry queue_wc_entry = {
         .store = queue_wc_store,
 };

+static struct queue_sysfs_entry queue_dax_entry = {
+        .attr = {.name = "dax", .mode = S_IRUGO },
+        .show = queue_dax_show,
+};
+
 static struct attribute *default_attrs[] = {
         &queue_requests_entry.attr,
         &queue_ra_entry.attr,
@@ -542,6 +552,7 @@ static struct attribute *default_attrs[] = {
         &queue_random_entry.attr,
         &queue_poll_entry.attr,
         &queue_wc_entry.attr,
+        &queue_dax_entry.attr,
         NULL,
 };

@@ -132,7 +132,7 @@ struct cfq_queue {
         /* time when first request from queue completed and slice started. */
         u64 slice_start;
         u64 slice_end;
-        u64 slice_resid;
+        s64 slice_resid;

         /* pending priority requests */
         int prio_pending;
@@ -1463,7 +1463,8 @@ static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
                  * a single request on seeky media and cause lots of seek time
                  * and group will never know it.
                  */
-                slice_used = max_t(u64, (now - cfqq->dispatch_start), 1);
+                slice_used = max_t(u64, (now - cfqq->dispatch_start),
+                                   jiffies_to_nsecs(1));
         } else {
                 slice_used = now - cfqq->slice_start;
                 if (slice_used > cfqq->allocated_slice) {
@@ -2543,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req,
         struct request *__rq;

         __rq = cfq_find_rq_fmerge(cfqd, bio);
-        if (__rq && elv_rq_merge_ok(__rq, bio)) {
+        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                 *req = __rq;
                 return ELEVATOR_FRONT_MERGE;
         }
@@ -2600,8 +2601,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                 cfq_del_cfqq_rr(cfqd, cfqq);
 }

-static int cfq_allow_merge(struct request_queue *q, struct request *rq,
-                           struct bio *bio)
+static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+                               struct bio *bio)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_io_cq *cic;
@@ -2625,6 +2626,12 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         return cfqq == RQ_CFQQ(rq);
 }

+static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
+                              struct request *next)
+{
+        return RQ_CFQQ(rq) == RQ_CFQQ(next);
+}
+
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
@@ -2689,7 +2696,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
                 else
                         cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
-                cfq_log_cfqq(cfqd, cfqq, "resid=%llu", cfqq->slice_resid);
+                cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
         }

         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
@@ -4243,7 +4250,16 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                                         cfqq_type(cfqq));

                 st->ttime.last_end_request = now;
-                if (!(rq->start_time + cfqd->cfq_fifo_expire[1] > now))
+                /*
+                 * We have to do this check in jiffies since start_time is in
+                 * jiffies and it is not trivial to convert to ns. If
+                 * cfq_fifo_expire[1] ever comes close to 1 jiffie, this test
+                 * will become problematic but so far we are fine (the default
+                 * is 128 ms).
+                 */
+                if (!time_after(rq->start_time +
+                                nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
+                                jiffies))
                         cfqd->last_delayed_sync = now;
         }

@@ -4811,7 +4827,8 @@ static struct elevator_type iosched_cfq = {
                 .elevator_merge_fn = cfq_merge,
                 .elevator_merged_fn = cfq_merged_request,
                 .elevator_merge_req_fn = cfq_merged_requests,
-                .elevator_allow_merge_fn = cfq_allow_merge,
+                .elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
+                .elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
                 .elevator_bio_merged_fn = cfq_bio_merged,
                 .elevator_dispatch_fn = cfq_dispatch_requests,
                 .elevator_add_req_fn = cfq_insert_request,
@@ -137,7 +137,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
         if (__rq) {
                 BUG_ON(sector != blk_rq_pos(__rq));

-                if (elv_rq_merge_ok(__rq, bio)) {
+                if (elv_bio_merge_ok(__rq, bio)) {
                         ret = ELEVATOR_FRONT_MERGE;
                         goto out;
                 }
@@ -173,7 +173,8 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
          * and move into next position (next will be deleted) in fifo
          */
         if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
-                if (time_before(next->fifo_time, req->fifo_time)) {
+                if (time_before((unsigned long)next->fifo_time,
+                                (unsigned long)req->fifo_time)) {
                         list_move(&req->queuelist, &next->queuelist);
                         req->fifo_time = next->fifo_time;
                 }
@@ -227,7 +228,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
         /*
          * rq is expired!
          */
-        if (time_after_eq(jiffies, rq->fifo_time))
+        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                 return 1;

         return 0;
@@ -53,13 +53,13 @@ static LIST_HEAD(elv_list);
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.
  */
-static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 {
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;

-        if (e->type->ops.elevator_allow_merge_fn)
-                return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
+        if (e->type->ops.elevator_allow_bio_merge_fn)
+                return e->type->ops.elevator_allow_bio_merge_fn(q, rq, bio);

         return 1;
 }
@@ -67,17 +67,17 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 {
         if (!blk_rq_merge_ok(rq, bio))
-                return 0;
+                return false;

-        if (!elv_iosched_allow_merge(rq, bio))
-                return 0;
+        if (!elv_iosched_allow_bio_merge(rq, bio))
+                return false;

-        return 1;
+        return true;
 }
-EXPORT_SYMBOL(elv_rq_merge_ok);
+EXPORT_SYMBOL(elv_bio_merge_ok);

 static struct elevator_type *elevator_find(const char *name)
 {
@@ -425,7 +425,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
         /*
          * First try one-hit cache.
          */
-        if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                 ret = blk_try_merge(q->last_merge, bio);
                 if (ret != ELEVATOR_NO_MERGE) {
                         *req = q->last_merge;
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
          * See if our hash lookup can find a potential backmerge.
          */
         __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
-        if (__rq && elv_rq_merge_ok(__rq, bio)) {
+        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                 *req = __rq;
                 return ELEVATOR_BACK_MERGE;
         }
@@ -495,7 +495,6 @@ rescan:
         /* add partitions */
         for (p = 1; p < state->limit; p++) {
                 sector_t size, from;
-                struct partition_meta_info *info = NULL;

                 size = state->parts[p].size;
                 if (!size)
@@ -530,8 +529,6 @@ rescan:
                         }
                 }

-                if (state->parts[p].has_info)
-                        info = &state->parts[p].info;
                 part = add_partition(disk, p, from, size,
                                      state->parts[p].flags,
                                      &state->parts[p].info);
@@ -42,6 +42,13 @@ int atari_partition(struct parsed_partitions *state)
         int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
 #endif

+        /*
+         * ATARI partition scheme supports 512 lba only. If this is not
+         * the case, bail early to avoid miscalculating hd_size.
+         */
+        if (bdev_logical_block_size(state->bdev) != 512)
+                return 0;
+
         rs = read_part_sector(state, 0, &sect);
         if (!rs)
                 return -1;
@@ -509,7 +509,9 @@ static struct brd_device *brd_alloc(int i)
         blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
         brd->brd_queue->limits.discard_zeroes_data = 1;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
-
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+        queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+#endif
         disk = brd->brd_disk = alloc_disk(max_part);
         if (!disk)
                 goto out_free_queue;
@@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev,
         blk_queue_max_hw_sectors(q, UINT_MAX);
         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+        queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
         q->queuedata = pmem;

         disk = alloc_disk_node(0, nid);
@@ -618,6 +618,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
         dev_info->gd->driverfs_dev = &dev_info->dev;
         blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
         blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+        queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);

         seg_byte_size = (dev_info->end - dev_info->start + 1);
         set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -493,7 +493,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)

         if (size < 0)
                 return size;
-        if (!ops->direct_access)
+        if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                 return -EOPNOTSUPP;
         if ((sector + DIV_ROUND_UP(size, 512)) >
             part_nr_sects_read(bdev->bd_part))
@@ -1287,7 +1287,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                 bdev->bd_disk = disk;
                 bdev->bd_queue = disk->queue;
                 bdev->bd_contains = bdev;
-                if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+                if (IS_ENABLED(CONFIG_BLK_DEV_DAX) &&
+                    blk_queue_dax(disk->queue))
                         bdev->bd_inode->i_flags = S_DAX;
                 else
                         bdev->bd_inode->i_flags = 0;
@@ -5494,7 +5494,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
                 }

         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-                if (op == REQ_OP_WRITE || REQ_OP_DISCARD ||
+                if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
                     op == REQ_GET_READ_MIRRORS) {
                         num_stripes = map->num_stripes;
                 } else if (mirror_num) {
@@ -90,7 +90,7 @@ struct request {
         struct list_head queuelist;
         union {
                 struct call_single_data csd;
-                unsigned long fifo_time;
+                u64 fifo_time;
         };

         struct request_queue *q;
@@ -505,6 +505,7 @@ struct request_queue {
 #define QUEUE_FLAG_WC          23 /* Write back caching */
 #define QUEUE_FLAG_FUA         24 /* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25 /* flush not queueuable */
+#define QUEUE_FLAG_DAX         26 /* device supports DAX */

 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
                             (1 << QUEUE_FLAG_STACKABLE) | \
@@ -594,6 +595,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
         test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

 #define blk_noretry_request(rq) \
         ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *,

 typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);

-typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
+typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
+                                struct request *, struct bio *);
+
+typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
+                                struct request *, struct request *);

 typedef void (elevator_bio_merged_fn) (struct request_queue *,
                                 struct request *, struct bio *);
@@ -46,7 +50,8 @@ struct elevator_ops
         elevator_merge_fn *elevator_merge_fn;
         elevator_merged_fn *elevator_merged_fn;
         elevator_merge_req_fn *elevator_merge_req_fn;
-        elevator_allow_merge_fn *elevator_allow_merge_fn;
+        elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
+        elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
         elevator_bio_merged_fn *elevator_bio_merged_fn;

         elevator_dispatch_fn *elevator_dispatch_fn;
@@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
-extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
                                         struct elevator_type *);

@@ -127,12 +127,13 @@ static void trace_note_tsk(struct task_struct *tsk)

 static void trace_note_time(struct blk_trace *bt)
 {
-        struct timespec now;
+        struct timespec64 now;
         unsigned long flags;
         u32 words[2];

-        getnstimeofday(&now);
-        words[0] = now.tv_sec;
+        /* need to check user space to see if this breaks in y2038 or y2106 */
+        ktime_get_real_ts64(&now);
+        words[0] = (u32)now.tv_sec;
         words[1] = now.tv_nsec;

         local_irq_save(flags);