for-5.15/block-2021-08-30

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmEs6H0QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpukbD/9Qk9fQte+WJVmpbdvhV40gcKBVnGOVH0ke
 k+36x6AB/gWKnFHwtprsSyVqPxmzqwTv9VIq5l/s3Vydt3L61znvTneBeN03Wlkn
 UTxD0lY8HzyVWnZb82LBBjjy7cs6EzrFG4kBH/ZiTAyTcBsCAvzo5J7mywb4gFjj
 L/HeBq58EJ3WCUlxlVW1ijctvi7wnGoaH5bZY1TE00GGT6TysN2bEPfzjkuYHrDz
 RqhoQdWPLDz6h3x9lAncPw2MWlcmlGvJ96ABseAKFPKvXxE2PzgolSoQfVUUJtko
 bqGyy2ns+pxN11SrcGYjogEKVKhONoms/5UN1RtwRBVsgvecxlHER/SgyZ8luBDo
 lFhVXulkSjpswbWutRy3USge98GwMu2Z4ppP2CDmO7hkQd0DF8sL0kPKyaREkcHi
 NmsD/0zF2uUhUVN+PRC/MuzngAmL4Mmxjk70L+MohlK7e+H3pnEo1ec3OMcXe+wB
 dG6t/BFD9bYmj0UjsHeXEoR/iRuvSba1L8zBz5dhRaHH6DvdycYhpynXWWlU3C8K
 3nzEVVpcDINMsiRl1Vqb6g6HsMwHIH84FRl7Mc51UmhW9C4gLfWMCt1guQuzOj72
 yEbmCLydE/FR2IUPY7eqX8hRG8GTUlMtSvGdgnvBOcWj+K3buT/c5yVTHgTrN8ox
 LCOXHSvV6w==
 =S8fs
 -----END PGP SIGNATURE-----

Merge tag 'for-5.15/block-2021-08-30' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "Nothing major in here - lots of good cleanups and tech debt handling,
  which is also evident in the diffstats. In particular:

   - Add disk sequence numbers (Matteo)

   - Discard merge fix (Ming)

   - Relax disk zoned reporting restrictions (Niklas)

   - Bio error handling zoned leak fix (Pavel)

   - Start of proper add_disk() error handling (Luis, Christoph)

   - blk crypto fix (Eric)

   - Non-standard GPT location support (Dmitry)

   - IO priority improvements and cleanups (Damien)

   - blk-throtl improvements (Chunguang)

   - diskstats_show() stack reduction (Abd-Alrhman)

   - Loop scheduler selection (Bart)

   - Switch block layer to use kmap_local_page() (Christoph)

   - Remove obsolete disk_name helper (Christoph)

   - block_device refcounting improvements (Christoph)

   - Ensure gendisk always has a request queue reference (Christoph)

   - Misc fixes/cleanups (Shaokun, Oliver, Guoqing)"

* tag 'for-5.15/block-2021-08-30' of git://git.kernel.dk/linux-block: (129 commits)
  sg: pass the device name to blk_trace_setup
  block, bfq: cleanup the repeated declaration
  blk-crypto: fix check for too-large dun_bytes
  blk-zoned: allow BLKREPORTZONE without CAP_SYS_ADMIN
  blk-zoned: allow zone management send operations without CAP_SYS_ADMIN
  block: mark blkdev_fsync static
  block: refine the disk_live check in del_gendisk
  mmc: sdhci-tegra: Enable MMC_CAP2_ALT_GPT_TEGRA
  mmc: block: Support alternative_gpt_sector() operation
  partitions/efi: Support non-standard GPT location
  block: Add alternative_gpt_sector() operation
  bio: fix page leak bio_add_hw_page failure
  block: remove CONFIG_DEBUG_BLOCK_EXT_DEVT
  block: remove a pointless call to MINOR() in device_add_disk
  null_blk: add error handling support for add_disk()
  virtio_blk: add error handling support for add_disk()
  block: add error handling for device_add_disk / add_disk
  block: return errors from disk_alloc_events
  block: return errors from blk_integrity_add
  block: call blk_register_queue earlier in device_add_disk
  ...
commit 679369114e (Linus Torvalds, 2021-08-30 18:52:11 -07:00)
129 changed files with 1582 additions and 1753 deletions

@@ -28,6 +28,18 @@ Description:
For more details refer to Documentation/admin-guide/iostats.rst
What: /sys/block/<disk>/diskseq
Date: February 2021
Contact: Matteo Croce <mcroce@microsoft.com>
Description:
The /sys/block/<disk>/diskseq file reports the disk
sequence number, which is a monotonically increasing
number assigned to every drive.
Some devices, like the loop device, refresh this number
every time the backing file is changed.
The value type is a 64-bit unsigned integer.
What: /sys/block/<disk>/<part>/stat
Date: February 2008
Contact: Jerome Marchand <jmarchan@redhat.com>

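As an aside on the diskseq attribute documented above: it can be read like
any other sysfs file. A minimal userspace sketch (illustration only, not
part of this series; the loop0 path is a made-up example):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical device; any entry under /sys/block works */
		FILE *f = fopen("/sys/block/loop0/diskseq", "r");
		uint64_t seq;

		if (!f || fscanf(f, "%" SCNu64, &seq) != 1) {
			perror("diskseq");
			return 1;
		}
		fclose(f);
		printf("diskseq: %" PRIu64 "\n", seq);	/* monotonically increasing */
		return 0;
	}
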
@@ -22,7 +22,6 @@ CONFIG_RAMSIZE=0x8000000
CONFIG_VECTORBASE=0x40000000
CONFIG_KERNELBASE=0x40001000
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_CMDLINE_PARSER=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_MISC=y

@@ -7,8 +7,6 @@
#ifndef __ASM_RC32434_RB_H
#define __ASM_RC32434_RB_H
#include <linux/genhd.h>
#define REGBASE 0x18000000
#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
#define UART0BASE 0x58000

@@ -132,7 +132,6 @@ CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_EQS_DEBUG=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y

@@ -127,7 +127,6 @@ CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_EQS_DEBUG=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y

@@ -1268,8 +1268,7 @@ static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
rq_for_each_segment(bvec, req, iter) {
BUG_ON(i >= io_req->desc_cnt);
io_req->io_desc[i].buffer =
page_address(bvec.bv_page) + bvec.bv_offset;
io_req->io_desc[i].buffer = bvec_virt(&bvec);
io_req->io_desc[i].length = bvec.bv_len;
i++;
}

@@ -114,16 +114,6 @@ config BLK_DEV_THROTTLING_LOW
Note, this is an experimental interface and could be changed someday.
config BLK_CMDLINE_PARSER
bool "Block device command line partition parser"
help
Enabling this option allows you to specify the partition layout from
the kernel boot args. This is typically of use for embedded devices
which don't otherwise have any standardized method for listing the
partitions on a block device.
See Documentation/block/cmdline-partition.rst for more information.
config BLK_WBT
bool "Enable support for block device writeback throttling"
help
@@ -251,4 +241,8 @@ config BLK_MQ_RDMA
config BLK_PM
def_bool BLOCK && PM
# do not use in new code
config BLOCK_HOLDER_DEPRECATED
bool
source "block/Kconfig.iosched"

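For context on the removed BLK_CMDLINE_PARSER option: per the referenced
Documentation/block/cmdline-partition.rst, the boot argument it enabled is
blkdevparts=, whose grammar the deleted block/cmdline-parser.c (further down
in this merge) implements. A sketch with invented device and partition names:

	blkdevparts=mmcblk0:1M(boot)ro,512M@2M(rootfs),-(data)

A size of "-" takes the rest of the disk, "@<offset>" places a partition
explicitly (otherwise it follows the previous one), and the "ro"/"lk"
suffixes map to the PF_RDONLY and PF_POWERUP_LOCK flags in parse_subpart().
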
@@ -26,7 +26,6 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
@@ -40,3 +39,4 @@ obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_PM) += blk-pm.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED) += holder.o

@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
@@ -2505,7 +2508,7 @@ void bfq_end_wr_async_queues(struct bfq_data *bfqd,
int i, j;
for (i = 0; i < 2; i++)
for (j = 0; j < IOPRIO_BE_NR; j++)
for (j = 0; j < IOPRIO_NR_LEVELS; j++)
if (bfqg->async_bfqq[i][j])
bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
if (bfqg->async_idle_bfqq)
@@ -5266,8 +5269,8 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
switch (ioprio_class) {
default:
pr_err("bdi %s: bfq: bad prio class %d\n",
bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
ioprio_class);
bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
ioprio_class);
fallthrough;
case IOPRIO_CLASS_NONE:
/*
@@ -5290,10 +5293,10 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
break;
}
if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
bfqq->new_ioprio);
bfqq->new_ioprio = IOPRIO_BE_NR;
bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
}
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
@@ -5408,7 +5411,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
case IOPRIO_CLASS_RT:
return &bfqg->async_bfqq[0][ioprio];
case IOPRIO_CLASS_NONE:
ioprio = IOPRIO_NORM;
ioprio = IOPRIO_BE_NORM;
fallthrough;
case IOPRIO_CLASS_BE:
return &bfqg->async_bfqq[1][ioprio];
@@ -6822,7 +6825,7 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
int i, j;
for (i = 0; i < 2; i++)
for (j = 0; j < IOPRIO_BE_NR; j++)
for (j = 0; j < IOPRIO_NR_LEVELS; j++)
__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);

@@ -931,7 +931,7 @@ struct bfq_group {
void *bfqd;
struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
struct bfq_queue *async_idle_bfqq;
struct bfq_entity *my_entity;
@@ -948,15 +948,13 @@ struct bfq_group {
struct bfq_entity entity;
struct bfq_sched_data sched_data;
struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
struct bfq_queue *async_bfqq[2][IOPRIO_NR_LEVELS];
struct bfq_queue *async_idle_bfqq;
struct rb_root rq_pos_tree;
};
#endif
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
/* --------------- main algorithm interface ----------------- */
#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \

@@ -505,7 +505,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
*/
unsigned short bfq_ioprio_to_weight(int ioprio)
{
return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}
/**
@@ -514,12 +514,12 @@ unsigned short bfq_ioprio_to_weight(int ioprio)
*
* To preserve as much as possible the old only-ioprio user interface,
* 0 is used as an escape ioprio value for weights (numerically) equal or
* larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
* larger than IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF.
*/
static unsigned short bfq_weight_to_ioprio(int weight)
{
return max_t(int, 0,
IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight);
}
static void bfq_get_entity(struct bfq_entity *entity)

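The weight conversion above is easier to see with numbers. A standalone
sketch, assuming the v5.15 values IOPRIO_NR_LEVELS == 8 (the same value as
the old IOPRIO_BE_NR, so the rename changes no results) and
BFQ_WEIGHT_CONVERSION_COEFF == 10:

	#include <assert.h>

	#define IOPRIO_NR_LEVELS		8	/* assumed v5.15 value */
	#define BFQ_WEIGHT_CONVERSION_COEFF	10

	static unsigned short bfq_ioprio_to_weight(int ioprio)
	{
		return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
	}

	static unsigned short bfq_weight_to_ioprio(int weight)
	{
		int ioprio = IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight;

		return ioprio > 0 ? ioprio : 0;
	}

	int main(void)
	{
		assert(bfq_ioprio_to_weight(0) == 80);	/* highest BE level */
		assert(bfq_ioprio_to_weight(7) == 10);	/* lowest BE level */
		/* weights >= 80 escape to ioprio 0, per the comment above */
		assert(bfq_weight_to_ioprio(80) == 0);
		assert(bfq_weight_to_ioprio(100) == 0);
		return 0;
	}
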
@@ -104,8 +104,7 @@ void bio_integrity_free(struct bio *bio)
struct bio_set *bs = bio->bi_pool;
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset);
kfree(bvec_virt(bip->bip_vec));
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
@@ -163,27 +162,23 @@ static blk_status_t bio_integrity_process(struct bio *bio,
struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
blk_status_t ret = BLK_STS_OK;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf;
iter.prot_buf = bvec_virt(bip->bip_vec);
__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
void *kaddr = kmap_atomic(bv.bv_page);
void *kaddr = bvec_kmap_local(&bv);
iter.data_buf = kaddr + bv.bv_offset;
iter.data_buf = kaddr;
iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
kunmap_atomic(kaddr);
return ret;
}
kunmap_local(kaddr);
if (ret)
break;
kunmap_atomic(kaddr);
}
return ret;
}

@@ -495,16 +495,11 @@ EXPORT_SYMBOL(bio_kmalloc);
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
struct bio_vec bv;
struct bvec_iter iter;
bio_for_each_segment(bv, bio, iter) {
char *data = bvec_kmap_irq(&bv, &flags);
memset(data, 0, bv.bv_len);
flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(data, &flags);
}
bio_for_each_segment(bv, bio, iter)
memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);
@@ -979,6 +974,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
return 0;
}
static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
for (i = 0; i < nr; i++)
put_page(pages[i]);
}
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@@ -1023,8 +1026,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
if (same_page)
put_page(page);
} else {
if (WARN_ON_ONCE(bio_full(bio, len)))
return -EINVAL;
if (WARN_ON_ONCE(bio_full(bio, len))) {
bio_put_pages(pages + i, left, offset);
return -EINVAL;
}
__bio_add_page(bio, page, len, offset);
}
offset = 0;
@@ -1069,6 +1074,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
len = min_t(size_t, PAGE_SIZE - offset, left);
if (bio_add_hw_page(q, bio, page, len, offset,
max_append_sectors, &same_page) != len) {
bio_put_pages(pages + i, left, offset);
ret = -EINVAL;
break;
}
@@ -1191,27 +1197,15 @@ EXPORT_SYMBOL(bio_advance);
void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter)
{
struct bio_vec src_bv, dst_bv;
void *src_p, *dst_p;
unsigned bytes;
while (src_iter->bi_size && dst_iter->bi_size) {
src_bv = bio_iter_iovec(src, *src_iter);
dst_bv = bio_iter_iovec(dst, *dst_iter);
struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
void *src_buf;
bytes = min(src_bv.bv_len, dst_bv.bv_len);
src_p = kmap_atomic(src_bv.bv_page);
dst_p = kmap_atomic(dst_bv.bv_page);
memcpy(dst_p + dst_bv.bv_offset,
src_p + src_bv.bv_offset,
bytes);
kunmap_atomic(dst_p);
kunmap_atomic(src_p);
flush_dcache_page(dst_bv.bv_page);
src_buf = bvec_kmap_local(&src_bv);
memcpy_to_bvec(&dst_bv, src_buf);
kunmap_local(src_buf);
bio_advance_iter_single(src, src_iter, bytes);
bio_advance_iter_single(dst, dst_iter, bytes);

@@ -489,10 +489,9 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
/* some drivers (floppy) instantiate a queue w/o disk registered */
if (blkg->q->backing_dev_info->dev)
return bdi_dev_name(blkg->q->backing_dev_info);
return NULL;
if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
return NULL;
return bdi_dev_name(blkg->q->disk->bdi);
}
/**
@@ -873,6 +872,63 @@ static void blkcg_fill_root_iostats(void)
}
}
static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
struct blkg_iostat_set *bis = &blkg->iostat;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
bool has_stats = false;
const char *dname;
unsigned seq;
int i;
if (!blkg->online)
return;
dname = blkg_dev_name(blkg);
if (!dname)
return;
seq_printf(s, "%s ", dname);
do {
seq = u64_stats_fetch_begin(&bis->sync);
rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
rios = bis->cur.ios[BLKG_IOSTAT_READ];
wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
} while (u64_stats_fetch_retry(&bis->sync, seq));
if (rbytes || wbytes || rios || wios) {
has_stats = true;
seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
rbytes, wbytes, rios, wios,
dbytes, dios);
}
if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
seq_printf(s, " use_delay=%d delay_nsec=%llu",
atomic_read(&blkg->use_delay),
atomic64_read(&blkg->delay_nsec));
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (!blkg->pd[i] || !pol->pd_stat_fn)
continue;
if (pol->pd_stat_fn(blkg->pd[i], s))
has_stats = true;
}
if (has_stats)
seq_printf(s, "\n");
}
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
@@ -884,86 +940,11 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
cgroup_rstat_flush(blkcg->css.cgroup);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
struct blkg_iostat_set *bis = &blkg->iostat;
const char *dname;
char *buf;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
size_t size = seq_get_buf(sf, &buf), off = 0;
int i;
bool has_stats = false;
unsigned seq;
spin_lock_irq(&blkg->q->queue_lock);
if (!blkg->online)
goto skip;
dname = blkg_dev_name(blkg);
if (!dname)
goto skip;
/*
* Hooray string manipulation, count is the size written NOT
* INCLUDING THE \0, so size is now count+1 less than what we
* had before, but we want to start writing the next bit from
* the \0 so we only add count to buf.
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
do {
seq = u64_stats_fetch_begin(&bis->sync);
rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
rios = bis->cur.ios[BLKG_IOSTAT_READ];
wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
} while (u64_stats_fetch_retry(&bis->sync, seq));
if (rbytes || wbytes || rios || wios) {
has_stats = true;
off += scnprintf(buf+off, size-off,
"rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
rbytes, wbytes, rios, wios,
dbytes, dios);
}
if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
off += scnprintf(buf+off, size-off,
" use_delay=%d delay_nsec=%llu",
atomic_read(&blkg->use_delay),
(unsigned long long)atomic64_read(&blkg->delay_nsec));
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
size_t written;
if (!blkg->pd[i] || !pol->pd_stat_fn)
continue;
written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
if (written)
has_stats = true;
off += written;
}
if (has_stats) {
if (off < size - 1) {
off += scnprintf(buf+off, size-off, "\n");
seq_commit(sf, off);
} else {
seq_commit(sf, -1);
}
}
skip:
blkcg_print_one_stat(blkg, sf);
spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
return 0;
}

@@ -14,7 +14,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
@@ -393,10 +392,7 @@ void blk_cleanup_queue(struct request_queue *q)
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
if (queue_is_mq(q))
blk_mq_exit_queue(q);
@@ -533,20 +529,14 @@ struct request_queue *blk_alloc_queue(int node_id)
if (ret)
goto fail_id;
q->backing_dev_info = bdi_alloc(node_id);
if (!q->backing_dev_info)
goto fail_split;
q->stats = blk_alloc_queue_stats();
if (!q->stats)
goto fail_stats;
goto fail_split;
q->node = node_id;
atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
@@ -571,7 +561,7 @@ struct request_queue *blk_alloc_queue(int node_id)
if (percpu_ref_init(&q->q_usage_counter,
blk_queue_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
goto fail_stats;
if (blkcg_init_queue(q))
goto fail_ref;
@@ -584,10 +574,8 @@ struct request_queue *blk_alloc_queue(int node_id)
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
blk_free_queue_stats(q->stats);
fail_stats:
bdi_put(q->backing_dev_info);
blk_free_queue_stats(q->stats);
fail_split:
bioset_exit(&q->bio_split);
fail_id:

@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
if (mode->keysize == 0)
return -EINVAL;
if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
if (dun_bytes == 0 || dun_bytes > mode->ivsize)
return -EINVAL;
if (!is_power_of_2(data_unit_size))

@@ -431,13 +431,15 @@ void blk_integrity_unregister(struct gendisk *disk)
}
EXPORT_SYMBOL(blk_integrity_unregister);
void blk_integrity_add(struct gendisk *disk)
int blk_integrity_add(struct gendisk *disk)
{
if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
&disk_to_dev(disk)->kobj, "%s", "integrity"))
return;
int ret;
kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
ret = kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
&disk_to_dev(disk)->kobj, "%s", "integrity");
if (!ret)
kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
return ret;
}
void blk_integrity_del(struct gendisk *disk)

@@ -2988,34 +2988,29 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
kfree(iocg);
}
static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct ioc *ioc = iocg->ioc;
size_t pos = 0;
if (!ioc->enabled)
return 0;
return false;
if (iocg->level == 0) {
unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ioc->vtime_base_rate * 10000,
VTIME_PER_USEC);
pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
vp10k / 100, vp10k % 100);
seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
}
pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
iocg->last_stat.usage_us);
seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
if (blkcg_debug_stats)
pos += scnprintf(buf + pos, size - pos,
" cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
iocg->last_stat.wait_us,
iocg->last_stat.indebt_us,
iocg->last_stat.indelay_us);
return pos;
seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
iocg->last_stat.wait_us,
iocg->last_stat.indebt_us,
iocg->last_stat.indelay_us);
return true;
}
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,

@@ -890,8 +890,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
size_t size)
static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
struct latency_stat stat;
int cpu;
@@ -906,39 +905,40 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
preempt_enable();
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
(unsigned long long)stat.ps.missed,
(unsigned long long)stat.ps.total);
return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
(unsigned long long)stat.ps.missed,
(unsigned long long)stat.ps.total,
iolat->rq_depth.max_depth);
seq_printf(s, " missed=%llu total=%llu depth=max",
(unsigned long long)stat.ps.missed,
(unsigned long long)stat.ps.total);
else
seq_printf(s, " missed=%llu total=%llu depth=%u",
(unsigned long long)stat.ps.missed,
(unsigned long long)stat.ps.total,
iolat->rq_depth.max_depth);
return true;
}
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
unsigned long long avg_lat;
unsigned long long cur_win;
if (!blkcg_debug_stats)
return 0;
return false;
if (iolat->ssd)
return iolatency_ssd_stat(iolat, buf, size);
return iolatency_ssd_stat(iolat, s);
avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
iolat->rq_depth.max_depth, avg_lat, cur_win);
seq_printf(s, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
else
seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
iolat->rq_depth.max_depth, avg_lat, cur_win);
return true;
}
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
struct request_queue *q,
struct blkcg *blkcg)

@@ -400,7 +400,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
memcpy_from_bvec(p, bvec);
p += bvec->bv_len;
}

@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
trace_block_split(split, (*bio)->bi_iter.bi_sector);
submit_bio_noacct(*bio);
*bio = split;
blk_throtl_charge_bio_split(*bio);
}
}
@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
}
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and send them to controller together. The ranges
* needn't to be contiguous.
* Otherwise, the bios/requests will be handled as same as
* others which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
static enum elv_merge blk_try_req_merge(struct request *req,
struct request *next)
{

@@ -45,60 +45,12 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
kfree(hctx);
}
struct blk_mq_ctx_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_mq_ctx *, char *);
ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
struct blk_mq_hw_ctx_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
char *page)
{
struct blk_mq_ctx_sysfs_entry *entry;
struct blk_mq_ctx *ctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
ctx = container_of(kobj, struct blk_mq_ctx, kobj);
q = ctx->queue;
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
res = entry->show(ctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct blk_mq_ctx_sysfs_entry *entry;
struct blk_mq_ctx *ctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
ctx = container_of(kobj, struct blk_mq_ctx, kobj);
q = ctx->queue;
if (!entry->store)
return -EIO;
mutex_lock(&q->sysfs_lock);
res = entry->store(ctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *page)
{
@@ -198,23 +150,16 @@ static struct attribute *default_hw_ctx_attrs[] = {
};
ATTRIBUTE_GROUPS(default_hw_ctx);
static const struct sysfs_ops blk_mq_sysfs_ops = {
.show = blk_mq_sysfs_show,
.store = blk_mq_sysfs_store,
};
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
.show = blk_mq_hw_sysfs_show,
.store = blk_mq_hw_sysfs_store,
};
static struct kobj_type blk_mq_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.release = blk_mq_sysfs_release,
};
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.release = blk_mq_ctx_sysfs_release,
};

@@ -525,7 +525,7 @@ void blk_mq_free_request(struct request *rq)
__blk_mq_dec_active_requests(hctx);
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);
laptop_io_completion(q->disk->bdi);
rq_qos_done(q, rq);
@@ -3115,7 +3115,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_init_queue);
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
struct lock_class_key *lkclass)
{
struct request_queue *q;
struct gendisk *disk;
@@ -3124,12 +3125,11 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata)
if (IS_ERR(q))
return ERR_CAST(q);
disk = __alloc_disk_node(0, set->numa_node);
disk = __alloc_disk_node(q, set->numa_node, lkclass);
if (!disk) {
blk_cleanup_queue(q);
return ERR_PTR(-ENOMEM);
}
disk->queue = q;
return disk;
}
EXPORT_SYMBOL(__blk_mq_alloc_disk);

@@ -8,6 +8,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
@@ -140,7 +141,9 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;
q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
if (!q->disk)
return;
q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@@ -380,18 +383,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
void blk_queue_update_readahead(struct request_queue *q)
void disk_update_readahead(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
/*
* For read-ahead of large files to be effective, we need to read ahead
* at least twice the optimal I/O size.
*/
q->backing_dev_info->ra_pages =
disk->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
q->backing_dev_info->io_pages =
queue_max_sectors(q) >> (PAGE_SHIFT - 9);
disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
EXPORT_SYMBOL_GPL(disk_update_readahead);
/**
* blk_limits_io_min - set minimum request size for a device
@@ -471,7 +475,9 @@ EXPORT_SYMBOL(blk_limits_io_opt);
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
q->backing_dev_info->ra_pages =
if (!q->disk)
return;
q->disk->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
@@ -661,17 +667,11 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
struct request_queue *t = disk->queue;
if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
get_start_sect(bdev) + (offset >> 9)) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
get_start_sect(bdev) + (offset >> 9)) < 0)
pr_notice("%s: Warning: Device %pg is misaligned\n",
disk->disk_name, bdev);
disk_name(disk, 0, top);
bdevname(bdev, bottom);
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
blk_queue_update_readahead(disk->queue);
disk_update_readahead(disk);
}
EXPORT_SYMBOL(disk_stack_limits);

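Numerically, disk_update_readahead() above reduces to one formula; with
example values of my own (4 KiB pages, a device reporting io_opt = 1 MiB):

	ra_pages = max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES)
	         = max(2 MiB / 4 KiB, 32) = 512 pages

which shows up as 2048 in the read_ahead_kb sysfs attribute; a device that
reports no optimal I/O size keeps the VM_READAHEAD_PAGES default of 32
pages (128 KiB).
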
@@ -88,9 +88,11 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_SHIFT - 10);
unsigned long ra_kb;
if (!q->disk)
return -EINVAL;
ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
return queue_var_show(ra_kb, page);
}
@@ -98,13 +100,14 @@ static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count);
ssize_t ret;
if (!q->disk)
return -EINVAL;
ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
@@ -251,7 +254,8 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
if (q->disk)
q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(&q->queue_lock);
return ret;
@@ -766,13 +770,6 @@ static void blk_exit_queue(struct request_queue *q)
* e.g. blkcg_print_blkgs() to crash.
*/
blkcg_exit_queue(q);
/*
* Since the cgroup code may dereference the @q->backing_dev_info
* pointer, only decrease its reference count after having removed the
* association with the block cgroup controller.
*/
bdi_put(q->backing_dev_info);
}
/**
@@ -859,15 +856,6 @@ int blk_register_queue(struct gendisk *disk)
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
if (WARN_ON(!q))
return -ENXIO;
WARN_ONCE(blk_queue_registered(q),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
blk_queue_update_readahead(q);
ret = blk_trace_init_sysfs(dev);
if (ret)
return ret;
@@ -941,7 +929,6 @@ unlock:
return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
/**
* blk_unregister_queue - counterpart of blk_register_queue()

@@ -178,6 +178,9 @@ struct throtl_grp {
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
unsigned long bio_cnt_reset_time;
atomic_t io_split_cnt[2];
atomic_t last_io_split_cnt[2];
struct blkg_rwstat stat_bytes;
struct blkg_rwstat stat_ios;
};
@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
atomic_set(&tg->io_split_cnt[rw], 0);
/*
* Previous slice has expired. We must have trimmed it after last
* bio dispatch. That means since start of last slice, we never used
@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
atomic_set(&tg->io_split_cnt[rw], 0);
throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
jiffies + tg->td->throtl_slice);
}
if (iops_limit != UINT_MAX)
tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
if (wait)
@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
}
if (tg->iops[READ][LIMIT_LOW]) {
tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
iops = tg->last_io_disp[READ] * HZ / elapsed_time;
if (iops >= tg->iops[READ][LIMIT_LOW])
tg->last_low_overflow_time[READ] = now;
}
if (tg->iops[WRITE][LIMIT_LOW]) {
tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
if (iops >= tg->iops[WRITE][LIMIT_LOW])
tg->last_low_overflow_time[WRITE] = now;
@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
void blk_throtl_charge_bio_split(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
struct throtl_grp *parent = blkg_to_tg(blkg);
struct throtl_service_queue *parent_sq;
bool rw = bio_data_dir(bio);
do {
if (!parent->has_rules[rw])
break;
atomic_inc(&parent->io_split_cnt[rw]);
atomic_inc(&parent->last_io_split_cnt[rw]);
parent_sq = parent->service_queue.parent_sq;
parent = sq_to_tg(parent_sq);
} while (parent);
}
bool blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_bdev->bd_disk->queue;

@@ -97,7 +97,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
@@ -234,7 +234,7 @@ enum {
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat;
@@ -287,7 +287,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth;
trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
@@ -359,7 +359,7 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
status = latency_exceeded(rwb, cb->stat);
trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
inflight);
/*

@@ -360,9 +360,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!blk_queue_is_zoned(q))
return -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
return -EFAULT;
@@ -421,9 +418,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
if (!blk_queue_is_zoned(q))
return -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!(mode & FMODE_WRITE))
return -EBADF;

@@ -128,7 +128,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
bip_next->bip_vec[0].bv_offset);
}
void blk_integrity_add(struct gendisk *);
int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
@@ -162,8 +162,9 @@ static inline bool bio_integrity_endio(struct bio *bio)
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline void blk_integrity_add(struct gendisk *disk)
static inline int blk_integrity_add(struct gendisk *disk)
{
return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
@@ -289,11 +290,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
@@ -340,15 +343,14 @@ static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE 0
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
int bdev_add_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
sector_t length);
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
@@ -356,7 +358,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct request_queue *blk_alloc_queue(int node_id);
void disk_alloc_events(struct gendisk *disk);
int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);

@@ -67,18 +67,6 @@ static __init int init_emergency_pool(void)
__initcall(init_emergency_pool);
/*
* highmem version, map in to vec
*/
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
unsigned char *vto;
vto = kmap_atomic(to->bv_page);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto);
}
/*
* Simple bounce buffer support for highmem pages. Depending on the
* queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -86,7 +74,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
*/
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
struct bio_vec tovec, fromvec;
struct bvec_iter iter;
/*
@@ -104,11 +91,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
* been modified by the block layer, so use the original
* copy, bounce_copy_vec already uses tovec->bv_len
*/
vfrom = page_address(fromvec.bv_page) +
tovec.bv_offset;
bounce_copy_vec(&tovec, vfrom);
flush_dcache_page(tovec.bv_page);
memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
tovec.bv_offset);
}
bio_advance_iter(from, &from_iter, tovec.bv_len);
}
@@ -255,24 +239,19 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
* because the 'bio' is single-page bvec.
*/
for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
struct page *page = to->bv_page;
struct page *bounce_page;
if (!PageHighMem(page))
if (!PageHighMem(to->bv_page))
continue;
to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
inc_zone_page_state(to->bv_page, NR_BOUNCE);
bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
inc_zone_page_state(bounce_page, NR_BOUNCE);
if (rw == WRITE) {
char *vto, *vfrom;
flush_dcache_page(page);
vto = page_address(to->bv_page) + to->bv_offset;
vfrom = kmap_atomic(page) + to->bv_offset;
memcpy(vto, vfrom, to->bv_len);
kunmap_atomic(vfrom);
flush_dcache_page(to->bv_page);
memcpy_from_bvec(page_address(bounce_page), to);
}
to->bv_page = bounce_page;
}
trace_block_bio_bounce(*bio_orig);

@@ -1,255 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Parse command line, get partition information
*
* Written by Cai Zhiyong <caizhiyong@huawei.com>
*
*/
#include <linux/export.h>
#include <linux/cmdline-parser.h>
static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
{
int ret = 0;
struct cmdline_subpart *new_subpart;
*subpart = NULL;
new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
if (!new_subpart)
return -ENOMEM;
if (*partdef == '-') {
new_subpart->size = (sector_t)(~0ULL);
partdef++;
} else {
new_subpart->size = (sector_t)memparse(partdef, &partdef);
if (new_subpart->size < (sector_t)PAGE_SIZE) {
pr_warn("cmdline partition size is invalid.");
ret = -EINVAL;
goto fail;
}
}
if (*partdef == '@') {
partdef++;
new_subpart->from = (sector_t)memparse(partdef, &partdef);
} else {
new_subpart->from = (sector_t)(~0ULL);
}
if (*partdef == '(') {
int length;
char *next = strchr(++partdef, ')');
if (!next) {
pr_warn("cmdline partition format is invalid.");
ret = -EINVAL;
goto fail;
}
length = min_t(int, next - partdef,
sizeof(new_subpart->name) - 1);
strncpy(new_subpart->name, partdef, length);
new_subpart->name[length] = '\0';
partdef = ++next;
} else
new_subpart->name[0] = '\0';
new_subpart->flags = 0;
if (!strncmp(partdef, "ro", 2)) {
new_subpart->flags |= PF_RDONLY;
partdef += 2;
}
if (!strncmp(partdef, "lk", 2)) {
new_subpart->flags |= PF_POWERUP_LOCK;
partdef += 2;
}
*subpart = new_subpart;
return 0;
fail:
kfree(new_subpart);
return ret;
}
static void free_subpart(struct cmdline_parts *parts)
{
struct cmdline_subpart *subpart;
while (parts->subpart) {
subpart = parts->subpart;
parts->subpart = subpart->next_subpart;
kfree(subpart);
}
}
static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
{
int ret = -EINVAL;
char *next;
int length;
struct cmdline_subpart **next_subpart;
struct cmdline_parts *newparts;
char buf[BDEVNAME_SIZE + 32 + 4];
*parts = NULL;
newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
if (!newparts)
return -ENOMEM;
next = strchr(bdevdef, ':');
if (!next) {
pr_warn("cmdline partition has no block device.");
goto fail;
}
length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
strncpy(newparts->name, bdevdef, length);
newparts->name[length] = '\0';
newparts->nr_subparts = 0;
next_subpart = &newparts->subpart;
while (next && *(++next)) {
bdevdef = next;
next = strchr(bdevdef, ',');
length = (!next) ? (sizeof(buf) - 1) :
min_t(int, next - bdevdef, sizeof(buf) - 1);
strncpy(buf, bdevdef, length);
buf[length] = '\0';
ret = parse_subpart(next_subpart, buf);
if (ret)
goto fail;
newparts->nr_subparts++;
next_subpart = &(*next_subpart)->next_subpart;
}
if (!newparts->subpart) {
pr_warn("cmdline partition has no valid partition.");
ret = -EINVAL;
goto fail;
}
*parts = newparts;
return 0;
fail:
free_subpart(newparts);
kfree(newparts);
return ret;
}
void cmdline_parts_free(struct cmdline_parts **parts)
{
struct cmdline_parts *next_parts;
while (*parts) {
next_parts = (*parts)->next_parts;
free_subpart(*parts);
kfree(*parts);
*parts = next_parts;
}
}
EXPORT_SYMBOL(cmdline_parts_free);
int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
{
int ret;
char *buf;
char *pbuf;
char *next;
struct cmdline_parts **next_parts;
*parts = NULL;
next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
if (!buf)
return -ENOMEM;
next_parts = parts;
while (next && *pbuf) {
next = strchr(pbuf, ';');
if (next)
*next = '\0';
ret = parse_parts(next_parts, pbuf);
if (ret)
goto fail;
if (next)
pbuf = ++next;
next_parts = &(*next_parts)->next_parts;
}
if (!*parts) {
pr_warn("cmdline partition has no valid partition.");
ret = -EINVAL;
goto fail;
}
ret = 0;
done:
kfree(buf);
return ret;
fail:
cmdline_parts_free(parts);
goto done;
}
EXPORT_SYMBOL(cmdline_parts_parse);
struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
const char *bdev)
{
while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
parts = parts->next_parts;
return parts;
}
EXPORT_SYMBOL(cmdline_parts_find);
/*
* add_part()
* 0 success.
* 1 can not add so many partitions.
*/
int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
int slot,
int (*add_part)(int, struct cmdline_subpart *, void *),
void *param)
{
sector_t from = 0;
struct cmdline_subpart *subpart;
for (subpart = parts->subpart; subpart;
subpart = subpart->next_subpart, slot++) {
if (subpart->from == (sector_t)(~0ULL))
subpart->from = from;
else
from = subpart->from;
if (from >= disk_size)
break;
if (subpart->size > (disk_size - from))
subpart->size = disk_size - from;
from += subpart->size;
if (add_part(slot, subpart, param))
break;
}
return slot;
}
EXPORT_SYMBOL(cmdline_parts_set);

@@ -163,15 +163,31 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
spin_unlock_irq(&ev->lock);
}
/*
* Tell userland about new events. Only the events listed in @disk->events are
* reported, and only if DISK_EVENT_FLAG_UEVENT is set. Otherwise, events are
* processed internally but never get reported to userland.
*/
static void disk_event_uevent(struct gendisk *disk, unsigned int events)
{
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
int nr_events = 0, i;
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
if (events & disk->events & (1 << i))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr)
{
struct gendisk *disk = ev->disk;
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
unsigned int clearing = *clearing_ptr;
unsigned int events;
unsigned long intv;
int nr_events = 0, i;
/* check events */
events = disk->fops->check_events(disk, clearing);
@@ -190,19 +206,11 @@ static void disk_check_events(struct disk_events *ev,
spin_unlock_irq(&ev->lock);
/*
* Tell userland about new events. Only the events listed in
* @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
* is set. Otherwise, events are processed internally but never
* get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
if ((events & disk->events & (1 << i)) &&
(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
envp[nr_events++] = disk_uevents[i];
if (events & DISK_EVENT_MEDIA_CHANGE)
inc_diskseq(disk);
if (nr_events)
kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
if (disk->event_flags & DISK_EVENT_FLAG_UEVENT)
disk_event_uevent(disk, events);
}
/**
@@ -281,6 +289,32 @@ bool bdev_check_media_change(struct block_device *bdev)
}
EXPORT_SYMBOL(bdev_check_media_change);
/**
* disk_force_media_change - force a media change event
* @disk: the disk which will raise the event
* @events: the events to raise
*
* Generate uevents for the disk. If DISK_EVENT_MEDIA_CHANGE is present,
* attempt to free all dentries and inodes and invalidate all block
* device page cache entries in that case.
*
* Returns %true if DISK_EVENT_MEDIA_CHANGE was raised, or %false if not.
*/
bool disk_force_media_change(struct gendisk *disk, unsigned int events)
{
disk_event_uevent(disk, events);
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return false;
if (__invalidate_device(disk->part0, true))
pr_warn("VFS: busy inodes on changed media %s\n",
disk->disk_name);
set_bit(GD_NEED_PART_SCAN, &disk->state);
return true;
}
EXPORT_SYMBOL_GPL(disk_force_media_change);
/*
* Separate this part out so that a different pointer for clearing_ptr can be
* passed in for disk_clear_events.
@@ -410,17 +444,17 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
/*
* disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
*/
void disk_alloc_events(struct gendisk *disk)
int disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
if (!disk->fops->check_events || !disk->events)
return;
return 0;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev) {
pr_warn("%s: failed to initialize events\n", disk->disk_name);
return;
return -ENOMEM;
}
INIT_LIST_HEAD(&ev->node);
@ -432,6 +466,7 @@ void disk_alloc_events(struct gendisk *disk)
INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
disk->ev = ev;
return 0;
}
void disk_add_events(struct gendisk *disk)

@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_BACK_MERGE;
}
@@ -630,6 +633,9 @@ static inline bool elv_support_iosched(struct request_queue *q)
*/
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
return NULL;
if (q->nr_hw_queues != 1 &&
!blk_mq_is_sbitmap_shared(q->tag_set->flags))
return NULL;
@@ -702,7 +708,6 @@ void elevator_init_mq(struct request_queue *q)
elevator_put(e);
}
}
EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
/*
* switch to new_e io scheduler. be careful not to introduce deadlocks -

@@ -29,6 +29,23 @@
static struct kobject *block_depr;
/*
* Unique, monotonically increasing sequential number associated with block
* device instances (i.e. incremented each time a device is attached).
* Associating uevents with block devices in userspace is difficult and racy:
* the uevent netlink socket is lossy, and on slow and overloaded systems has
* a very high latency.
* Block devices do not have exclusive owners in userspace, any process can set
* one up (e.g. loop devices). Moreover, device names can be reused (e.g. loop0
* can be reused again and again).
* A userspace process setting up a block device and watching for its events
* cannot thus reliably tell whether an event relates to the device it just set
* up or another earlier instance with the same name.
* This sequential number allows userspace processes to solve this problem, and
* uniquely associate an uevent to the lifetime of a device.
*/
static atomic64_t diskseq;
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);
@@ -60,7 +77,8 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
* initial capacity during probing.
*/
if (size == capacity ||
(disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
!disk_live(disk) ||
(disk->flags & GENHD_FL_HIDDEN))
return false;
pr_info("%s: detected capacity change from %lld to %lld\n",
@@ -78,11 +96,17 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
EXPORT_SYMBOL_GPL(set_capacity_and_notify);
/*
* Format the device name of the indicated disk into the supplied buffer and
* return a pointer to that same buffer for convenience.
* Format the device name of the indicated block device into the supplied buffer
* and return a pointer to that same buffer for convenience.
*
* Note: do not use this in new code, use the %pg specifier to sprintf and
* printk instead.
*/
char *disk_name(struct gendisk *hd, int partno, char *buf)
const char *bdevname(struct block_device *bdev, char *buf)
{
struct gendisk *hd = bdev->bd_disk;
int partno = bdev->bd_partno;
if (!partno)
snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
@@ -92,11 +116,6 @@ char *disk_name(struct gendisk *hd, int partno, char *buf)
return buf;
}
const char *bdevname(struct block_device *bdev, char *buf)
{
return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
}
EXPORT_SYMBOL(bdevname);
static void part_stat_read_all(struct block_device *part,
@@ -294,54 +313,19 @@ void unregister_blkdev(unsigned int major, const char *name)
EXPORT_SYMBOL(unregister_blkdev);
/**
* blk_mangle_minor - scatter minor numbers apart
* @minor: minor number to mangle
*
* Scatter consecutively allocated @minor number apart if MANGLE_DEVT
* is enabled. Mangling twice gives the original value.
*
* RETURNS:
* Mangled value.
*
* CONTEXT:
* Don't care.
*/
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
int i;
for (i = 0; i < MINORBITS / 2; i++) {
int low = minor & (1 << i);
int high = minor & (1 << (MINORBITS - 1 - i));
int distance = MINORBITS - 1 - 2 * i;
minor ^= low | high; /* clear both bits */
low <<= distance; /* swap the positions */
high >>= distance;
minor |= low | high; /* and set */
}
#endif
return minor;
}
int blk_alloc_ext_minor(void)
{
int idx;
idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
if (idx < 0) {
if (idx == -ENOSPC)
return -EBUSY;
return idx;
}
return blk_mangle_minor(idx);
if (idx == -ENOSPC)
return -EBUSY;
return idx;
}
void blk_free_ext_minor(unsigned int minor)
{
ida_free(&ext_devt_ida, blk_mangle_minor(minor));
ida_free(&ext_devt_ida, minor);
}
static char *bdevt_str(dev_t devt, char *buf)
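
The "mangling twice gives the original value" claim in the kernel-doc removed
above can be checked in isolation. A throwaway sketch, assuming the kernel's
MINORBITS == 20:

	#include <assert.h>

	#define MINORBITS 20	/* assumed, as in the kernel */

	static int blk_mangle_minor(int minor)	/* copy of the removed helper */
	{
		int i;

		for (i = 0; i < MINORBITS / 2; i++) {
			int low = minor & (1 << i);
			int high = minor & (1 << (MINORBITS - 1 - i));
			int distance = MINORBITS - 1 - 2 * i;

			minor ^= low | high;	/* clear both bits */
			low <<= distance;	/* swap the positions */
			high >>= distance;
			minor |= low | high;	/* and set */
		}
		return minor;
	}

	int main(void)
	{
		int m;

		for (m = 0; m < (1 << MINORBITS); m++)
			assert(blk_mangle_minor(blk_mangle_minor(m)) == m);
		return 0;
	}
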
@@ -390,78 +374,20 @@ static void disk_scan_partitions(struct gendisk *disk)
blkdev_put(bdev, FMODE_READ);
}
static void register_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
int err;
ddev->parent = parent;
dev_set_name(ddev, "%s", disk->disk_name);
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
if (groups) {
WARN_ON(ddev->groups);
ddev->groups = groups;
}
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
err = sysfs_create_link(block_depr, &ddev->kobj,
kobject_name(&ddev->kobj));
if (err) {
device_del(ddev);
return;
}
}
/*
* avoid probable deadlock caused by allocating memory with
* GFP_KERNEL in runtime_resume callback of its all ancestor
* devices
*/
pm_runtime_set_memalloc_noio(ddev, true);
disk->part0->bd_holder_dir =
kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
if (disk->flags & GENHD_FL_HIDDEN)
return;
disk_scan_partitions(disk);
/* announce the disk and partitions after all partitions are created */
dev_set_uevent_suppress(ddev, 0);
disk_uevent(disk, KOBJ_ADD);
if (disk->queue->backing_dev_info->dev) {
err = sysfs_create_link(&ddev->kobj,
&disk->queue->backing_dev_info->dev->kobj,
"bdi");
WARN_ON(err);
}
}
/**
* __device_add_disk - add disk information to kernel list
* device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
* @groups: Additional per-device sysfs groups
* @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
* with the kernel.
*
* FIXME: error handling
*/
static void __device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups,
bool register_queue)
int device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
int ret;
/*
@@ -470,8 +396,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
* elevator if one is needed, that is, for devices requesting queue
* registration.
*/
if (register_queue)
elevator_init_mq(disk->queue);
elevator_init_mq(disk->queue);
/*
* If the driver provides an explicit major number it also must provide
@@ -481,7 +406,8 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
* and all partitions from the extended dev_t space.
*/
if (disk->major) {
WARN_ON(!disk->minors);
if (WARN_ON(!disk->minors))
return -EINVAL;
if (disk->minors > DISK_MAX_PARTS) {
pr_err("block: can't allocate more than %d partitions\n",
@@ -489,21 +415,65 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
disk->minors = DISK_MAX_PARTS;
}
} else {
-		WARN_ON(disk->minors);
+		if (WARN_ON(disk->minors))
+			return -EINVAL;

		ret = blk_alloc_ext_minor();
-		if (ret < 0) {
-			WARN_ON(1);
-			return;
-		}
+		if (ret < 0)
+			return ret;
		disk->major = BLOCK_EXT_MAJOR;
-		disk->first_minor = MINOR(ret);
+		disk->first_minor = ret;
disk->flags |= GENHD_FL_EXT_DEVT;
}
-	disk->flags |= GENHD_FL_UP;
-	disk_alloc_events(disk);
+	ret = disk_alloc_events(disk);
+	if (ret)
+		goto out_free_ext_minor;
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
ddev->parent = parent;
ddev->groups = groups;
dev_set_name(ddev, "%s", disk->disk_name);
if (!(disk->flags & GENHD_FL_HIDDEN))
ddev->devt = MKDEV(disk->major, disk->first_minor);
ret = device_add(ddev);
if (ret)
goto out_disk_release_events;
if (!sysfs_deprecated) {
ret = sysfs_create_link(block_depr, &ddev->kobj,
kobject_name(&ddev->kobj));
if (ret)
goto out_device_del;
}
/*
* avoid probable deadlock caused by allocating memory with
 * GFP_KERNEL in the runtime_resume callback of all its ancestor
* devices
*/
pm_runtime_set_memalloc_noio(ddev, true);
ret = blk_integrity_add(disk);
if (ret)
goto out_del_block_link;
disk->part0->bd_holder_dir =
kobject_create_and_add("holders", &ddev->kobj);
if (!disk->part0->bd_holder_dir)
goto out_del_integrity;
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
if (!disk->slave_dir)
goto out_put_holder_dir;
ret = bd_register_pending_holders(disk);
if (ret < 0)
goto out_put_slave_dir;
ret = blk_register_queue(disk);
if (ret)
goto out_put_slave_dir;
if (disk->flags & GENHD_FL_HIDDEN) {
/*
@ -513,48 +483,56 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->flags |= GENHD_FL_NO_PART_SCAN;
} else {
-		struct backing_dev_info *bdi = disk->queue->backing_dev_info;
-		struct device *dev = disk_to_dev(disk);

		/* Register BDI before referencing it from bdev */
-		dev->devt = MKDEV(disk->major, disk->first_minor);
-		ret = bdi_register(bdi, "%u:%u",
+		ret = bdi_register(disk->bdi, "%u:%u",
				disk->major, disk->first_minor);
-		WARN_ON(ret);
-		bdi_set_owner(bdi, dev);
-		bdev_add(disk->part0, dev->devt);
+		if (ret)
+			goto out_unregister_queue;
+		bdi_set_owner(disk->bdi, ddev);
+		ret = sysfs_create_link(&ddev->kobj,
+					&disk->bdi->dev->kobj, "bdi");
+		if (ret)
+			goto out_unregister_bdi;
+		bdev_add(disk->part0, ddev->devt);
disk_scan_partitions(disk);
/*
* Announce the disk and partitions after all partitions are
* created. (for hidden disks uevents remain suppressed forever)
*/
dev_set_uevent_suppress(ddev, 0);
disk_uevent(disk, KOBJ_ADD);
}
-	register_disk(parent, disk, groups);
-	if (register_queue)
-		blk_register_queue(disk);

-	/*
-	 * Take an extra ref on queue which will be put on disk_release()
-	 * so that it sticks around as long as @disk is there.
-	 */
-	if (blk_get_queue(disk->queue))
-		set_bit(GD_QUEUE_REF, &disk->state);
-	else
-		WARN_ON_ONCE(1);

	disk_update_readahead(disk);
	disk_add_events(disk);
-	blk_integrity_add(disk);
-}
+	return 0;

-void device_add_disk(struct device *parent, struct gendisk *disk,
-		     const struct attribute_group **groups)
-{
-	__device_add_disk(parent, disk, groups, true);
out_unregister_bdi:
if (!(disk->flags & GENHD_FL_HIDDEN))
bdi_unregister(disk->bdi);
out_unregister_queue:
blk_unregister_queue(disk);
out_put_slave_dir:
kobject_put(disk->slave_dir);
out_put_holder_dir:
kobject_put(disk->part0->bd_holder_dir);
out_del_integrity:
blk_integrity_del(disk);
out_del_block_link:
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(ddev));
out_device_del:
device_del(ddev);
out_disk_release_events:
disk_release_events(disk);
out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor);
return WARN_ON_ONCE(ret); /* keep until all callers handle errors */
}
EXPORT_SYMBOL(device_add_disk);
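device_add_disk() and add_disk() now return an int, so callers are expected to unwind on failure. A hedged sketch of the driver-side pattern this series converts null_blk and virtio_blk to (fragment only; the cleanup label is illustrative):

	err = add_disk(disk);
	if (err)
		goto out_cleanup_disk;	/* no del_gendisk(): the disk was never added */
	return 0;

out_cleanup_disk:
	blk_cleanup_disk(disk);
	return err;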
-void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
-{
-	__device_add_disk(parent, disk, NULL, false);
-}
-EXPORT_SYMBOL(device_add_disk_no_queue_reg);
/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
@ -578,26 +556,20 @@ void del_gendisk(struct gendisk *disk)
{
might_sleep();
-	if (WARN_ON_ONCE(!disk->queue))
+	if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
return;
blk_integrity_del(disk);
disk_del_events(disk);
mutex_lock(&disk->open_mutex);
-	disk->flags &= ~GENHD_FL_UP;
+	remove_inode_hash(disk->part0->bd_inode);
	blk_drop_partitions(disk);
	mutex_unlock(&disk->open_mutex);

	fsync_bdev(disk->part0);
	__invalidate_device(disk->part0, true);

-	/*
-	 * Unhash the bdev inode for this device so that it can't be looked
-	 * up any more even if openers still hold references to it.
-	 */
-	remove_inode_hash(disk->part0->bd_inode);
set_capacity(disk, 0);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
@ -607,7 +579,7 @@ void del_gendisk(struct gendisk *disk)
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
-		bdi_unregister(disk->queue->backing_dev_info);
+		bdi_unregister(disk->bdi);
}
blk_unregister_queue(disk);
@ -683,7 +655,6 @@ void __init printk_all_partitions(void)
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
struct block_device *part;
-		char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
unsigned long idx;
@ -703,11 +674,10 @@ void __init printk_all_partitions(void)
xa_for_each(&disk->part_tbl, idx, part) {
if (!bdev_nr_sectors(part))
continue;
-			printk("%s%s %10llu %s %s",
+			printk("%s%s %10llu %pg %s",
			       bdev_is_partition(part) ? "  " : "",
			       bdevt_str(part->bd_dev, devt_buf),
-			       bdev_nr_sectors(part) >> 1,
-			       disk_name(disk, part->bd_partno, name_buf),
+			       bdev_nr_sectors(part) >> 1, part,
part->bd_meta_info ?
part->bd_meta_info->uuid : "");
if (bdev_is_partition(part))
@ -785,7 +755,6 @@ static int show_partition(struct seq_file *seqf, void *v)
struct gendisk *sgp = v;
struct block_device *part;
unsigned long idx;
-	char buf[BDEVNAME_SIZE];
	/* Don't show non-partitionable removable devices or empty devices */
if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
@ -798,10 +767,9 @@ static int show_partition(struct seq_file *seqf, void *v)
xa_for_each(&sgp->part_tbl, idx, part) {
if (!bdev_nr_sectors(part))
continue;
-		seq_printf(seqf, "%4d  %7d %10llu %s\n",
+		seq_printf(seqf, "%4d  %7d %10llu %pg\n",
			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
-			   bdev_nr_sectors(part) >> 1,
-			   disk_name(sgp, part->bd_partno, buf));
+			   bdev_nr_sectors(part) >> 1, part);
}
rcu_read_unlock();
return 0;
@ -968,6 +936,14 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}
static ssize_t diskseq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%llu\n", disk->diskseq);
}
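The same sequence number is also exposed, later in this series, through the BLKGETDISKSEQ ioctl. A minimal userspace sketch (device path is an example, error handling trimmed):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		uint64_t seq;
		int fd = open("/dev/loop0", O_RDONLY);

		if (fd < 0 || ioctl(fd, BLKGETDISKSEQ, &seq) < 0)
			return 1;
		printf("diskseq: %llu\n", (unsigned long long)seq);
		return 0;
	}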
static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
@ -980,6 +956,7 @@ static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
@ -1025,6 +1002,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_events.attr,
&dev_attr_events_async.attr,
&dev_attr_events_poll_msecs.attr,
&dev_attr_diskseq.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@ -1074,17 +1052,24 @@ static void disk_release(struct device *dev)
might_sleep();
if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
blk_free_ext_minor(MINOR(dev->devt));
disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
-	if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
-		blk_put_queue(disk->queue);
-	bdput(disk->part0);	/* frees the disk */
+	disk->queue->disk = NULL;
+	blk_put_queue(disk->queue);
+	iput(disk->part0->bd_inode);	/* frees the disk */
}
static int block_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct gendisk *disk = dev_to_disk(dev);
return add_uevent_var(env, "DISKSEQ=%llu", disk->diskseq);
}
struct class block_class = {
.name = "block",
.dev_uevent = block_uevent,
};
static char *block_devnode(struct device *dev, umode_t *mode,
@ -1116,7 +1101,6 @@ static int diskstats_show(struct seq_file *seqf, void *v)
{
struct gendisk *gp = v;
struct block_device *hd;
-	char buf[BDEVNAME_SIZE];
unsigned int inflight;
struct disk_stats stat;
unsigned long idx;
@ -1139,15 +1123,14 @@ static int diskstats_show(struct seq_file *seqf, void *v)
else
inflight = part_in_flight(hd);
-		seq_printf(seqf, "%4d %7d %s "
+		seq_printf(seqf, "%4d %7d %pg "
"%lu %lu %lu %u "
"%lu %lu %lu %u "
"%u %u %u "
"%lu %lu %lu %u "
"%lu %u"
"\n",
-			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
-			   disk_name(gp, hd->bd_partno, buf),
+			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd,
stat.ios[STAT_READ],
stat.merges[STAT_READ],
stat.sectors[STAT_READ],
@ -1239,17 +1222,25 @@ dev_t blk_lookup_devt(const char *name, int partno)
return devt;
}
-struct gendisk *__alloc_disk_node(int minors, int node_id)
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+		struct lock_class_key *lkclass)
{
struct gendisk *disk;
+	if (!blk_get_queue(q))
+		return NULL;

	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
	if (!disk)
-		return NULL;
+		goto out_put_queue;

+	disk->bdi = bdi_alloc(node_id);
+	if (!disk->bdi)
+		goto out_free_disk;

	disk->part0 = bdev_alloc(disk, 0);
	if (!disk->part0)
-		goto out_free_disk;
+		goto out_free_bdi;
disk->node_id = node_id;
mutex_init(&disk->open_mutex);
@ -1257,23 +1248,33 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
goto out_destroy_part_tbl;
-	disk->minors = minors;
rand_initialize_disk(disk);
disk_to_dev(disk)->class = &block_class;
disk_to_dev(disk)->type = &disk_type;
device_initialize(disk_to_dev(disk));
inc_diskseq(disk);
disk->queue = q;
q->disk = disk;
lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
INIT_LIST_HEAD(&disk->slave_bdevs);
#endif
return disk;
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
-	bdput(disk->part0);
+	iput(disk->part0->bd_inode);
+out_free_bdi:
+	bdi_put(disk->bdi);
out_free_disk:
	kfree(disk);
+out_put_queue:
+	blk_put_queue(q);
return NULL;
}
EXPORT_SYMBOL(__alloc_disk_node);
-struct gendisk *__blk_alloc_disk(int node)
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
{
struct request_queue *q;
struct gendisk *disk;
@ -1282,12 +1283,11 @@ struct gendisk *__blk_alloc_disk(int node)
if (!q)
return NULL;
-	disk = __alloc_disk_node(0, node);
+	disk = __alloc_disk_node(q, node, lkclass);
if (!disk) {
blk_cleanup_queue(q);
return NULL;
}
-	disk->queue = q;
return disk;
}
EXPORT_SYMBOL(__blk_alloc_disk);
@ -1362,3 +1362,8 @@ int bdev_read_only(struct block_device *bdev)
return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
EXPORT_SYMBOL(bdev_read_only);
void inc_diskseq(struct gendisk *disk)
{
disk->diskseq = atomic64_inc_return(&diskseq);
}

--- /dev/null
+++ b/block/holder.c
@@ -0,0 +1,174 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/genhd.h>
struct bd_holder_disk {
struct list_head list;
struct block_device *bdev;
int refcnt;
};
static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
struct gendisk *disk)
{
struct bd_holder_disk *holder;
list_for_each_entry(holder, &disk->slave_bdevs, list)
if (holder->bdev == bdev)
return holder;
return NULL;
}
static int add_symlink(struct kobject *from, struct kobject *to)
{
return sysfs_create_link(from, to, kobject_name(to));
}
static void del_symlink(struct kobject *from, struct kobject *to)
{
sysfs_remove_link(from, kobject_name(to));
}
static int __link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
int ret;
ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
if (ret)
return ret;
ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
if (ret)
del_symlink(disk->slave_dir, bdev_kobj(bdev));
return ret;
}
/**
* bd_link_disk_holder - create symlinks between holding disk and slave bdev
* @bdev: the claimed slave bdev
* @disk: the holding disk
*
* DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
*
 * This function creates the following sysfs symlinks.
*
* - from "slaves" directory of the holder @disk to the claimed @bdev
* - from "holders" directory of the @bdev to the holder @disk
*
* For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
* passed to bd_link_disk_holder(), then:
*
* /sys/block/dm-0/slaves/sda --> /sys/block/sda
* /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
*
* The caller must have claimed @bdev before calling this function and
* ensure that both @bdev and @disk are valid during the creation and
* lifetime of these symlinks.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder_disk *holder;
int ret = 0;
mutex_lock(&disk->open_mutex);
WARN_ON_ONCE(!bdev->bd_holder);
/* FIXME: remove the following once add_disk() handles errors */
if (WARN_ON(!bdev->bd_holder_dir))
goto out_unlock;
holder = bd_find_holder_disk(bdev, disk);
if (holder) {
holder->refcnt++;
goto out_unlock;
}
holder = kzalloc(sizeof(*holder), GFP_KERNEL);
if (!holder) {
ret = -ENOMEM;
goto out_unlock;
}
INIT_LIST_HEAD(&holder->list);
holder->bdev = bdev;
holder->refcnt = 1;
if (disk->slave_dir) {
ret = __link_disk_holder(bdev, disk);
if (ret) {
kfree(holder);
goto out_unlock;
}
}
list_add(&holder->list, &disk->slave_bdevs);
/*
* del_gendisk drops the initial reference to bd_holder_dir, so we need
* to keep our own here to allow for cleanup past that point.
*/
kobject_get(bdev->bd_holder_dir);
out_unlock:
mutex_unlock(&disk->open_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
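A sketch of the dm-style call sequence the kernel-doc above describes (the claiming step is illustrative, not taken from this patch):

	/* claim the component device, then link it under the holder */
	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	ret = bd_link_disk_holder(bdev, dm_disk(md));
	if (ret)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);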
static void __unlink_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
del_symlink(disk->slave_dir, bdev_kobj(bdev));
del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
}
/**
* bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
* @disk: the holding disk
*
* DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
*
* CONTEXT:
* Might sleep.
*/
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder_disk *holder;
mutex_lock(&disk->open_mutex);
holder = bd_find_holder_disk(bdev, disk);
if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
if (disk->slave_dir)
__unlink_disk_holder(bdev, disk);
kobject_put(bdev->bd_holder_dir);
list_del_init(&holder->list);
kfree(holder);
}
mutex_unlock(&disk->open_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
int bd_register_pending_holders(struct gendisk *disk)
{
struct bd_holder_disk *holder;
int ret;
mutex_lock(&disk->open_mutex);
list_for_each_entry(holder, &disk->slave_bdevs, list) {
ret = __link_disk_holder(holder->bdev, disk);
if (ret)
goto out_undo;
}
mutex_unlock(&disk->open_mutex);
return 0;
out_undo:
list_for_each_entry_continue_reverse(holder, &disk->slave_bdevs, list)
__unlink_disk_holder(holder->bdev, disk);
mutex_unlock(&disk->open_mutex);
return ret;
}

--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -16,6 +16,7 @@
static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition __user *upart, int op)
{
struct gendisk *disk = bdev->bd_disk;
struct blkpg_partition p;
long long start, length;
@ -30,7 +31,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
return -EINVAL;
if (op == BLKPG_DEL_PARTITION)
-		return bdev_del_partition(bdev, p.pno);
+		return bdev_del_partition(disk, p.pno);
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;
@ -40,9 +41,9 @@ static int blkpg_do_ioctl(struct block_device *bdev,
/* check if partition is aligned to blocksize */
if (p.start & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
-		return bdev_add_partition(bdev, p.pno, start, length);
+		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
-		return bdev_resize_partition(bdev, p.pno, start, length);
+		return bdev_resize_partition(disk, p.pno, start, length);
default:
return -EINVAL;
}
@ -469,6 +470,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
BLKDEV_DISCARD_SECURE);
case BLKZEROOUT:
return blk_ioctl_zeroout(bdev, mode, arg);
case BLKGETDISKSEQ:
return put_u64(argp, bdev->bd_disk->diskseq);
case BLKREPORTZONE:
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
@ -504,7 +507,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
-		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+		bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
return blkdev_reread_part(bdev, mode);
@ -554,7 +557,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKFRAGET:
if (!argp)
return -EINVAL;
-		return put_long(argp, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
+		return put_long(argp,
+			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)
@ -626,7 +630,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!argp)
return -EINVAL;
return compat_put_long(argp,
-			(bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
+			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
if ((size >> 9) > ~0UL)

--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -74,9 +74,8 @@ int ioprio_check_cap(int ioprio)
fallthrough;
/* rt has prio field too */
case IOPRIO_CLASS_BE:
-		if (data >= IOPRIO_BE_NR || data < 0)
+		if (data >= IOPRIO_NR_LEVELS || data < 0)
return -EINVAL;
break;
case IOPRIO_CLASS_IDLE:
break;
@ -171,7 +170,7 @@ static int get_task_ioprio(struct task_struct *p)
ret = security_task_getioprio(p);
if (ret)
goto out;
-	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+	ret = IOPRIO_DEFAULT;
task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
@ -183,9 +182,9 @@ out:
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
if (!ioprio_valid(aprio))
-		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+		aprio = IOPRIO_DEFAULT;
	if (!ioprio_valid(bprio))
-		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+		bprio = IOPRIO_DEFAULT;
return min(aprio, bprio);
}
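IOPRIO_DEFAULT replaces the two open-coded IOPRIO_PRIO_VALUE() expressions above. The value macros pack a 3-bit class above the priority level, roughly (a sketch against the uapi header, not new behavior):

	/* prio = (class << IOPRIO_CLASS_SHIFT) | data */
	unsigned short prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	/* IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_BE */
	/* IOPRIO_PRIO_DATA(prio)  == 4 */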

--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -629,6 +629,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
if (elv_bio_merge_ok(__rq, bio)) {
*rq = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
}

--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -264,7 +264,6 @@ config SYSV68_PARTITION
config CMDLINE_PARTITION
bool "Command line partition support" if PARTITION_ADVANCED
select BLK_CMDLINE_PARSER
help
Say Y here if you want to read the partition table from bootargs.
The format for the command line is just like mtdparts.

--- a/block/partitions/acorn.c
+++ b/block/partitions/acorn.c
@@ -275,7 +275,7 @@ int adfspart_check_ADFS(struct parsed_partitions *state)
/*
* Work out start of non-adfs partition.
*/
-	nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
+	nr_sects = get_capacity(state->disk) - start_sect;
if (start_sect) {
switch (id) {
@ -540,7 +540,7 @@ int adfspart_check_EESOX(struct parsed_partitions *state)
if (i != 0) {
sector_t size;
-		size = get_capacity(state->bdev->bd_disk);
+		size = get_capacity(state->disk);
put_partition(state, slot++, start, size - start);
strlcat(state->pp_buf, "\n", PAGE_SIZE);
}

--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -66,22 +66,6 @@ struct pvd {
#define LVM_MAXLVS 256
-/**
- * last_lba(): return number of last logical block of device
- * @bdev: block device
- *
- * Description: Returns last LBA value on success, 0 on error.
- * This is stored (by sd and ide-geometry) in
- * the part[0] entry for this disk, and is the number of
- * physical sectors available on the disk.
- */
-static u64 last_lba(struct block_device *bdev)
-{
-	if (!bdev || !bdev->bd_inode)
-		return 0;
-	return (bdev->bd_inode->i_size >> 9) - 1ULL;
-}
/**
* read_lba(): Read bytes from disk, starting at given LBA
* @state
@ -89,7 +73,7 @@ static u64 last_lba(struct block_device *bdev)
* @buffer
* @count
*
- * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Description: Reads @count bytes from @state->disk into @buffer.
* Returns number of bytes read on success, 0 on error.
*/
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
@ -97,7 +81,7 @@ static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
{
size_t totalreadcount = 0;
-	if (!buffer || lba + count / 512 > last_lba(state->bdev))
+	if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
return 0;
while (count) {

--- a/block/partitions/amiga.c
+++ b/block/partitions/amiga.c
@@ -34,7 +34,6 @@ int amiga_partition(struct parsed_partitions *state)
int start_sect, nr_sects, blk, part, res = 0;
int blksize = 1; /* Multiplier for disk block size */
int slot = 1;
-	char b[BDEVNAME_SIZE];
for (blk = 0; ; blk++, put_dev_sector(sect)) {
if (blk == RDB_ALLOCATION_LIMIT)
@ -42,7 +41,7 @@ int amiga_partition(struct parsed_partitions *state)
data = read_part_sector(state, blk, &sect);
if (!data) {
pr_err("Dev %s: unable to read RDB block %d\n",
-				bdevname(state->bdev, b), blk);
+				state->disk->disk_name, blk);
res = -1;
goto rdb_done;
}
@ -64,7 +63,7 @@ int amiga_partition(struct parsed_partitions *state)
}
pr_err("Dev %s: RDB in block %d has bad checksum\n",
-				bdevname(state->bdev, b), blk);
+				state->disk->disk_name, blk);
}
/* blksize is blocks per 512 byte standard block */
@ -84,7 +83,7 @@ int amiga_partition(struct parsed_partitions *state)
data = read_part_sector(state, blk, &sect);
if (!data) {
pr_err("Dev %s: unable to read partition block %d\n",
-				bdevname(state->bdev, b), blk);
+				state->disk->disk_name, blk);
res = -1;
goto rdb_done;
}

--- a/block/partitions/atari.c
+++ b/block/partitions/atari.c
@@ -47,7 +47,7 @@ int atari_partition(struct parsed_partitions *state)
* ATARI partition scheme supports 512 lba only. If this is not
* the case, bail early to avoid miscalculating hd_size.
*/
-	if (bdev_logical_block_size(state->bdev) != 512)
+	if (queue_logical_block_size(state->disk->queue) != 512)
return 0;
rs = read_part_sector(state, 0, &sect);
@ -55,7 +55,7 @@ int atari_partition(struct parsed_partitions *state)
return -1;
/* Verify this is an Atari rootsector: */
-	hd_size = state->bdev->bd_inode->i_size >> 9;
+	hd_size = get_capacity(state->disk);
if (!VALID_PARTITION(&rs->part[0], hd_size) &&
!VALID_PARTITION(&rs->part[1], hd_size) &&
!VALID_PARTITION(&rs->part[2], hd_size) &&

--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -9,7 +9,7 @@
* description.
*/
struct parsed_partitions {
-	struct block_device *bdev;
+	struct gendisk *disk;
char name[BDEVNAME_SIZE];
struct {
sector_t from;

--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -14,20 +14,248 @@
* For further information, see "Documentation/block/cmdline-partition.rst"
*
*/
#include <linux/cmdline-parser.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include "check.h"
/* partition flags */
#define PF_RDONLY 0x01 /* Device is read only */
#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
struct cmdline_subpart {
char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
sector_t from;
sector_t size;
int flags;
struct cmdline_subpart *next_subpart;
};
struct cmdline_parts {
char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
unsigned int nr_subparts;
struct cmdline_subpart *subpart;
struct cmdline_parts *next_parts;
};
static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
{
int ret = 0;
struct cmdline_subpart *new_subpart;
*subpart = NULL;
new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
if (!new_subpart)
return -ENOMEM;
if (*partdef == '-') {
new_subpart->size = (sector_t)(~0ULL);
partdef++;
} else {
new_subpart->size = (sector_t)memparse(partdef, &partdef);
if (new_subpart->size < (sector_t)PAGE_SIZE) {
pr_warn("cmdline partition size is invalid.");
ret = -EINVAL;
goto fail;
}
}
if (*partdef == '@') {
partdef++;
new_subpart->from = (sector_t)memparse(partdef, &partdef);
} else {
new_subpart->from = (sector_t)(~0ULL);
}
if (*partdef == '(') {
int length;
char *next = strchr(++partdef, ')');
if (!next) {
pr_warn("cmdline partition format is invalid.");
ret = -EINVAL;
goto fail;
}
length = min_t(int, next - partdef,
sizeof(new_subpart->name) - 1);
strncpy(new_subpart->name, partdef, length);
new_subpart->name[length] = '\0';
partdef = ++next;
} else
new_subpart->name[0] = '\0';
new_subpart->flags = 0;
if (!strncmp(partdef, "ro", 2)) {
new_subpart->flags |= PF_RDONLY;
partdef += 2;
}
if (!strncmp(partdef, "lk", 2)) {
new_subpart->flags |= PF_POWERUP_LOCK;
partdef += 2;
}
*subpart = new_subpart;
return 0;
fail:
kfree(new_subpart);
return ret;
}
static void free_subpart(struct cmdline_parts *parts)
{
struct cmdline_subpart *subpart;
while (parts->subpart) {
subpart = parts->subpart;
parts->subpart = subpart->next_subpart;
kfree(subpart);
}
}
static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
{
int ret = -EINVAL;
char *next;
int length;
struct cmdline_subpart **next_subpart;
struct cmdline_parts *newparts;
char buf[BDEVNAME_SIZE + 32 + 4];
*parts = NULL;
newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
if (!newparts)
return -ENOMEM;
next = strchr(bdevdef, ':');
if (!next) {
pr_warn("cmdline partition has no block device.");
goto fail;
}
length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
strncpy(newparts->name, bdevdef, length);
newparts->name[length] = '\0';
newparts->nr_subparts = 0;
next_subpart = &newparts->subpart;
while (next && *(++next)) {
bdevdef = next;
next = strchr(bdevdef, ',');
length = (!next) ? (sizeof(buf) - 1) :
min_t(int, next - bdevdef, sizeof(buf) - 1);
strncpy(buf, bdevdef, length);
buf[length] = '\0';
ret = parse_subpart(next_subpart, buf);
if (ret)
goto fail;
newparts->nr_subparts++;
next_subpart = &(*next_subpart)->next_subpart;
}
if (!newparts->subpart) {
pr_warn("cmdline partition has no valid partition.");
ret = -EINVAL;
goto fail;
}
*parts = newparts;
return 0;
fail:
free_subpart(newparts);
kfree(newparts);
return ret;
}
static void cmdline_parts_free(struct cmdline_parts **parts)
{
struct cmdline_parts *next_parts;
while (*parts) {
next_parts = (*parts)->next_parts;
free_subpart(*parts);
kfree(*parts);
*parts = next_parts;
}
}
static int cmdline_parts_parse(struct cmdline_parts **parts,
const char *cmdline)
{
int ret;
char *buf;
char *pbuf;
char *next;
struct cmdline_parts **next_parts;
*parts = NULL;
next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
if (!buf)
return -ENOMEM;
next_parts = parts;
while (next && *pbuf) {
next = strchr(pbuf, ';');
if (next)
*next = '\0';
ret = parse_parts(next_parts, pbuf);
if (ret)
goto fail;
if (next)
pbuf = ++next;
next_parts = &(*next_parts)->next_parts;
}
if (!*parts) {
pr_warn("cmdline partition has no valid partition.");
ret = -EINVAL;
goto fail;
}
ret = 0;
done:
kfree(buf);
return ret;
fail:
cmdline_parts_free(parts);
goto done;
}
static struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
const char *bdev)
{
while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
parts = parts->next_parts;
return parts;
}
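For reference, the command-line syntax these parsers consume follows Documentation/block/cmdline-partition.rst; device and partition names below are examples only:

	blkdevparts=<blkdev-id>:<partdef>[,<partdef>][;<blkdev-id>:...]
	<partdef> := <size>[@<offset>](name)[ro][lk]   /* "-" as size means "rest of disk" */

	blkdevparts=mmcblk0:1G(boot),512M@2G(swap),-(rootfs)ro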
static char *cmdline;
static struct cmdline_parts *bdev_parts;
-static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+static int add_part(int slot, struct cmdline_subpart *subpart,
+		    struct parsed_partitions *state)
{
	int label_min;
	struct partition_meta_info *info;
	char tmp[sizeof(info->volname) + 4];
-	struct parsed_partitions *state = (struct parsed_partitions *)param;
if (slot >= state->limit)
return 1;
@ -50,6 +278,35 @@ static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
return 0;
}
static int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
struct parsed_partitions *state)
{
sector_t from = 0;
struct cmdline_subpart *subpart;
int slot = 1;
for (subpart = parts->subpart; subpart;
subpart = subpart->next_subpart, slot++) {
if (subpart->from == (sector_t)(~0ULL))
subpart->from = from;
else
from = subpart->from;
if (from >= disk_size)
break;
if (subpart->size > (disk_size - from))
subpart->size = disk_size - from;
from += subpart->size;
if (add_part(slot, subpart, state))
break;
}
return slot;
}
static int __init cmdline_parts_setup(char *s)
{
cmdline = s;
@ -123,7 +380,6 @@ static void cmdline_parts_verifier(int slot, struct parsed_partitions *state)
int cmdline_partition(struct parsed_partitions *state)
{
sector_t disk_size;
-	char bdev[BDEVNAME_SIZE];
struct cmdline_parts *parts;
if (cmdline) {
@ -140,14 +396,13 @@ int cmdline_partition(struct parsed_partitions *state)
if (!bdev_parts)
return 0;
-	bdevname(state->bdev, bdev);
-	parts = cmdline_parts_find(bdev_parts, bdev);
+	parts = cmdline_parts_find(bdev_parts, state->disk->disk_name);
	if (!parts)
		return 0;

-	disk_size = get_capacity(state->bdev->bd_disk) << 9;
+	disk_size = get_capacity(state->disk) << 9;

-	cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+	cmdline_parts_set(parts, disk_size, state);
cmdline_parts_verifier(1, state);
strlcat(state->pp_buf, "\n", PAGE_SIZE);

--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -135,8 +135,8 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
}
state->pp_buf[0] = '\0';
-	state->bdev = hd->part0;
-	disk_name(hd, 0, state->name);
+	state->disk = hd;
+	snprintf(state->name, BDEVNAME_SIZE, "%s", hd->disk_name);
snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
@ -259,9 +259,8 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
blk_free_ext_minor(MINOR(dev->devt));
-	bdput(dev_to_bdev(dev));
+	put_disk(dev_to_bdev(dev)->bd_disk);
+	iput(dev_to_bdev(dev)->bd_inode);
}
static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
@ -281,12 +280,10 @@ struct device_type part_type = {
.uevent = part_uevent,
};
/*
* Must be called either with open_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static void delete_partition(struct block_device *part)
{
lockdep_assert_held(&part->bd_disk->open_mutex);
fsync_bdev(part);
__invalidate_device(part, true);
@ -351,20 +348,17 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
if (xa_load(&disk->part_tbl, partno))
return ERR_PTR(-EBUSY);
+	/* ensure we always have a reference to the whole disk */
+	get_device(disk_to_dev(disk));
+
+	err = -ENOMEM;
	bdev = bdev_alloc(disk, partno);
	if (!bdev)
-		return ERR_PTR(-ENOMEM);
+		goto out_put_disk;
bdev->bd_start_sect = start;
bdev_set_nr_sectors(bdev, len);
-	if (info) {
-		err = -ENOMEM;
-		bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
-		if (!bdev->bd_meta_info)
-			goto out_bdput;
-	}
pdev = &bdev->bd_device;
dname = dev_name(ddev);
if (isdigit(dname[strlen(dname) - 1]))
@ -388,6 +382,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
}
pdev->devt = devt;
if (info) {
err = -ENOMEM;
bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
if (!bdev->bd_meta_info)
goto out_put;
}
/* delay uevent until 'holders' subdir is created */
dev_set_uevent_suppress(pdev, 1);
err = device_add(pdev);
@ -417,14 +418,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
kobject_uevent(&pdev->kobj, KOBJ_ADD);
return bdev;
-out_bdput:
-	bdput(bdev);
-	return ERR_PTR(err);
out_del:
kobject_put(bdev->bd_holder_dir);
device_del(pdev);
out_put:
put_device(pdev);
out_put_disk:
put_disk(disk);
return ERR_PTR(err);
}
@ -449,15 +449,14 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
return overlap;
}
-int bdev_add_partition(struct block_device *bdev, int partno,
-		sector_t start, sector_t length)
+int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+		sector_t length)
{
	struct block_device *part;
-	struct gendisk *disk = bdev->bd_disk;
	int ret;

	mutex_lock(&disk->open_mutex);
-	if (!(disk->flags & GENHD_FL_UP)) {
+	if (!disk_live(disk)) {
ret = -ENXIO;
goto out;
}
@ -475,13 +474,13 @@ out:
return ret;
}
-int bdev_del_partition(struct block_device *bdev, int partno)
+int bdev_del_partition(struct gendisk *disk, int partno)
{
	struct block_device *part = NULL;
	int ret = -ENXIO;

-	mutex_lock(&bdev->bd_disk->open_mutex);
-	part = xa_load(&bdev->bd_disk->part_tbl, partno);
+	mutex_lock(&disk->open_mutex);
+	part = xa_load(&disk->part_tbl, partno);
if (!part)
goto out_unlock;
@ -492,18 +491,18 @@ int bdev_del_partition(struct block_device *bdev, int partno)
delete_partition(part);
ret = 0;
out_unlock:
-	mutex_unlock(&bdev->bd_disk->open_mutex);
+	mutex_unlock(&disk->open_mutex);
return ret;
}
-int bdev_resize_partition(struct block_device *bdev, int partno,
-		sector_t start, sector_t length)
+int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+		sector_t length)
{
	struct block_device *part = NULL;
	int ret = -ENXIO;

-	mutex_lock(&bdev->bd_disk->open_mutex);
-	part = xa_load(&bdev->bd_disk->part_tbl, partno);
+	mutex_lock(&disk->open_mutex);
+	part = xa_load(&disk->part_tbl, partno);
if (!part)
goto out_unlock;
@ -512,14 +511,14 @@ int bdev_resize_partition(struct block_device *bdev, int partno,
goto out_unlock;
ret = -EBUSY;
-	if (partition_overlaps(bdev->bd_disk, start, length, partno))
+	if (partition_overlaps(disk, start, length, partno))
goto out_unlock;
bdev_set_nr_sectors(part, length);
ret = 0;
out_unlock:
-	mutex_unlock(&bdev->bd_disk->open_mutex);
+	mutex_unlock(&disk->open_mutex);
return ret;
}
@ -667,7 +666,7 @@ int bdev_disk_changed(struct gendisk *disk, bool invalidate)
lockdep_assert_held(&disk->open_mutex);
-	if (!(disk->flags & GENHD_FL_UP))
+	if (!disk_live(disk))
return -ENXIO;
rescan:
@ -715,10 +714,10 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
-	struct address_space *mapping = state->bdev->bd_inode->i_mapping;
+	struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
	struct page *page;

-	if (n >= get_capacity(state->bdev->bd_disk)) {
+	if (n >= get_capacity(state->disk)) {
state->access_beyond_eod = true;
return NULL;
}

--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -124,19 +124,17 @@ efi_crc32(const void *buf, unsigned long len)
/**
* last_lba(): return number of last logical block of device
- * @bdev: block device
+ * @disk: block device
*
* Description: Returns last LBA value on success, 0 on error.
* This is stored (by sd and ide-geometry) in
* the part[0] entry for this disk, and is the number of
* physical sectors available on the disk.
*/
-static u64 last_lba(struct block_device *bdev)
+static u64 last_lba(struct gendisk *disk)
{
-	if (!bdev || !bdev->bd_inode)
-		return 0;
-	return div_u64(bdev->bd_inode->i_size,
-		       bdev_logical_block_size(bdev)) - 1ULL;
+	return div_u64(disk->part0->bd_inode->i_size,
+		       queue_logical_block_size(disk->queue)) - 1ULL;
}
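As a quick check of the arithmetic: with 512-byte logical blocks, an 8 GiB device (8589934592 bytes) holds 16777216 blocks, so last_lba() returns 16777215.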
static inline int pmbr_part_valid(gpt_mbr_record *part)
@ -231,17 +229,17 @@ done:
* @buffer: destination buffer
* @count: bytes to read
*
- * Description: Reads @count bytes from @state->bdev into @buffer.
+ * Description: Reads @count bytes from @state->disk into @buffer.
* Returns number of bytes read on success, 0 on error.
*/
static size_t read_lba(struct parsed_partitions *state,
u64 lba, u8 *buffer, size_t count)
{
size_t totalreadcount = 0;
-	struct block_device *bdev = state->bdev;
-	sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
+	sector_t n = lba *
+		(queue_logical_block_size(state->disk->queue) / 512);

-	if (!buffer || lba > last_lba(bdev))
+	if (!buffer || lba > last_lba(state->disk))
return 0;
while (count) {
@ -302,14 +300,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
* @lba: the Logical Block Address of the partition table
*
* Description: returns GPT header on success, NULL on error. Allocates
- * and fills a GPT header starting at @lba from @state->bdev.
+ * and fills a GPT header starting at @lba from @state->disk.
* Note: remember to free gpt when finished with it.
*/
static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
u64 lba)
{
gpt_header *gpt;
-	unsigned ssz = bdev_logical_block_size(state->bdev);
+	unsigned ssz = queue_logical_block_size(state->disk->queue);
gpt = kmalloc(ssz, GFP_KERNEL);
if (!gpt)
@ -356,10 +354,10 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
/* Check the GUID Partition Table header size is too big */
if (le32_to_cpu((*gpt)->header_size) >
-			bdev_logical_block_size(state->bdev)) {
+			queue_logical_block_size(state->disk->queue)) {
		pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
			le32_to_cpu((*gpt)->header_size),
-			bdev_logical_block_size(state->bdev));
+			queue_logical_block_size(state->disk->queue));
goto fail;
}
@ -395,7 +393,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
/* Check the first_usable_lba and last_usable_lba are
* within the disk.
*/
-	lastlba = last_lba(state->bdev);
+	lastlba = last_lba(state->disk);
if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
(unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
@ -587,13 +585,15 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
gpt_header *pgpt = NULL, *agpt = NULL;
gpt_entry *pptes = NULL, *aptes = NULL;
legacy_mbr *legacymbr;
-	sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
+	struct gendisk *disk = state->disk;
+	const struct block_device_operations *fops = disk->fops;
+	sector_t total_sectors = get_capacity(state->disk);
u64 lastlba;
if (!ptes)
return 0;
-	lastlba = last_lba(state->bdev);
+	lastlba = last_lba(state->disk);
if (!force_gpt) {
/* This will be added to the EFI Spec. per Intel after v1.02. */
legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
@ -621,6 +621,16 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
if (!good_agpt && force_gpt)
good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
if (!good_agpt && force_gpt && fops->alternative_gpt_sector) {
sector_t agpt_sector;
int err;
err = fops->alternative_gpt_sector(disk, &agpt_sector);
if (!err)
good_agpt = is_gpt_valid(state, agpt_sector,
&agpt, &aptes);
}
/* The obviously unsuccessful case */
if (!good_pgpt && !good_agpt)
goto fail;
@ -705,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
gpt_header *gpt = NULL;
gpt_entry *ptes = NULL;
u32 i;
-	unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+	unsigned ssz = queue_logical_block_size(state->disk->queue) / 512;
if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
@ -722,7 +732,7 @@ int efi_partition(struct parsed_partitions *state)
u64 size = le64_to_cpu(ptes[i].ending_lba) -
le64_to_cpu(ptes[i].starting_lba) + 1ULL;
-		if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
+		if (!is_pte_valid(&ptes[i], last_lba(state->disk)))
continue;
put_partition(state, i+1, start * ssz, size * ssz);

--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -290,8 +290,8 @@ static int find_cms1_partitions(struct parsed_partitions *state,
int ibm_partition(struct parsed_partitions *state)
{
int (*fn)(struct gendisk *disk, dasd_information2_t *info);
-	struct block_device *bdev = state->bdev;
-	struct gendisk *disk = bdev->bd_disk;
+	struct gendisk *disk = state->disk;
+	struct block_device *bdev = disk->part0;
int blocksize, res;
loff_t i_size, offset, size;
dasd_information2_t *info;

--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -304,7 +304,7 @@ static bool ldm_validate_privheads(struct parsed_partitions *state,
}
}
-	num_sects = state->bdev->bd_inode->i_size >> 9;
+	num_sects = get_capacity(state->disk);
if ((ph[0]->config_start > num_sects) ||
((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
@ -339,11 +339,11 @@ out:
/**
* ldm_validate_tocblocks - Validate the table of contents and its backups
* @state: Partition check state including device holding the LDM Database
- * @base: Offset, into @state->bdev, of the database
+ * @base: Offset, into @state->disk, of the database
* @ldb: Cache of the database structures
*
* Find and compare the four tables of contents of the LDM Database stored on
- * @state->bdev and return the parsed information into @toc1.
+ * @state->disk and return the parsed information into @toc1.
*
* The offsets and sizes of the configs are range-checked against a privhead.
*
@ -486,8 +486,8 @@ out:
* only likely to happen if the underlying device is strange. If that IS
* the case we should return zero to let someone else try.
*
- * Return:  'true'   @state->bdev is a dynamic disk
- *          'false'  @state->bdev is not a dynamic disk, or an error occurred
+ * Return:  'true'   @state->disk is a dynamic disk
+ *          'false'  @state->disk is not a dynamic disk, or an error occurred
*/
static bool ldm_validate_partition_table(struct parsed_partitions *state)
{
@ -1340,7 +1340,7 @@ static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
/**
* ldm_get_vblks - Read the on-disk database of VBLKs into memory
* @state: Partition check state including device holding the LDM Database
- * @base: Offset, into @state->bdev, of the database
+ * @base: Offset, into @state->disk, of the database
* @ldb: Cache of the database structures
*
* To use the information from the VBLKs, they need to be read from the disk,
@ -1432,10 +1432,10 @@ static void ldm_free_vblks (struct list_head *lh)
* example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
* and so on: the actual data containing partitions.
*
- * Return:  1 Success, @state->bdev is a dynamic disk and we handled it
- *          0 Success, @state->bdev is not a dynamic disk
+ * Return:  1 Success, @state->disk is a dynamic disk and we handled it
+ *          0 Success, @state->disk is not a dynamic disk
 *         -1 An error occurred before enough information had been read
- *            Or @state->bdev is a dynamic disk, but it may be corrupted
+ *            Or @state->disk is a dynamic disk, but it may be corrupted
*/
int ldm_partition(struct parsed_partitions *state)
{

--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -133,7 +133,7 @@ int mac_partition(struct parsed_partitions *state)
}
#ifdef CONFIG_PPC_PMAC
if (found_root_goodness)
-		note_bootable_part(state->bdev->bd_dev, found_root,
+		note_bootable_part(state->disk->part0->bd_dev, found_root,
found_root_goodness);
#endif

--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -135,11 +135,12 @@ static void parse_extended(struct parsed_partitions *state,
Sector sect;
unsigned char *data;
sector_t this_sector, this_size;
-	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+	sector_t sector_size;
int loopct = 0; /* number of links followed
without finding a data partition */
int i;
+	sector_size = queue_logical_block_size(state->disk->queue) / 512;
this_sector = first_sector;
this_size = first_size;
@ -579,7 +580,7 @@ static struct {
int msdos_partition(struct parsed_partitions *state)
{
-	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
+	sector_t sector_size;
Sector sect;
unsigned char *data;
struct msdos_partition *p;
@ -587,6 +588,7 @@ int msdos_partition(struct parsed_partitions *state)
int slot;
u32 disksig;
+	sector_size = queue_logical_block_size(state->disk->queue) / 512;
data = read_part_sector(state, 0, &sect);
if (!data)
return -1;

--- a/block/partitions/sgi.c
+++ b/block/partitions/sgi.c
@@ -43,7 +43,6 @@ int sgi_partition(struct parsed_partitions *state)
Sector sect;
struct sgi_disklabel *label;
struct sgi_partition *p;
-	char b[BDEVNAME_SIZE];
label = read_part_sector(state, 0, &sect);
if (!label)
@ -52,7 +51,7 @@ int sgi_partition(struct parsed_partitions *state)
magic = label->magic_mushroom;
if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
/*printk("Dev %s SGI disklabel: bad magic %08x\n",
-		       bdevname(bdev, b), be32_to_cpu(magic));*/
+		       state->disk->disk_name, be32_to_cpu(magic));*/
put_dev_sector(sect);
return 0;
}
@ -63,7 +62,7 @@ int sgi_partition(struct parsed_partitions *state)
}
if(csum) {
printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
-		       bdevname(state->bdev, b));
+		       state->disk->disk_name);
put_dev_sector(sect);
return 0;
}

--- a/block/partitions/sun.c
+++ b/block/partitions/sun.c
@@ -65,7 +65,6 @@ int sun_partition(struct parsed_partitions *state)
} * label;
struct sun_partition *p;
unsigned long spc;
-	char b[BDEVNAME_SIZE];
int use_vtoc;
int nparts;
@ -76,7 +75,7 @@ int sun_partition(struct parsed_partitions *state)
p = label->partitions;
if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
-		   bdevname(bdev, b), be16_to_cpu(label->magic)); */
+		   state->disk->disk_name, be16_to_cpu(label->magic)); */
put_dev_sector(sect);
return 0;
}
@ -86,7 +85,7 @@ int sun_partition(struct parsed_partitions *state)
csum ^= *ush--;
if (csum) {
printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
-		       bdevname(state->bdev, b));
+		       state->disk->disk_name);
put_dev_sector(sect);
return 0;
}

--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -147,11 +147,10 @@ static void t10_pi_type1_prepare(struct request *rq)
break;
bip_for_each_vec(iv, bip, iter) {
-			void *p, *pmap;
			unsigned int j;
+			void *p;

-			pmap = kmap_atomic(iv.bv_page);
-			p = pmap + iv.bv_offset;
+			p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len; j += tuple_sz) {
struct t10_pi_tuple *pi = p;
@ -161,8 +160,7 @@ static void t10_pi_type1_prepare(struct request *rq)
ref_tag++;
p += tuple_sz;
}
-			kunmap_atomic(pmap);
+			kunmap_local(p);
}
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
@ -195,11 +193,10 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
struct bvec_iter iter;
bip_for_each_vec(iv, bip, iter) {
-			void *p, *pmap;
			unsigned int j;
+			void *p;

-			pmap = kmap_atomic(iv.bv_page);
-			p = pmap + iv.bv_offset;
+			p = bvec_kmap_local(&iv);
for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
struct t10_pi_tuple *pi = p;
@ -210,8 +207,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
intervals--;
p += tuple_sz;
}
-			kunmap_atomic(pmap);
+			kunmap_local(p);
}
}
}
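bvec_kmap_local()/kunmap_local() replace the kmap_atomic() + bv_offset arithmetic throughout this series; the general shape of the conversion (a sketch, do_something() is a placeholder):

	void *p = bvec_kmap_local(&bv);	/* maps the page and adds bv.bv_offset */

	do_something(p, bv.bv_len);
	kunmap_local(p);		/* any address inside the mapping works */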

--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -27,9 +27,6 @@
#include <linux/uaccess.h>
-#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
/*
* Each block ramdisk device has a radix_tree brd_pages of pages that stores
* the pages containing the block device's contents. A brd page's ->index is

--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1364,7 +1364,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
-		blk_queue_update_readahead(q);
+		disk_update_readahead(device->vdisk);
}
fixup_discard_if_not_supported(q);
fixup_write_zeroes(device, q);

--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -905,13 +905,12 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
-	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
-		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
-		return bdi_read_congested(bdi);
+		return bdi_read_congested(
+			device->ldev->backing_bdev->bd_disk->bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -774,6 +774,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_err;
/* and ... switch */
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
@ -1257,6 +1258,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
goto out_unlock;
}
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
@ -1304,10 +1306,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
-	/* Grab the block_device to prevent its destruction after we
-	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
-	 */
-	bdgrab(bdev);
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
@ -1398,7 +1396,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
blk_queue_physical_block_size(lo->lo_queue, 512);
blk_queue_io_min(lo->lo_queue, 512);
if (bdev) {
-		bdput(bdev);
invalidate_bdev(bdev);
bdev->bd_inode->i_mapping->wb_err = 0;
}
@ -1415,6 +1412,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan) {
@ -2335,7 +2333,8 @@ static int loop_add(int i)
lo->tag_set.queue_depth = 128;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
+		BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
@ -2391,6 +2390,8 @@ static int loop_add(int i)
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
add_disk(disk);
mutex_unlock(&loop_ctl_mutex);

--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -11,10 +11,6 @@
#include <linux/init.h>
#include "null_blk.h"
-#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
-#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
-#define SECTOR_MASK		(PAGE_SECTORS - 1)
#define FREE_BATCH 16
#define TICKS_PER_SEC 50ULL
@ -1721,8 +1717,7 @@ static int null_gendisk_register(struct nullb *nullb)
return ret;
}
-	add_disk(disk);
-	return 0;
+	return add_disk(disk);
}
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)

--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1183,10 +1183,8 @@ try_next_bio:
wakeup = (pd->write_congestion_on > 0
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
-	if (wakeup) {
-		clear_bdi_congested(pd->disk->queue->backing_dev_info,
-				    BLK_RW_ASYNC);
-	}
+	if (wakeup)
+		clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE);
@ -2366,7 +2364,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
-		set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
+		set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);

--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -83,26 +83,12 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
unsigned int offset = 0;
struct req_iterator iter;
struct bio_vec bvec;
-	unsigned int i = 0;
-	size_t size;
-	void *buf;

	rq_for_each_segment(bvec, req, iter) {
-		unsigned long flags;
-		dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
-			__func__, __LINE__, i, bio_sectors(iter.bio),
-			iter.bio->bi_iter.bi_sector);
-
-		size = bvec.bv_len;
-		buf = bvec_kmap_irq(&bvec, &flags);
		if (gather)
-			memcpy(dev->bounce_buf+offset, buf, size);
+			memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
		else
-			memcpy(buf, dev->bounce_buf+offset, size);
-		offset += size;
-		flush_kernel_dcache_page(bvec.bv_page);
-		bvec_kunmap_irq(buf, &flags);
-		i++;
+			memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
+		offset += bvec.bv_len;
}
}
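memcpy_from_bvec()/memcpy_to_bvec() fold the whole map-copy-flush-unmap dance into one helper; semantically they behave roughly like the following (sketch of the from-direction only, not the actual implementation):

	void *p = bvec_kmap_local(bvec);

	memcpy(to, p, bvec->bv_len);	/* memcpy_to_bvec() copies the other way */
	kunmap_local(p);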

--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -541,7 +541,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
bio_for_each_segment(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
-		char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+		char *ptr = bvec_virt(&bvec);
size_t len = bvec.bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,

--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1219,24 +1219,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
rbd_dev->mapping.size = 0;
}
-static void zero_bvec(struct bio_vec *bv)
-{
-	void *buf;
-	unsigned long flags;
-
-	buf = bvec_kmap_irq(bv, &flags);
-	memset(buf, 0, bv->bv_len);
-	flush_dcache_page(bv->bv_page);
-	bvec_kunmap_irq(buf, &flags);
-}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
struct ceph_bio_iter it = *bio_pos;
ceph_bio_iter_advance(&it, off);
ceph_bio_iter_advance_step(&it, bytes, ({
-		zero_bvec(&bv);
+		memzero_bvec(&bv);
}));
}
@ -1246,7 +1235,7 @@ static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
ceph_bvec_iter_advance(&it, off);
ceph_bvec_iter_advance_step(&it, bytes, ({
-		zero_bvec(&bv);
+		memzero_bvec(&bv);
}));
}
@ -2997,8 +2986,7 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
};
ceph_bvec_iter_advance_step(&it, bytes, ({
-		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
-			       bv.bv_len))
+		if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
return false;
}));
return true;

--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1373,7 +1373,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no)
if (!disk)
return;
-	if (disk->flags & GENHD_FL_UP)
+	if (host->state > HST_DEV_ACTIVATE)
del_gendisk(disk);
blk_cleanup_disk(disk);
}

--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -166,11 +166,8 @@ static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		kfree(page_address(req->special_vec.bv_page) +
-		      req->special_vec.bv_offset);
-	}
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
+		kfree(bvec_virt(&req->special_vec));
blk_mq_end_request(req, virtblk_result(vbr));
}
@ -844,7 +841,7 @@ static int virtblk_probe(struct virtio_device *vdev)
"block size is changed unexpectedly, now is %u\n",
blk_size);
err = -EINVAL;
-		goto err_cleanup_disk;
+		goto out_cleanup_disk;
}
/* Use topology information if available */
@ -902,10 +899,13 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
-	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
+	if (err)
+		goto out_cleanup_disk;

	return 0;

-err_cleanup_disk:
+out_cleanup_disk:
blk_cleanup_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);

--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -15,6 +15,7 @@ if MD
config BLK_DEV_MD
tristate "RAID support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
help
This driver lets you combine several hard disk partitions into one
logical block device. This can be used to simply append one
@ -201,6 +202,7 @@ config BLK_DEV_DM_BUILTIN
config BLK_DEV_DM
tristate "Device mapper support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select BLK_DEV_DM_BUILTIN
depends on DAX || DAX=n
help
@ -340,7 +342,7 @@ config DM_WRITECACHE
config DM_EBS
tristate "Emulated block size target (EXPERIMENTAL)"
depends on BLK_DEV_DM
depends on BLK_DEV_DM && !HIGHMEM
select DM_BUFIO
help
dm-ebs emulates smaller logical block size on backing devices

--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -2,6 +2,7 @@
config BCACHE
tristate "Block device as cache"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select CRC64
help
Allows a block device to be used as cache for other devices; uses

--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -378,7 +378,7 @@ static void do_btree_node_write(struct btree *b)
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bv, b->bio, iter_all) {
-		memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+		memcpy(bvec_virt(bv), addr, PAGE_SIZE);
addr += PAGE_SIZE;
}

--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -885,11 +885,6 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
-		bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
-
-		if (disk_added)
-			del_gendisk(disk);
blk_cleanup_disk(disk);
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
@ -931,20 +926,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
-		return -ENOMEM;
+		goto out_free_stripe_sectors_dirty;

	idx = ida_simple_get(&bcache_device_idx, 0,
				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
-		return idx;
+		goto out_free_full_dirty_stripes;

	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
-		goto err;
+		goto out_ida_remove;

	d->disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!d->disk)
-		goto err;
+		goto out_bioset_exit;
set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@ -987,8 +982,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
return 0;
-err:
+out_bioset_exit:
	bioset_exit(&d->bio_split);
+out_ida_remove:
	ida_simple_remove(&bcache_device_idx, idx);
out_free_full_dirty_stripes:
kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
kvfree(d->stripe_sectors_dirty);
return -ENOMEM;
}
@ -1365,8 +1366,10 @@ static void cached_dev_free(struct closure *cl)
mutex_lock(&bch_register_lock);
if (atomic_read(&dc->running))
-	if (atomic_read(&dc->running))
+	if (atomic_read(&dc->running)) {
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+		del_gendisk(dc->disk.disk);
+	}
list_del(&dc->list);
@ -1512,6 +1515,7 @@ static void flash_dev_free(struct closure *cl)
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),
&d->c->flash_dev_dirty_sectors);
del_gendisk(d->disk);
bcache_device_free(d);
mutex_unlock(&bch_register_lock);
kobject_put(&d->kobj);

--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,8 +15,6 @@
#include "closure.h"
-#define PAGE_SECTORS		(PAGE_SIZE / 512)
struct closure;
#ifdef CONFIG_BCACHE_DEBUG

--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -74,7 +74,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
if (unlikely(!bv->bv_page || !bv_len))
return -EIO;
pa = page_address(bv->bv_page) + bv->bv_offset;
pa = bvec_virt(bv);
/* Handle overlapping page <-> blocks */
while (bv_len) {


@ -1819,7 +1819,7 @@ again:
unsigned this_len;
BUG_ON(PageHighMem(biv.bv_page));
tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
tag = bvec_virt(&biv);
this_len = min(biv.bv_len, data_to_process);
r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
@ -2006,7 +2006,7 @@ retry_kmap:
unsigned tag_now = min(biv.bv_len, tag_todo);
char *tag_addr;
BUG_ON(PageHighMem(biv.bv_page));
tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
tag_addr = bvec_virt(&biv);
if (likely(dio->op == REQ_OP_WRITE))
memcpy(tag_ptr, tag_addr, tag_now);
else


@ -1436,9 +1436,6 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
}
if (dm_get_md_type(md) == DM_TYPE_NONE) {
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {
@ -2187,7 +2184,6 @@ int __init dm_early_create(struct dm_ioctl *dmi,
if (r)
goto err_destroy_table;
md->type = dm_table_get_type(t);
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {


@ -559,7 +559,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
if (err)
goto out_tag_set;
elevator_init_mq(md->queue);
return 0;
out_tag_set:


@ -2076,7 +2076,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
dm_update_keyslot_manager(q, t);
blk_queue_update_readahead(q);
disk_update_readahead(t->md->disk);
return 0;
}


@ -1214,14 +1214,13 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
unsigned long flags;
unsigned size;
int rw = bio_data_dir(bio);
unsigned remaining_size = wc->block_size;
do {
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
buf = bvec_kmap_irq(&bv, &flags);
buf = bvec_kmap_local(&bv);
size = bv.bv_len;
if (unlikely(size > remaining_size))
size = remaining_size;
@ -1239,7 +1238,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
memcpy_flushcache_optimized(data, buf, size);
}
bvec_kunmap_irq(buf, &flags);
kunmap_local(buf);
data = (char *)data + size;
remaining_size -= size;

View File

@ -1693,14 +1693,13 @@ static void cleanup_mapped_device(struct mapped_device *md)
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
del_gendisk(md->disk);
}
if (md->queue)
if (dm_get_md_type(md) != DM_TYPE_NONE) {
dm_sysfs_exit(md);
del_gendisk(md->disk);
}
dm_queue_destroy_keyslot_manager(md->queue);
if (md->disk)
blk_cleanup_disk(md->disk);
}
cleanup_srcu_struct(&md->io_barrier);
@ -1792,7 +1791,6 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
}
add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@ -1993,19 +1991,12 @@ static struct dm_table *__unbind(struct mapped_device *md)
*/
int dm_create(int minor, struct mapped_device **result)
{
int r;
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
r = dm_sysfs_init(md);
if (r) {
free_dev(md);
return r;
}
*result = md;
return 0;
}
@ -2056,9 +2047,9 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
*/
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
enum dm_queue_mode type = dm_table_get_type(t);
struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
int r;
switch (type) {
case DM_TYPE_REQUEST_BASED:
@ -2086,8 +2077,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (r)
return r;
blk_register_queue(md->disk);
add_disk(md->disk);
r = dm_sysfs_init(md);
if (r) {
del_gendisk(md->disk);
return r;
}
md->type = type;
return 0;
}
@ -2193,7 +2190,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
dm_sysfs_exit(md);
dm_table_destroy(__unbind(md));
free_dev(md);
}


@ -764,9 +764,7 @@ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
{
int flags = rdev->bdev->bd_disk->flags;
if (!(flags & GENHD_FL_UP)) {
if (!disk_live(rdev->bdev->bd_disk)) {
if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
pr_warn("md: %s: %s array has a missing/failed member\n",
mdname(rdev->mddev), md_type);
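
disk_live() is the helper that replaces these open-coded GENHD_FL_UP tests:
it reports whether a gendisk has been registered with add_disk() and not yet
deleted. In this tree it is implemented along the following lines, keyed off
the hash state of the disk's bdev inode rather than a flags bit:

    /* sketch of the helper from include/linux/genhd.h */
    static inline bool disk_live(struct gendisk *disk)
    {
            return !inode_unhashed(disk->part0->bd_inode);
    }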


@ -128,8 +128,6 @@ struct mmc_blk_data {
* track of the current selected device partition.
*/
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
/* debugfs files (only in main mmc_blk_data) */
@ -281,6 +279,9 @@ out_put:
return count;
}
static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
power_ro_lock_show, power_ro_lock_store);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@ -313,6 +314,44 @@ out:
return ret;
}
static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
static struct attribute *mmc_disk_attrs[] = {
&dev_attr_force_ro.attr,
&dev_attr_ro_lock_until_next_power_on.attr,
NULL,
};
static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
umode_t mode = a->mode;
if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
(md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
md->queue.card->ext_csd.boot_ro_lockable) {
mode = S_IRUGO;
if (!(md->queue.card->ext_csd.boot_ro_lock &
EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
mode |= S_IWUSR;
}
mmc_blk_put(md);
return mode;
}
static const struct attribute_group mmc_disk_attr_group = {
.is_visible = mmc_disk_attrs_is_visible,
.attrs = mmc_disk_attrs,
};
static const struct attribute_group *mmc_disk_attr_groups[] = {
&mmc_disk_attr_group,
NULL,
};
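
Registering the attributes as a static group passed to device_add_disk()
(further down) replaces the old device_create_file() calls that ran after the
disk was already live. The attributes now exist before the KOBJ_ADD uevent
fires, so udev can never observe a half-populated sysfs directory, and the
files are removed automatically with the disk. The same pattern in generic
form, with illustrative names:

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", 1);
    }
    static DEVICE_ATTR_RO(demo);

    static struct attribute *demo_disk_attrs[] = {
            &dev_attr_demo.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(demo_disk);

    /* in the probe path: attributes appear together with the disk */
    err = device_add_disk(parent, disk, demo_disk_groups);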
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@ -792,6 +831,26 @@ static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
}
#endif
static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
sector_t *sector)
{
struct mmc_blk_data *md;
int ret;
md = mmc_blk_get(disk);
if (!md)
return -EINVAL;
if (md->queue.card)
ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
else
ret = -ENODEV;
mmc_blk_put(md);
return ret;
}
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
@ -801,6 +860,7 @@ static const struct block_device_operations mmc_bdops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_blk_compat_ioctl,
#endif
.alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
};
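
->alternative_gpt_sector() is the new block_device_operations method that
lets a driver report a non-standard GPT header location; the EFI partition
scanner consults it when present. Roughly, the lookup on the scanning side
works like this (a sketch of the logic added to block/partitions/efi.c, not a
verbatim copy):

    sector_t agpt_sector;
    int err;

    if (disk->fops->alternative_gpt_sector) {
            err = disk->fops->alternative_gpt_sector(disk, &agpt_sector);
            if (!err)
                    agpt = alloc_read_gpt_header(state, agpt_sector);
    }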
static int mmc_blk_part_switch_pre(struct mmc_card *card,
@ -2289,7 +2349,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
sector_t size,
bool default_ro,
const char *subname,
int area_type)
int area_type,
unsigned int part_type)
{
struct mmc_blk_data *md;
int devidx, ret;
@ -2336,6 +2397,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
kref_init(&md->kref);
md->queue.blkdata = md;
md->part_type = part_type;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->minors = perdev_minors;
@ -2388,6 +2450,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
return md;
err_kfree:
@ -2417,7 +2483,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
}
return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
MMC_BLK_DATA_AREA_MAIN);
MMC_BLK_DATA_AREA_MAIN, 0);
}
static int mmc_blk_alloc_part(struct mmc_card *card,
@ -2431,10 +2497,9 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
subname, area_type);
subname, area_type, part_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
part_md->part_type = part_type;
list_add(&part_md->part, &md->part);
return 0;
@ -2635,27 +2700,13 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
struct mmc_card *card;
if (md) {
/*
* Flush remaining requests and free queues. It
* is freeing the queue that stops new requests
* from being accepted.
*/
card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
card->ext_csd.boot_ro_lockable)
device_remove_file(disk_to_dev(md->disk),
&md->power_ro_lock);
del_gendisk(md->disk);
}
mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
/*
* Flush remaining requests and free queues. It is freeing the queue
* that stops new requests from being accepted.
*/
del_gendisk(md->disk);
mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
static void mmc_blk_remove_parts(struct mmc_card *card,
@ -2679,51 +2730,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
int ret;
struct mmc_card *card = md->queue.card;
device_add_disk(md->parent, md->disk, NULL);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
md->force_ro.attr.name = "force_ro";
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
if (ret)
goto force_ro_fail;
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
card->ext_csd.boot_ro_lockable) {
umode_t mode;
if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
mode = S_IRUGO;
else
mode = S_IRUGO | S_IWUSR;
md->power_ro_lock.show = power_ro_lock_show;
md->power_ro_lock.store = power_ro_lock_store;
sysfs_attr_init(&md->power_ro_lock.attr);
md->power_ro_lock.attr.mode = mode;
md->power_ro_lock.attr.name =
"ro_lock_until_next_power_on";
ret = device_create_file(disk_to_dev(md->disk),
&md->power_ro_lock);
if (ret)
goto power_ro_lock_fail;
}
return ret;
power_ro_lock_fail:
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
del_gendisk(md->disk);
return ret;
}
#ifdef CONFIG_DEBUG_FS
static int mmc_dbg_card_status_get(void *data, u64 *val)
@ -2889,7 +2895,7 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
struct mmc_blk_data *md;
int ret = 0;
/*
@ -2917,18 +2923,6 @@ static int mmc_blk_probe(struct mmc_card *card)
if (ret)
goto out;
dev_set_drvdata(&card->dev, md);
ret = mmc_add_disk(md);
if (ret)
goto out;
list_for_each_entry(part_md, &md->part, part) {
ret = mmc_add_disk(part_md);
if (ret)
goto out;
}
/* Add two debugfs entries */
mmc_blk_add_debugfs(card, md);


@ -2149,6 +2149,41 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
{
unsigned int boot_sectors_num;
if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
return -EOPNOTSUPP;
/* filter out unrelated cards */
if (card->ext_csd.rev < 3 ||
!mmc_card_mmc(card) ||
!mmc_card_is_blockaddr(card) ||
mmc_card_is_removable(card->host))
return -ENOENT;
/*
* eMMC storage has two special boot partitions in addition to the
* main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main
* accesses, this means that the partition table addresses are shifted
* by the size of boot partitions. In accordance with the eMMC
* specification, the boot partition size is calculated as follows:
*
* boot partition size = 128K byte x BOOT_SIZE_MULT
*
* Calculate the number of sectors occupied by both boot partitions.
*/
boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
SZ_512 * MMC_NUM_BOOT_PARTITION;
/* Defined by NVIDIA and used by Android devices. */
*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
return 0;
}
EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
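
Worked example of the arithmetic above, for a hypothetical eMMC with
BOOT_SIZE_MULT = 16 and 61,071,360 user-area sectors: each boot partition is
128 KiB x 16 = 2 MiB, i.e. 4096 sectors of 512 bytes, both together occupy
8192 sectors, and the GPT header is therefore expected at sector
61071360 - 8192 - 1 = 61063167. The same calculation as a standalone
userspace check:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_128K 0x20000u
    #define SZ_512  0x200u
    #define MMC_NUM_BOOT_PARTITION 2

    int main(void)
    {
            uint64_t boot_size_mult = 16;     /* hypothetical BOOT_SIZE_MULT */
            uint64_t card_sectors = 61071360; /* hypothetical card size */
            uint64_t boot_sectors = boot_size_mult * SZ_128K / SZ_512 *
                                    MMC_NUM_BOOT_PARTITION;

            printf("boot sectors: %llu, GPT expected at sector %llu\n",
                   (unsigned long long)boot_sectors,
                   (unsigned long long)(card_sectors - boot_sectors - 1));
            return 0;
    }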
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =


@ -119,6 +119,8 @@ void mmc_release_host(struct mmc_host *host);
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *sector);
/**
* mmc_claim_host - exclusively claim a host
* @host: mmc host to claim


@ -418,6 +418,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
card->ext_csd.raw_hc_erase_grp_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
card->ext_csd.raw_boot_mult =
ext_csd[EXT_CSD_BOOT_MULT];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];


@ -116,6 +116,8 @@
*/
#define NVQUIRK_HAS_TMCLK BIT(10)
#define NVQUIRK_HAS_ANDROID_GPT_SECTOR BIT(11)
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
@ -1361,6 +1363,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
.pdata = &sdhci_tegra20_pdata,
.dma_mask = DMA_BIT_MASK(32),
.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
NVQUIRK_HAS_ANDROID_GPT_SECTOR |
NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
@ -1390,6 +1393,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
NVQUIRK_HAS_ANDROID_GPT_SECTOR |
NVQUIRK_HAS_PADCALIB,
};
@ -1422,6 +1426,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
.dma_mask = DMA_BIT_MASK(32),
.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
@ -1438,6 +1443,7 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
.pdata = &sdhci_tegra124_pdata,
.dma_mask = DMA_BIT_MASK(34),
.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
static const struct sdhci_ops tegra210_sdhci_ops = {
@ -1616,6 +1622,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
tegra_host->pad_control_available = false;
tegra_host->soc_data = soc_data;
if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
if (rc == 0)


@ -968,12 +968,11 @@ void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
struct page *page = req->special_vec.bv_page;
if (page == ctrl->discard_page)
if (req->special_vec.bv_page == ctrl->discard_page)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(page_address(page) + req->special_vec.bv_offset);
kfree(bvec_virt(&req->special_vec));
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@ -1822,7 +1821,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
return !(disk->flags & GENHD_FL_UP);
return !disk_live(disk);
}
static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
@ -1890,7 +1889,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
nvme_update_disk_info(ns->head->disk, ns, id);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
blk_queue_update_readahead(ns->head->disk->queue);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
return 0;
@ -3729,9 +3728,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!ns)
goto out_free_id;
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
disk->private_data = ns;
ns->disk = disk;
ns->queue = disk->queue;
if (ctrl->opts && ctrl->opts->data_digest)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
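
blk_mq_alloc_disk() replaces the blk_mq_init_queue() plus alloc_disk_node()
pair: the returned gendisk already has ->queue created from the tag set and
holding a proper reference, and both are torn down together by
blk_cleanup_disk(), which is what lets the error paths below collapse. A
minimal sketch of the allocation side (demo_fops and dev are illustrative):

    struct gendisk *disk;

    disk = blk_mq_alloc_disk(&dev->tag_set, dev);   /* dev becomes queuedata */
    if (IS_ERR(disk))
            return PTR_ERR(disk);

    disk->fops = &demo_fops;
    disk->private_data = dev;
    /*
     * disk->queue is already set up; on error, a single
     * blk_cleanup_disk(disk) releases both the disk and the queue.
     */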
@ -3740,20 +3744,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
kref_init(&ns->kref);
if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
goto out_free_queue;
goto out_cleanup_disk;
disk = alloc_disk_node(0, node);
if (!disk)
goto out_unlink_ns;
disk->fops = &nvme_bdev_ops;
disk->private_data = ns;
disk->queue = ns->queue;
/*
* Without the multipath code enabled, multiple controller per
* subsystems are visible as devices and thus we cannot use the
@ -3762,15 +3758,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
ns->head->instance);
ns->disk = disk;
if (nvme_update_ns_info(ns, id))
goto out_put_disk;
goto out_unlink_ns;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk->disk_name, node)) {
dev_warn(ctrl->device, "LightNVM init failure\n");
goto out_put_disk;
goto out_unlink_ns;
}
}
@ -3789,10 +3784,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
kfree(id);
return;
out_put_disk:
/* prevent double queue cleanup */
ns->disk->queue = NULL;
put_disk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@ -3800,8 +3792,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
list_del_init(&ns->head->entry);
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
out_free_queue:
blk_cleanup_queue(ns->queue);
out_cleanup_disk:
blk_cleanup_disk(disk);
out_free_ns:
kfree(ns);
out_free_id:
@ -3826,14 +3818,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_mpath_clear_current_path(ns);
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
if (ns->disk->flags & GENHD_FL_UP) {
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
}
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);


@ -765,7 +765,7 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
if (!head->disk)
return;
kblockd_schedule_work(&head->requeue_work);
if (head->disk->flags & GENHD_FL_UP) {
if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}


@ -552,7 +552,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;


@ -3276,7 +3276,7 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
end_blk = (curr_trk + 1) * recs_per_trk;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
if (first_blk + blk_count >= end_blk) {
cqr->proc_bytes = blk_count * blksize;
@ -4008,7 +4008,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
@ -4175,7 +4175,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
@ -4518,7 +4518,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
@ -4551,7 +4551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
}
} else {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
last_tidaw = itcw_add_tidaw(itcw, 0x00,
dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
@ -4787,7 +4787,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
@ -4848,7 +4848,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)


@ -501,7 +501,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
@ -583,7 +583,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)


@ -24,6 +24,8 @@
#include "dasd_int.h"
static struct lock_class_key dasd_bio_compl_lkclass;
/*
* Allocate and register gendisk structure for device.
*/
@ -38,13 +40,15 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
gdp = alloc_disk(1 << DASD_PARTN_BITS);
gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
&dasd_bio_compl_lkclass);
if (!gdp)
return -ENOMEM;
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->minors = 1 << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
/*
@ -73,7 +77,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
set_disk_ro(gdp, 1);
dasd_add_link_to_gendisk(gdp, base);
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
device_add_disk(&base->cdev->dev, block->gdp, NULL);
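
Drivers that manage a request_queue outside blk-mq (dasd here, sd and sr
below) now hand that queue to __alloc_disk_node() at allocation time instead
of assigning gdp->queue afterwards. The allocation takes a queue reference
that put_disk() later drops, so the gendisk carries a valid queue for its
entire lifetime; this is also why the manual blk_get_queue() workaround is
deleted from the st driver further down. The ownership rule, sketched with an
illustrative lock-class name (the key gives lockdep a per-driver class for
the disk's locks):

    static struct lock_class_key demo_bio_compl_lkclass;

    gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
                            &demo_bio_compl_lkclass);   /* takes a queue ref */
    if (!gdp)
            return -ENOMEM;
    /* ... use the disk ... */
    put_disk(gdp);                                      /* drops that ref */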


@ -892,8 +892,7 @@ dcssblk_submit_bio(struct bio *bio)
index = (bio->bi_iter.bi_sector >> 3);
bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
page_address(bvec.bv_page) + bvec.bv_offset;
page_addr = (unsigned long)bvec_virt(&bvec);
source_addr = dev_info->start + (index<<12) + bytes_done;
if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
// More paranoia.


@ -129,6 +129,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;
static const char *sd_cache_types[] = {
"write through", "none", "write back",
@ -886,7 +887,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
cmd->cmnd[0] = UNMAP;
cmd->cmnd[8] = 24;
buf = page_address(rq->special_vec.bv_page);
buf = bvec_virt(&rq->special_vec);
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
put_unaligned_be64(lba, &buf[8]);
@ -3408,7 +3409,8 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
gd = alloc_disk(SD_MINORS);
gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
&sd_bio_compl_lkclass);
if (!gd)
goto out_free;
@ -3454,10 +3456,10 @@ static int sd_probe(struct device *dev)
gd->major = sd_major((index & 0xf0) >> 4);
gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
gd->minors = SD_MINORS;
gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
/* defaults, until the device tells us otherwise */
sdp->sector_size = 512;


@ -166,7 +166,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
char name[DISK_NAME_LEN];
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
@ -202,8 +202,7 @@ static void sg_device_destroy(struct kref *kref);
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a)
/*
* The SCSI interfaces that use read() and write() as an asynchronous variant of
@ -832,7 +831,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->disk, srp->rq, at_head, sg_rq_end_io);
blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
return 0;
}
@ -1119,8 +1118,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
return blk_trace_setup(sdp->device->request_queue, sdp->name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL, p);
case BLKTRACESTART:
@ -1456,7 +1454,7 @@ static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sg_alloc(struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
@ -1492,9 +1490,7 @@ sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sprintf(sdp->name, "sg%d", k);
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
@ -1521,19 +1517,11 @@ static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
@ -1543,7 +1531,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
sdp = sg_alloc(disk, scsidp);
sdp = sg_alloc(scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
@ -1561,7 +1549,7 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", disk->disk_name);
sdp, "%s", sdp->name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
@ -1589,7 +1577,6 @@ cdev_add_err:
kfree(sdp);
out:
put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
@ -1613,7 +1600,6 @@ sg_device_destroy(struct kref *kref)
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
@ -2606,7 +2592,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
seq_printf(s, " >>> device=%s ", sdp->name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {


@ -106,6 +106,8 @@ static struct scsi_driver sr_template = {
static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG];
static DEFINE_SPINLOCK(sr_index_lock);
static struct lock_class_key sr_bio_compl_lkclass;
/* This semaphore is used to mediate the 0->1 reference get in the
* face of object destruction (i.e. we can't allow a get on an
* object after last put) */
@ -712,7 +714,8 @@ static int sr_probe(struct device *dev)
kref_init(&cd->kref);
disk = alloc_disk(1);
disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
&sr_bio_compl_lkclass);
if (!disk)
goto fail_free;
mutex_init(&cd->lock);
@ -729,6 +732,7 @@ static int sr_probe(struct device *dev)
disk->major = SCSI_CDROM_MAJOR;
disk->first_minor = minor;
disk->minors = 1;
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
@ -762,7 +766,6 @@ static int sr_probe(struct device *dev)
set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver;
disk->queue = sdev->request_queue;
if (register_cdrom(disk, &cd->cdi))
goto fail_minor;


@ -309,13 +309,8 @@ static char * st_incompatible(struct scsi_device* SDp)
}
static inline char *tape_name(struct scsi_tape *tape)
{
return tape->disk->disk_name;
}
#define st_printk(prefix, t, fmt, a...) \
sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a)
#ifdef DEBUG
#define DEBC_printk(t, fmt, a...) \
if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
@ -363,7 +358,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
int result = SRpnt->result;
u8 scode;
DEB(const char *stp;)
char *name = tape_name(STp);
char *name = STp->name;
struct st_cmdstatus *cmdstatp;
if (!result)
@ -3841,8 +3836,9 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
!capable(CAP_SYS_RAWIO))
i = -EPERM;
else
i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
file->f_mode, cmd_in, p);
i = scsi_cmd_ioctl(STp->device->request_queue,
NULL, file->f_mode, cmd_in,
p);
if (i != -ENOTTY)
return i;
break;
@ -4216,7 +4212,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
i = mode << (4 - ST_NBR_MODE_BITS);
snprintf(name, 10, "%s%s%s", rew ? "n" : "",
tape->disk->disk_name, st_formats[i]);
tape->name, st_formats[i]);
dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
cdev_devno, &tape->modes[mode], "%s", name);
@ -4271,7 +4267,6 @@ static void remove_cdevs(struct scsi_tape *tape)
static int st_probe(struct device *dev)
{
struct scsi_device *SDp = to_scsi_device(dev);
struct gendisk *disk = NULL;
struct scsi_tape *tpnt = NULL;
struct st_modedef *STm;
struct st_partstat *STps;
@ -4301,27 +4296,13 @@ static int st_probe(struct device *dev)
goto out;
}
disk = alloc_disk(1);
if (!disk) {
sdev_printk(KERN_ERR, SDp,
"st: out of memory. Device not attached.\n");
goto out_buffer_free;
}
tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
if (tpnt == NULL) {
sdev_printk(KERN_ERR, SDp,
"st: Can't allocate device descriptor.\n");
goto out_put_disk;
goto out_buffer_free;
}
kref_init(&tpnt->kref);
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
/* SCSI tape doesn't register this gendisk via add_disk(). Manually
* take queue reference that release_disk() expects. */
if (!blk_get_queue(SDp->request_queue))
goto out_put_disk;
disk->queue = SDp->request_queue;
tpnt->driver = &st_template;
tpnt->device = SDp;
@ -4394,10 +4375,10 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
goto out_put_queue;
goto out_free_tape;
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);
sprintf(tpnt->name, "st%d", tpnt->index);
tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL);
if (tpnt->stats == NULL) {
sdev_printk(KERN_ERR, SDp,
@ -4414,9 +4395,9 @@ static int st_probe(struct device *dev)
scsi_autopm_put_device(SDp);
sdev_printk(KERN_NOTICE, SDp,
"Attached scsi tape %s\n", tape_name(tpnt));
"Attached scsi tape %s\n", tpnt->name);
sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
tpnt->name, tpnt->try_dio ? "yes" : "no",
queue_dma_alignment(SDp->request_queue) + 1);
return 0;
@ -4428,10 +4409,7 @@ out_idr_remove:
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
out_put_queue:
blk_put_queue(disk->queue);
out_put_disk:
put_disk(disk);
out_free_tape:
kfree(tpnt);
out_buffer_free:
kfree(buffer);
@ -4470,7 +4448,6 @@ static int st_remove(struct device *dev)
static void scsi_tape_release(struct kref *kref)
{
struct scsi_tape *tpnt = to_scsi_tape(kref);
struct gendisk *disk = tpnt->disk;
tpnt->device = NULL;
@ -4480,8 +4457,6 @@ static void scsi_tape_release(struct kref *kref)
kfree(tpnt->buffer);
}
disk->private_data = NULL;
put_disk(disk);
kfree(tpnt->stats);
kfree(tpnt);
return;


@ -187,7 +187,7 @@ struct scsi_tape {
unsigned char last_cmnd[6];
unsigned char last_sense[16];
#endif
struct gendisk *disk;
char name[DISK_NAME_LEN];
struct kref kref;
struct scsi_tape_stats *stats;
};


@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"
#include "../block/blk.h"
struct bdev_inode {
struct block_device bdev;
@ -686,7 +687,8 @@ static loff_t block_llseek(struct file *file, loff_t offset, int whence)
return retval;
}
int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct inode *bd_inode = bdev_file_inode(filp);
struct block_device *bdev = I_BDEV(bd_inode);
@ -707,7 +709,6 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return error;
}
EXPORT_SYMBOL(blkdev_fsync);
/**
* bdev_read_page() - Start reading a page from a block device
@ -801,7 +802,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
memset(&ei->bdev, 0, sizeof(ei->bdev));
ei->bdev.bd_bdi = &noop_backing_dev_info;
return &ei->vfs_inode;
}
@ -812,8 +812,15 @@ static void bdev_free_inode(struct inode *inode)
free_percpu(bdev->bd_stats);
kfree(bdev->bd_meta_info);
if (!bdev_is_partition(bdev))
if (!bdev_is_partition(bdev)) {
if (bdev->bd_disk && bdev->bd_disk->bdi)
bdi_put(bdev->bd_disk->bdi);
kfree(bdev->bd_disk);
}
if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
blk_free_ext_minor(MINOR(bdev->bd_dev));
kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
@ -826,16 +833,9 @@ static void init_once(void *data)
static void bdev_evict_inode(struct inode *inode)
{
struct block_device *bdev = &BDEV_I(inode)->bdev;
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode); /* is it needed here? */
clear_inode(inode);
/* Detach inode from wb early as bdi_put() may free bdi->wb */
inode_detach_wb(inode);
if (bdev->bd_bdi != &noop_backing_dev_info) {
bdi_put(bdev->bd_bdi);
bdev->bd_bdi = &noop_backing_dev_info;
}
}
static const struct super_operations bdev_sops = {
@ -902,9 +902,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
bdev->bd_disk = disk;
bdev->bd_partno = partno;
bdev->bd_inode = inode;
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
bdev->bd_stats = alloc_percpu(struct disk_stats);
if (!bdev->bd_stats) {
iput(inode);
@ -921,31 +918,6 @@ void bdev_add(struct block_device *bdev, dev_t dev)
insert_inode_hash(bdev->bd_inode);
}
static struct block_device *bdget(dev_t dev)
{
struct inode *inode;
inode = ilookup(blockdev_superblock, dev);
if (!inode)
return NULL;
return &BDEV_I(inode)->bdev;
}
/**
* bdgrab -- Grab a reference to an already referenced block device
* @bdev: Block device to grab a reference to.
*
* Returns the block_device with an additional reference when successful,
* or NULL if the inode is already being freed.
*/
struct block_device *bdgrab(struct block_device *bdev)
{
if (!igrab(bdev->bd_inode))
return NULL;
return bdev;
}
EXPORT_SYMBOL(bdgrab);
long nr_blockdev_pages(void)
{
struct inode *inode;
@ -959,12 +931,6 @@ long nr_blockdev_pages(void)
return ret;
}
void bdput(struct block_device *bdev)
{
iput(bdev->bd_inode);
}
EXPORT_SYMBOL(bdput);
/**
* bd_may_claim - test whether a block device can be claimed
* @bdev: block device of interest
@ -1094,148 +1060,6 @@ void bd_abort_claiming(struct block_device *bdev, void *holder)
}
EXPORT_SYMBOL(bd_abort_claiming);
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
struct list_head list;
struct gendisk *disk;
int refcnt;
};
static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
struct gendisk *disk)
{
struct bd_holder_disk *holder;
list_for_each_entry(holder, &bdev->bd_holder_disks, list)
if (holder->disk == disk)
return holder;
return NULL;
}
static int add_symlink(struct kobject *from, struct kobject *to)
{
return sysfs_create_link(from, to, kobject_name(to));
}
static void del_symlink(struct kobject *from, struct kobject *to)
{
sysfs_remove_link(from, kobject_name(to));
}
/**
* bd_link_disk_holder - create symlinks between holding disk and slave bdev
* @bdev: the claimed slave bdev
* @disk: the holding disk
*
* DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
*
* This functions creates the following sysfs symlinks.
*
* - from "slaves" directory of the holder @disk to the claimed @bdev
* - from "holders" directory of the @bdev to the holder @disk
*
* For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
* passed to bd_link_disk_holder(), then:
*
* /sys/block/dm-0/slaves/sda --> /sys/block/sda
* /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
*
* The caller must have claimed @bdev before calling this function and
* ensure that both @bdev and @disk are valid during the creation and
* lifetime of these symlinks.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder_disk *holder;
int ret = 0;
mutex_lock(&bdev->bd_disk->open_mutex);
WARN_ON_ONCE(!bdev->bd_holder);
/* FIXME: remove the following once add_disk() handles errors */
if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
goto out_unlock;
holder = bd_find_holder_disk(bdev, disk);
if (holder) {
holder->refcnt++;
goto out_unlock;
}
holder = kzalloc(sizeof(*holder), GFP_KERNEL);
if (!holder) {
ret = -ENOMEM;
goto out_unlock;
}
INIT_LIST_HEAD(&holder->list);
holder->disk = disk;
holder->refcnt = 1;
ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
if (ret)
goto out_free;
ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
if (ret)
goto out_del;
/*
* bdev could be deleted beneath us which would implicitly destroy
* the holder directory. Hold on to it.
*/
kobject_get(bdev->bd_holder_dir);
list_add(&holder->list, &bdev->bd_holder_disks);
goto out_unlock;
out_del:
del_symlink(disk->slave_dir, bdev_kobj(bdev));
out_free:
kfree(holder);
out_unlock:
mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
/**
* bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
* @bdev: the claimed slave bdev
* @disk: the holding disk
*
* DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
*
* CONTEXT:
* Might sleep.
*/
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder_disk *holder;
mutex_lock(&bdev->bd_disk->open_mutex);
holder = bd_find_holder_disk(bdev, disk);
if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
del_symlink(disk->slave_dir, bdev_kobj(bdev));
del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
kobject_put(bdev->bd_holder_dir);
list_del_init(&holder->list);
kfree(holder);
}
mutex_unlock(&bdev->bd_disk->open_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
static void blkdev_flush_mapping(struct block_device *bdev)
{
WARN_ON_ONCE(bdev->bd_holders);
@ -1260,11 +1084,8 @@ static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
}
}
if (!bdev->bd_openers) {
if (!bdev->bd_openers)
set_init_blocksize(bdev);
if (bdev->bd_bdi == &noop_backing_dev_info)
bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
}
if (test_bit(GD_NEED_PART_SCAN, &disk->state))
bdev_disk_changed(disk, false);
bdev->bd_openers++;
@ -1282,16 +1103,14 @@ static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
struct gendisk *disk = part->bd_disk;
struct block_device *whole;
int ret;
if (part->bd_openers)
goto done;
whole = bdgrab(disk->part0);
ret = blkdev_get_whole(whole, mode);
ret = blkdev_get_whole(bdev_whole(part), mode);
if (ret)
goto out_put_whole;
return ret;
ret = -ENXIO;
if (!bdev_nr_sectors(part))
@ -1299,16 +1118,12 @@ static int blkdev_get_part(struct block_device *part, fmode_t mode)
disk->open_partitions++;
set_init_blocksize(part);
if (part->bd_bdi == &noop_backing_dev_info)
part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
done:
part->bd_openers++;
return 0;
out_blkdev_put:
blkdev_put_whole(whole, mode);
out_put_whole:
bdput(whole);
blkdev_put_whole(bdev_whole(part), mode);
return ret;
}
@ -1321,42 +1136,42 @@ static void blkdev_put_part(struct block_device *part, fmode_t mode)
blkdev_flush_mapping(part);
whole->bd_disk->open_partitions--;
blkdev_put_whole(whole, mode);
bdput(whole);
}
struct block_device *blkdev_get_no_open(dev_t dev)
{
struct block_device *bdev;
struct gendisk *disk;
struct inode *inode;
bdev = bdget(dev);
if (!bdev) {
inode = ilookup(blockdev_superblock, dev);
if (!inode) {
blk_request_module(dev);
bdev = bdget(dev);
if (!bdev)
inode = ilookup(blockdev_superblock, dev);
if (!inode)
return NULL;
}
disk = bdev->bd_disk;
if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj))
goto bdput;
if ((disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
goto put_disk;
if (!try_module_get(bdev->bd_disk->fops->owner))
goto put_disk;
/* switch from the inode reference to a device mode one: */
bdev = &BDEV_I(inode)->bdev;
if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
bdev = NULL;
iput(inode);
if (!bdev)
return NULL;
if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
!try_module_get(bdev->bd_disk->fops->owner)) {
put_device(&bdev->bd_device);
return NULL;
}
return bdev;
put_disk:
put_disk(disk);
bdput:
bdput(bdev);
return NULL;
}
void blkdev_put_no_open(struct block_device *bdev)
{
module_put(bdev->bd_disk->fops->owner);
put_disk(bdev->bd_disk);
bdput(bdev);
put_device(&bdev->bd_device);
}
/**
@ -1409,7 +1224,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
mutex_lock(&disk->open_mutex);
ret = -ENXIO;
if (!(disk->flags & GENHD_FL_UP))
if (!disk_live(disk))
goto abort_claiming;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);


@ -378,7 +378,7 @@ out:
ret = kstrtol(name, 10, &data);
if (ret)
return ret;
if (data >= IOPRIO_BE_NR || data < 0)
if (data >= IOPRIO_NR_LEVELS || data < 0)
return -EINVAL;
cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
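
Part of the I/O priority cleanups: the bound is now spelled IOPRIO_NR_LEVELS
(8 levels, 0-7), making clear it applies to every scheduling class rather
than just best-effort as the old IOPRIO_BE_NR name suggested. Validating and
composing a value, sketched:

    #include <linux/ioprio.h>

    static int demo_build_be_ioprio(long level, int *ioprio)
    {
            if (level < 0 || level >= IOPRIO_NR_LEVELS)
                    return -EINVAL;
            /* class in the high bits, per-class level in the low bits */
            *ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, level);
            return 0;
    }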


@ -5,6 +5,7 @@
#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"
struct fatent_operations {


@ -1053,7 +1053,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_max_links = NILFS_LINK_MAX;
sb->s_bdi = bdi_get(sb->s_bdev->bd_bdi);
sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi);
err = load_nilfs(nilfs, sb);
if (err)


@ -49,8 +49,7 @@ static int copy_bio_to_actor(struct bio *bio,
bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
memcpy(actor_addr + actor_offset,
page_address(bvec->bv_page) + bvec->bv_offset + offset,
memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
bytes_to_copy);
actor_offset += bytes_to_copy;
@ -177,7 +176,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
goto out_free_bio;
}
/* Extract the length of the metadata block */
data = page_address(bvec->bv_page) + bvec->bv_offset;
data = bvec_virt(bvec);
length = data[offset];
if (offset < bvec->bv_len - 1) {
length |= data[offset + 1] << 8;
@ -186,7 +185,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
res = -EIO;
goto out_free_bio;
}
data = page_address(bvec->bv_page) + bvec->bv_offset;
data = bvec_virt(bvec);
length |= data[0] << 8;
}
bio_free_pages(bio);

Some files were not shown because too many files have changed in this diff.