for-5.16/drivers-2021-10-29

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmF8KFsQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgph1ZEACwNuHkAZcIgNzKhzuLP9OjMhv9vV+q254G
 /EcM31e+qgRioMd0ihbVsgW76jOwLEmb3ldKGcN+0Wo5+Sv9Im8+wAWYY1REOZO5
 ZTUBfAzhEh63/EtqTFiU8U+7dmXqy4z7NaICnhlynjwkd3IT+I561os6kcqwJMMr
 G+Q1Cnk9rgCMIoLOCoVThIpjmjyZzF33qJb2VEIkHfkot62iNdpABWaSASF+CCba
 z8LfbvLAYz3YLl4thXlLJFU282T5y7gzgSomGvX4F0rMJSbqFbgoNEPxaYw9CvzC
 uC6MnYCYdCdvVkWVm1b8I8LYzPd5GrpVOSh3JQGvuA4Ppv2IyJCDSruYGgVUlhao
 cVPzuHCqNCfKk0ykYVRZy9oKiBk5wmFeKM/lSHu408y8VNraPNIAEpB6sA9qGr22
 AYr8lNh3JDr0g8dtFsDOq+7u3MANW0KQozfzwTPZo6NjzEE1D2jIg39Ljiijo9+Y
 3pU8pitIAhsKd2KhW1H6LmtJbF4dX756VKYDXOhzgORU0NZYgvGhBIj9tAdpQR0S
 xeae5Kj0/wBGcqR/owf/n1EY/q7rWgNDETnsBhbmzMZyhwH3L6zhT+bfD8YoQCHY
 ueyqhyIUe4YBxTrIpICqwDlqaMYAmQ0jRaci+bK9ovVlQ89FQ9o/BE2COPlI/DGX
 w+rUmmoX4g==
 =HiWU
 -----END PGP SIGNATURE-----

Merge tag 'for-5.16/drivers-2021-10-29' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

 - paride driver cleanups (Christoph)

 - Remove cryptoloop support (Christoph)

 - null_blk poll support (me)

 - Now that add_disk() supports proper error handling, add it to various
   drivers (Luis); a sketch of the common pattern follows this list

 - Make ataflop actually work again (Michael)

 - s390 dasd fixes (Stefan, Heiko)

 - nbd fixes (Yu, Ye)

 - Remove redundant wq flush in mtip32xx (Christophe)

 - NVMe updates
      - fix a multipath partition scanning deadlock (Hannes Reinecke)
      - generate uevent once a multipath namespace is operational again
        (Hannes Reinecke)
      - support unique discovery controller NQNs (Hannes Reinecke)
      - fix use-after-free when a port is removed (Israel Rukshin)
      - clear shadow doorbell memory on resets (Keith Busch)
      - use struct_size (Len Baker)
      - add error handling support for add_disk (Luis Chamberlain)
      - limit the maximal queue size for RDMA controllers (Max Gurtovoy)
      - use a few more symbolic names (Max Gurtovoy)
      - fix error code in nvme_rdma_setup_ctrl (Max Gurtovoy)
      - add support for ->map_queues on FC (Saurav Kashyap)
      - support the current discovery subsystem entry (Hannes Reinecke)
      - use flex_array_size and struct_size (Len Baker)

 - bcache fixes (Christoph, Coly, Chao, Lin, Qing)

 - MD updates (Christoph, Guoqing, Xiao)

 - Misc fixes (Dan, Ding, Jiapeng, Shin'ichiro, Ye)
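
Most of the add_disk() conversions in this pull follow the same shape:
the driver used to ignore the return value of add_disk() or
device_add_disk(), and now checks it and unwinds with
blk_cleanup_disk() (plus blk_mq_free_tag_set() where a tag set is
involved). A minimal sketch of that pattern against the 5.16-era block
API; the "mydrv" names are made up for illustration and do not belong
to any driver touched here:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/numa.h>

    /* hypothetical device structure, for illustration only */
    struct mydrv_device {
            struct gendisk *disk;
            sector_t sectors;
    };

    static int mydrv_probe(struct mydrv_device *dev)
    {
            struct gendisk *disk;
            int err;

            disk = blk_alloc_disk(NUMA_NO_NODE);
            if (!disk)
                    return -ENOMEM;

            sprintf(disk->disk_name, "mydrv0");
            set_capacity(disk, dev->sectors);

            /* add_disk() now returns an error instead of void */
            err = add_disk(disk);
            if (err)
                    goto out_cleanup_disk;

            dev->disk = disk;
            return 0;

    out_cleanup_disk:
            /* undoes blk_alloc_disk(): puts the disk and its queue */
            blk_cleanup_disk(disk);
            return err;
    }

Drivers with more setup steps simply grow more unwind labels, as the
ubd, loop and nbd hunks below show.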

* tag 'for-5.16/drivers-2021-10-29' of git://git.kernel.dk/linux-block: (117 commits)
  null_blk: Fix handling of submit_queues and poll_queues attributes
  block: ataflop: Fix warning comparing pointer to 0
  bcache: replace snprintf in show functions with sysfs_emit
  bcache: move uapi header bcache.h to bcache code directory
  nvmet: use flex_array_size and struct_size
  nvmet: register discovery subsystem as 'current'
  nvmet: switch check for subsystem type
  nvme: add new discovery log page entry definitions
  block: ataflop: more blk-mq refactoring fixes
  block: remove support for cryptoloop and the xor transfer
  mtd: add add_disk() error handling
  rnbd: add error handling support for add_disk()
  um/drivers/ubd_kern: add error handling support for add_disk()
  m68k/emu/nfblock: add error handling support for add_disk()
  xen-blkfront: add error handling support for add_disk()
  bcache: add error handling support for add_disk()
  dm: add add_disk() error handling
  block: aoe: fixup coccinelle warnings
  nvmet: use struct_size over open coded arithmetic
  nvme: drop scan_lock and always kick requeue list when removing namespaces
  ...
Commit 643a7234e0 by Linus Torvalds, 2021-11-01 09:27:38 -07:00
80 changed files with 1529 additions and 1532 deletions


@ -99,6 +99,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
{
struct nfhd_device *dev;
int dev_id = id - NFHD_DEV_OFFSET;
int err = -ENOMEM;
pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id,
blocks, bsize);
@ -129,16 +130,20 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
sprintf(dev->disk->disk_name, "nfhd%u", dev_id);
set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
blk_queue_logical_block_size(dev->disk->queue, bsize);
add_disk(dev->disk);
err = add_disk(dev->disk);
if (err)
goto out_cleanup_disk;
list_add_tail(&dev->list, &nfhd_list);
return 0;
out_cleanup_disk:
blk_cleanup_disk(dev->disk);
free_dev:
kfree(dev);
out:
return -ENOMEM;
return err;
}
static int __init nfhd_init(void)


@ -855,8 +855,8 @@ static const struct attribute_group *ubd_attr_groups[] = {
NULL,
};
static void ubd_disk_register(int major, u64 size, int unit,
struct gendisk *disk)
static int ubd_disk_register(int major, u64 size, int unit,
struct gendisk *disk)
{
disk->major = major;
disk->first_minor = unit << UBD_SHIFT;
@ -873,7 +873,7 @@ static void ubd_disk_register(int major, u64 size, int unit,
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
device_add_disk(&ubd_devs[unit].pdev.dev, disk, ubd_attr_groups);
return device_add_disk(&ubd_devs[unit].pdev.dev, disk, ubd_attr_groups);
}
#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))
@ -920,10 +920,15 @@ static int ubd_add(int n, char **error_out)
blk_queue_write_cache(ubd_dev->queue, true, false);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
if (err)
goto out_cleanup_disk;
ubd_gendisk[n] = disk;
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&ubd_dev->tag_set);
out:


@ -258,6 +258,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
char tmp[2] = { '0' + which, 0 };
int err = -ENOMEM;
dev->fd = -1;
dev->filename = NULL;
@ -266,7 +267,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
dev->gd = blk_alloc_disk(NUMA_NO_NODE);
if (!dev->gd)
return -ENOMEM;
goto out;
dev->gd->major = simdisk_major;
dev->gd->first_minor = which;
dev->gd->minors = SIMDISK_MINORS;
@ -274,10 +275,18 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
set_capacity(dev->gd, 0);
add_disk(dev->gd);
err = add_disk(dev->gd);
if (err)
goto out_cleanup_disk;
dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
return 0;
out_cleanup_disk:
blk_cleanup_disk(dev->gd);
out:
return err;
}
static int __init simdisk_init(void)


@ -180,14 +180,6 @@ config BLK_DEV_LOOP
bits of, say, a sound file). This is also safe if the file resides
on a remote file server.
There are several ways of encrypting disks. Some of these require
kernel patches. The vanilla kernel offers the cryptoloop option
and a Device Mapper target (which is superior, as it supports all
file systems). If you want to use the cryptoloop, say Y to both
LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
or later) version of util-linux. Additionally, be aware that
the cryptoloop is not safe for storing journaled filesystems.
Note that this loop device has nothing to do with the loopback
device used for network connections from the machine to itself.
@ -211,21 +203,6 @@ config BLK_DEV_LOOP_MIN_COUNT
is used, it can be set to 0, since needed loop devices can be
dynamically allocated with the /dev/loop-control interface.
config BLK_DEV_CRYPTOLOOP
tristate "Cryptoloop Support (DEPRECATED)"
select CRYPTO
select CRYPTO_CBC
depends on BLK_DEV_LOOP
help
Say Y here if you want to be able to use the ciphers that are
provided by the CryptoAPI as loop transformation. This might be
used as hard disk encryption.
WARNING: This device is not safe for journaled file systems like
ext3 or Reiserfs. Please use the Device Mapper crypto module
instead, which can be configured to be on-disk compatible with the
cryptoloop device. cryptoloop support will be removed in Linux 5.16.
source "drivers/block/drbd/Kconfig"
config BLK_DEV_NBD


@ -24,7 +24,6 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o


@ -1780,6 +1780,7 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
@ -1798,8 +1799,10 @@ static int fd_alloc_disk(int drive, int system)
set_capacity(disk, 880 * 2);
unit[drive].gendisk[system] = disk;
add_disk(disk);
return 0;
err = add_disk(disk);
if (err)
blk_cleanup_disk(disk);
return err;
}
static int fd_alloc_drive(int drive)


@ -37,8 +37,7 @@ static ssize_t aoedisk_show_state(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return snprintf(page, PAGE_SIZE,
"%s%s\n",
return sysfs_emit(page, "%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
@ -52,8 +51,8 @@ static ssize_t aoedisk_show_mac(struct device *dev,
struct aoetgt *t = d->targets[0];
if (t == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
return sysfs_emit(page, "none\n");
return sysfs_emit(page, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
@ -85,7 +84,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
ne = nd;
nd = nds;
if (*nd == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
return sysfs_emit(page, "none\n");
for (p = page; nd < ne; nd++)
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
@ -99,7 +98,7 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
struct device_attribute *attr, char *page)
@ -107,7 +106,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
@ -417,7 +416,9 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
device_add_disk(NULL, gd, aoe_attr_groups);
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
@ -426,6 +427,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
return;
out_disk_cleanup:
blk_cleanup_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
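
The aoe hunks above, like the bcache commit in the shortlog, replace
snprintf(page, PAGE_SIZE, ...) with sysfs_emit(page, ...) in sysfs
show functions. sysfs_emit() knows a show buffer is exactly PAGE_SIZE
bytes and page aligned, and warns on misuse, so the explicit size
argument disappears. A minimal sketch with a made-up attribute, not
taken from the aoe driver:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* hypothetical driver state, for illustration only */
    struct mydrv_device {
            bool up;
    };

    static ssize_t state_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            struct mydrv_device *d = dev_get_drvdata(dev);

            /*
             * No PAGE_SIZE argument: sysfs_emit() validates the
             * buffer itself and returns the bytes written.
             */
            return sysfs_emit(buf, "%s\n", d->up ? "up" : "down");
    }
    static DEVICE_ATTR_RO(state);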


@ -299,6 +299,7 @@ static struct atari_floppy_struct {
disk change detection) */
int flags; /* flags */
struct gendisk *disk[NUM_DISK_MINORS];
bool registered[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
@ -457,10 +458,20 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
DPRINT(("fd_end_request_cur(), bytes %d of %d\n",
blk_rq_cur_bytes(fd_request),
blk_rq_bytes(fd_request)));
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
DPRINT(("calling __blk_mq_end_request()\n"));
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
} else {
/* requeue rest of request */
DPRINT(("calling blk_mq_requeue_request()\n"));
blk_mq_requeue_request(fd_request, true);
fd_request = NULL;
}
}
@ -654,9 +665,6 @@ static inline void copy_buffer(void *from, void *to)
*p2++ = *p1++;
}
/* General Interrupt Handling */
static void (*FloppyIRQHandler)( int status ) = NULL;
@ -701,12 +709,21 @@ static void fd_error( void )
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
finish_fdc();
return;
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
}
/* need to re-run request to recalibrate */
atari_disable_irq( IRQ_MFP_FDC );
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
atari_enable_irq( IRQ_MFP_FDC );
}
@ -733,8 +750,10 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
type--;
if (type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType)
minor2disktype[type].drive_types > DriveType) {
finish_fdc();
return -EINVAL;
}
}
q = unit[drive].disk[type]->queue;
@ -752,6 +771,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
finish_fdc();
ret = -EINVAL;
goto out;
}
@ -792,6 +812,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
finish_fdc();
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
@ -826,6 +847,7 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
finish_fdc();
return;
}
}
@ -1230,6 +1252,7 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
finish_fdc();
}
return;
@ -1351,7 +1374,7 @@ static void fd_times_out(struct timer_list *unused)
static void finish_fdc( void )
{
if (!NeedSeek) {
if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) {
finish_fdc_done( 0 );
}
else {
@ -1386,7 +1409,8 @@ static void finish_fdc_done( int dummy )
start_motor_off_timer();
local_irq_save(flags);
stdma_release();
if (stdma_is_locked_by(floppy_irq))
stdma_release();
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@ -1436,8 +1460,7 @@ static int floppy_revalidate(struct gendisk *disk)
unsigned int drive = p - unit;
if (test_bit(drive, &changed_floppies) ||
test_bit(drive, &fake_change) ||
p->disktype == 0) {
test_bit(drive, &fake_change) || !p->disktype) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
@ -1476,15 +1499,6 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
spin_lock_irq(&ataflop_lock);
atari_disable_irq(IRQ_MFP_FDC);
finish_fdc();
atari_enable_irq(IRQ_MFP_FDC);
spin_unlock_irq(&ataflop_lock);
}
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@ -1492,6 +1506,10 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
int drive = floppy - unit;
int type = floppy->type;
DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n",
drive, type, blk_rq_cur_sectors(bd->rq),
blk_rq_sectors(bd->rq), bd->last));
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
@ -1512,6 +1530,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
@ -1528,11 +1547,13 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
stdma_release();
goto out;
}
type = minor2disktype[type].index;
@ -1551,8 +1572,6 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
setup_req_params( drive );
do_fd_action( drive );
if (bd->last)
finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
out:
@ -1635,6 +1654,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
finish_fdc();
return -EINVAL;
}
@ -1702,8 +1722,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
setprm.head != 2)
setprm.head != 2) {
finish_fdc();
return -EINVAL;
}
UDT = dtp;
set_capacity(disk, UDT->blocks);
@ -1963,7 +1985,6 @@ static const struct block_device_operations floppy_fops = {
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
.commit_rqs = ataflop_commit_rqs,
};
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
@ -2001,12 +2022,28 @@ static void ataflop_probe(dev_t dev)
return;
mutex_lock(&ataflop_probe_lock);
if (!unit[drive].disk[type]) {
if (ataflop_alloc_disk(drive, type) == 0)
if (ataflop_alloc_disk(drive, type) == 0) {
add_disk(unit[drive].disk[type]);
unit[drive].registered[type] = true;
}
}
mutex_unlock(&ataflop_probe_lock);
}
static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
{
int type;
for (type = 0; type < NUM_DISK_MINORS; type++) {
if (!fs->disk[type])
continue;
if (fs->registered[type])
del_gendisk(fs->disk[type]);
blk_cleanup_disk(fs->disk[type]);
}
blk_mq_free_tag_set(&fs->tag_set);
}
static int __init atari_floppy_init (void)
{
int i;
@ -2065,7 +2102,10 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
add_disk(unit[i].disk[0]);
ret = add_disk(unit[i].disk[0]);
if (ret)
goto err_out_dma;
unit[i].registered[0] = true;
}
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
@ -2075,12 +2115,11 @@ static int __init atari_floppy_init (void)
return 0;
err_out_dma:
atari_stram_free(DMABuffer);
err:
while (--i >= 0) {
blk_cleanup_queue(unit[i].disk[0]->queue);
put_disk(unit[i].disk[0]);
blk_mq_free_tag_set(&unit[i].tag_set);
}
while (--i >= 0)
atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_unlock:
@ -2129,18 +2168,10 @@ __setup("floppy=", atari_floppy_setup);
static void __exit atari_floppy_exit(void)
{
int i, type;
int i;
for (i = 0; i < FD_MAX_UNITS; i++) {
for (type = 0; type < NUM_DISK_MINORS; type++) {
if (!unit[i].disk[type])
continue;
del_gendisk(unit[i].disk[type]);
blk_cleanup_queue(unit[i].disk[type]->queue);
put_disk(unit[i].disk[type]);
}
blk_mq_free_tag_set(&unit[i].tag_set);
}
for (i = 0; i < FD_MAX_UNITS; i++)
atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
del_timer_sync(&fd_timer);


@ -1,206 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Linux loop encryption enabling module
Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
*/
#include <linux/module.h>
#include <crypto/skcipher.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include "loop.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
#define LOOP_IV_SECTOR_BITS 9
#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
static int
cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
{
int err = -EINVAL;
int cipher_len;
int mode_len;
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *mode;
char *cmsp = cms; /* c-m string pointer */
struct crypto_sync_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
goto out;
strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
cms[LO_NAME_SIZE - 1] = 0;
cipher_len = strcspn(cmsp, "-");
mode = cmsp + cipher_len;
mode_len = 0;
if (*mode) {
mode++;
mode_len = strcspn(mode, "-");
}
if (!mode_len) {
mode = "cbc";
mode_len = 3;
}
if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
return -EINVAL;
memmove(cms, mode, mode_len);
cmsp = cms + mode_len;
*cmsp++ = '(';
memcpy(cmsp, info->lo_crypt_name, cipher_len);
cmsp += cipher_len;
*cmsp++ = ')';
*cmsp = 0;
tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
info->lo_encrypt_key_size);
if (err != 0)
goto out_free_tfm;
lo->key_data = tfm;
return 0;
out_free_tfm:
crypto_free_sync_skcipher(tfm);
out:
return err;
}
typedef int (*encdec_cbc_t)(struct skcipher_request *req);
static int
cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
struct crypto_sync_skcipher *tfm = lo->key_data;
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
int err;
skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
sg_init_table(&sg_out, 1);
sg_init_table(&sg_in, 1);
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
encdecfunc = crypto_skcipher_decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
encdecfunc = crypto_skcipher_encrypt;
}
while (size > 0) {
const int sz = min(size, LOOP_IV_SECTOR_SIZE);
u32 iv[4] = { 0, };
iv[0] = cpu_to_le32(IV & 0xffffffff);
sg_set_page(&sg_in, in_page, sz, in_offs);
sg_set_page(&sg_out, out_page, sz, out_offs);
skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
err = encdecfunc(req);
if (err)
goto out;
IV++;
size -= sz;
in_offs += sz;
out_offs += sz;
}
err = 0;
out:
skcipher_request_zero(req);
return err;
}
static int
cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
{
return -EINVAL;
}
static int
cryptoloop_release(struct loop_device *lo)
{
struct crypto_sync_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
crypto_free_sync_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
return -EINVAL;
}
static struct loop_func_table cryptoloop_funcs = {
.number = LO_CRYPT_CRYPTOAPI,
.init = cryptoloop_init,
.ioctl = cryptoloop_ioctl,
.transfer = cryptoloop_transfer,
.release = cryptoloop_release,
.owner = THIS_MODULE
};
static int __init
init_cryptoloop(void)
{
int rc = loop_register_transfer(&cryptoloop_funcs);
if (rc)
printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
else
pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
return rc;
}
static void __exit
cleanup_cryptoloop(void)
{
if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
printk(KERN_ERR
"cryptoloop: loop_unregister_transfer failed\n");
}
module_init(init_cryptoloop);
module_exit(cleanup_cryptoloop);


@ -2794,7 +2794,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
goto out_idr_remove_vol;
}
add_disk(disk);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@ -2808,6 +2810,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
out_cleanup_disk:
blk_cleanup_disk(disk);
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:

View File

@ -4479,6 +4479,7 @@ static const struct blk_mq_ops floppy_mq_ops = {
};
static struct platform_device floppy_device[N_DRIVE];
static bool registered[N_DRIVE];
static bool floppy_available(int drive)
{
@ -4694,8 +4695,12 @@ static int __init do_floppy_init(void)
if (err)
goto out_remove_drives;
device_add_disk(&floppy_device[drive].dev, disks[drive][0],
NULL);
registered[drive] = true;
err = device_add_disk(&floppy_device[drive].dev,
disks[drive][0], NULL);
if (err)
goto out_remove_drives;
}
return 0;
@ -4704,7 +4709,8 @@ out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive][0]);
platform_device_unregister(&floppy_device[drive]);
if (registered[drive])
platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
@ -4947,30 +4953,14 @@ static void __exit floppy_module_exit(void)
if (disks[drive][i])
del_gendisk(disks[drive][i]);
}
platform_device_unregister(&floppy_device[drive]);
if (registered[drive])
platform_device_unregister(&floppy_device[drive]);
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
blk_cleanup_queue(disks[drive][i]->queue);
blk_cleanup_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
/*
* These disks have not called add_disk(). Don't put down
* queue reference in put_disk().
*/
if (!(allowed_drive_mask & (1 << drive)) ||
fdc_state[FDC(drive)].version == FDC_NONE) {
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
disks[drive][i]->queue = NULL;
}
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
put_disk(disks[drive][i]);
}
}
cancel_delayed_work_sync(&fd_timeout);


@ -133,58 +133,6 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
static int transfer_xor(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block)
{
char *raw_buf = kmap_atomic(raw_page) + raw_off;
char *loop_buf = kmap_atomic(loop_page) + loop_off;
char *in, *out, *key;
int i, keysize;
if (cmd == READ) {
in = raw_buf;
out = loop_buf;
} else {
in = loop_buf;
out = raw_buf;
}
key = lo->lo_encrypt_key;
keysize = lo->lo_encrypt_key_size;
for (i = 0; i < size; i++)
*out++ = *in++ ^ key[(i & 511) % keysize];
kunmap_atomic(loop_buf);
kunmap_atomic(raw_buf);
cond_resched();
return 0;
}
static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
if (unlikely(info->lo_encrypt_key_size <= 0))
return -EINVAL;
return 0;
}
static struct loop_func_table none_funcs = {
.number = LO_CRYPT_NONE,
};
static struct loop_func_table xor_funcs = {
.number = LO_CRYPT_XOR,
.transfer = transfer_xor,
.init = xor_init
};
/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
&none_funcs,
&xor_funcs
};
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
loff_t loopsize;
@ -228,8 +176,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
/*
* We support direct I/O only if lo_offset is aligned with the
* logical I/O size of backing device, and the logical block
* size of loop is bigger than the backing device's and the loop
* needn't transform transfer.
* size of loop is bigger than the backing device's.
*
* TODO: the above condition may be loosed in the future, and
* direct I/O may be switched runtime at that time because most
@ -238,8 +185,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
if (dio) {
if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
!(lo->lo_offset & dio_align) &&
mapping->a_ops->direct_IO &&
!lo->transfer)
mapping->a_ops->direct_IO)
use_dio = true;
else
use_dio = false;
@ -286,24 +232,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
struct page *rpage, unsigned roffs,
struct page *lpage, unsigned loffs,
int size, sector_t rblock)
{
int ret;
ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
if (likely(!ret))
return 0;
printk_ratelimited(KERN_ERR
"loop: Transfer error at byte offset %llu, length %i.\n",
(unsigned long long)rblock << 9, size);
return ret;
}
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
@ -343,41 +271,6 @@ static int lo_write_simple(struct loop_device *lo, struct request *rq,
return ret;
}
/*
* This is the slow, transforming version that needs to double buffer the
* data as it cannot do the transformations in place without having direct
* access to the destination pages of the backing file.
*/
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec, b;
struct req_iterator iter;
struct page *page;
int ret = 0;
page = alloc_page(GFP_NOIO);
if (unlikely(!page))
return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
bvec.bv_offset, bvec.bv_len, pos >> 9);
if (unlikely(ret))
break;
b.bv_page = page;
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
if (ret < 0)
break;
}
__free_page(page);
return ret;
}
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
@ -407,64 +300,12 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
static int lo_read_transfer(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec, b;
struct req_iterator iter;
struct iov_iter i;
struct page *page;
ssize_t len;
int ret = 0;
page = alloc_page(GFP_NOIO);
if (unlikely(!page))
return -ENOMEM;
rq_for_each_segment(bvec, rq, iter) {
loff_t offset = pos;
b.bv_page = page;
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
goto out_free_page;
}
ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
bvec.bv_offset, len, offset >> 9);
if (ret)
goto out_free_page;
flush_dcache_page(bvec.bv_page);
if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
}
ret = 0;
out_free_page:
__free_page(page);
return ret;
}
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use fallocate to manipulate the space mappings used by the image
* a.k.a. discard/zerorange. However we do not support this if
* encryption is enabled, because it may give an attacker useful
* information.
* a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
@ -647,16 +488,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
else if (cmd->use_aio)
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, WRITE);
else
return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
if (lo->transfer)
return lo_read_transfer(lo, rq, pos);
else if (cmd->use_aio)
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, READ);
else
return lo_read_simple(lo, rq, pos);
@ -921,7 +758,7 @@ static void loop_config_discard(struct loop_device *lo)
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
if (S_ISBLK(inode->i_mode)) {
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
@ -930,11 +767,9 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
* image a.k.a. discard.
*/
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
} else if (!file->f_op->fallocate) {
max_discard_sectors = 0;
granularity = 0;
@ -1071,43 +906,6 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
static int
loop_release_xfer(struct loop_device *lo)
{
int err = 0;
struct loop_func_table *xfer = lo->lo_encryption;
if (xfer) {
if (xfer->release)
err = xfer->release(lo);
lo->transfer = NULL;
lo->lo_encryption = NULL;
module_put(xfer->owner);
}
return err;
}
static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
const struct loop_info64 *i)
{
int err = 0;
if (xfer) {
struct module *owner = xfer->owner;
if (!try_module_get(owner))
return -EINVAL;
if (xfer->init)
err = xfer->init(lo, i);
if (err)
module_put(owner);
else
lo->lo_encryption = xfer;
}
return err;
}
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@ -1120,55 +918,27 @@ static int
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
err = loop_release_xfer(lo);
if (err)
return err;
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
if (type >= MAX_LO_CRYPT)
return -EINVAL;
xfer = xfer_funcs[type];
if (xfer == NULL)
return -EINVAL;
} else
xfer = NULL;
err = loop_init_xfer(lo, xfer, info);
if (err)
return err;
switch (info->lo_encrypt_type) {
case LO_CRYPT_NONE:
break;
case LO_CRYPT_XOR:
pr_warn("support for the xor transformation has been removed.\n");
return -EINVAL;
case LO_CRYPT_CRYPTOAPI:
pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
return -EINVAL;
default:
return -EINVAL;
}
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
if (!xfer)
xfer = &none_funcs;
lo->transfer = xfer->transfer;
lo->ioctl = xfer->ioctl;
lo->lo_flags = info->lo_flags;
lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
lo->lo_init[0] = info->lo_init[0];
lo->lo_init[1] = info->lo_init[1];
if (info->lo_encrypt_key_size) {
memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
info->lo_encrypt_key_size);
lo->lo_key_owner = uid;
}
return 0;
}
@ -1367,16 +1137,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
loop_release_xfer(lo);
lo->transfer = NULL;
lo->ioctl = NULL;
lo->lo_device = NULL;
lo->lo_encryption = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
lo->lo_encrypt_key_size = 0;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
blk_queue_logical_block_size(lo->lo_queue, 512);
blk_queue_physical_block_size(lo->lo_queue, 512);
@ -1478,7 +1241,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
kuid_t uid = current_uid();
int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@ -1486,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto out_unlock;
}
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@ -1577,14 +1333,6 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_sizelimit = lo->lo_sizelimit;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
info->lo_encrypt_type =
lo->lo_encryption ? lo->lo_encryption->number : 0;
if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
lo->lo_encrypt_key_size);
}
/* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
@ -1610,16 +1358,8 @@ loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
info64->lo_rdevice = info->lo_rdevice;
info64->lo_offset = info->lo_offset;
info64->lo_sizelimit = 0;
info64->lo_encrypt_type = info->lo_encrypt_type;
info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
info64->lo_flags = info->lo_flags;
info64->lo_init[0] = info->lo_init[0];
info64->lo_init[1] = info->lo_init[1];
if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
else
memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}
static int
@ -1631,16 +1371,8 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
info->lo_inode = info64->lo_inode;
info->lo_rdevice = info64->lo_rdevice;
info->lo_offset = info64->lo_offset;
info->lo_encrypt_type = info64->lo_encrypt_type;
info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
info->lo_flags = info64->lo_flags;
info->lo_init[0] = info64->lo_init[0];
info->lo_init[1] = info64->lo_init[1];
if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
else
memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info->lo_device != info64->lo_device ||
@ -1789,7 +1521,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
err = loop_set_block_size(lo, arg);
break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
err = -EINVAL;
}
mutex_unlock(&lo->lo_mutex);
return err;
@ -1865,7 +1597,6 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
compat_int_t lo_encrypt_type;
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
@ -1894,16 +1625,8 @@ loop_info64_from_compat(const struct compat_loop_info __user *arg,
info64->lo_rdevice = info.lo_rdevice;
info64->lo_offset = info.lo_offset;
info64->lo_sizelimit = 0;
info64->lo_encrypt_type = info.lo_encrypt_type;
info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
info64->lo_flags = info.lo_flags;
info64->lo_init[0] = info.lo_init[0];
info64->lo_init[1] = info.lo_init[1];
if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
else
memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
return 0;
}
@ -1923,24 +1646,14 @@ loop_info64_to_compat(const struct loop_info64 *info64,
info.lo_inode = info64->lo_inode;
info.lo_rdevice = info64->lo_rdevice;
info.lo_offset = info64->lo_offset;
info.lo_encrypt_type = info64->lo_encrypt_type;
info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
info.lo_flags = info64->lo_flags;
info.lo_init[0] = info64->lo_init[0];
info.lo_init[1] = info64->lo_init[1];
if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
else
memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info.lo_device != info64->lo_device ||
info.lo_rdevice != info64->lo_rdevice ||
info.lo_inode != info64->lo_inode ||
info.lo_offset != info64->lo_offset ||
info.lo_init[0] != info64->lo_init[0] ||
info.lo_init[1] != info64->lo_init[1])
info.lo_offset != info64->lo_offset)
return -EOVERFLOW;
if (copy_to_user(arg, &info, sizeof(info)))
@ -2081,43 +1794,6 @@ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
int loop_register_transfer(struct loop_func_table *funcs)
{
unsigned int n = funcs->number;
if (n >= MAX_LO_CRYPT || xfer_funcs[n])
return -EINVAL;
xfer_funcs[n] = funcs;
return 0;
}
int loop_unregister_transfer(int number)
{
unsigned int n = number;
struct loop_func_table *xfer;
if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
return -EINVAL;
/*
* This function is called from only cleanup_cryptoloop().
* Given that each loop device that has a transfer enabled holds a
* reference to the module implementing it we should never get here
* with a transfer that is set (unless forced module unloading is
* requested). Thus, check module's refcount and warn if this is
* not a clean unloading.
*/
#ifdef CONFIG_MODULE_UNLOAD
if (xfer->owner && module_refcount(xfer->owner) != -1)
pr_err("Danger! Unregistering an in use transfer function.\n");
#endif
xfer_funcs[n] = NULL;
return 0;
}
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@ -2374,13 +2050,19 @@ static int loop_add(int i)
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
/* Make this loop device reachable from pathname. */
add_disk(disk);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
return i;
out_cleanup_disk:
blk_cleanup_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:


@ -32,23 +32,10 @@ struct loop_device {
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
int (*transfer)(struct loop_device *, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block);
char lo_file_name[LO_NAME_SIZE];
char lo_crypt_name[LO_NAME_SIZE];
char lo_encrypt_key[LO_KEY_SIZE];
int lo_encrypt_key_size;
struct loop_func_table *lo_encryption;
__u32 lo_init[2];
kuid_t lo_key_owner; /* Who set the key */
int (*ioctl)(struct loop_device *, int cmd,
unsigned long arg);
struct file * lo_backing_file;
struct block_device *lo_device;
void *key_data;
gfp_t old_gfp_mask;
@ -82,21 +69,4 @@ struct loop_cmd {
struct cgroup_subsys_state *memcg_css;
};
/* Support for loadable transfer modules */
struct loop_func_table {
int number; /* filter type */
int (*transfer)(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t real_block);
int (*init)(struct loop_device *, const struct loop_info64 *);
/* release is called from loop_unregister_transfer or clr_fd */
int (*release)(struct loop_device *);
int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
struct module *owner;
};
int loop_register_transfer(struct loop_func_table *funcs);
int loop_unregister_transfer(int number);
#endif


@ -3633,7 +3633,9 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
if (rv)
goto read_capacity_error;
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@ -4061,7 +4063,6 @@ block_initialize_err:
msi_initialize_err:
if (dd->isr_workq) {
flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
@ -4119,7 +4120,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
mtip_block_remove(dd);
if (dd->isr_workq) {
flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);


@ -115,6 +115,7 @@ static const struct block_device_operations n64cart_fops = {
static int __init n64cart_probe(struct platform_device *pdev)
{
struct gendisk *disk;
int err = -ENOMEM;
if (!start || !size) {
pr_err("start or size not specified\n");
@ -132,7 +133,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
return -ENOMEM;
goto out;
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART_SCAN;
@ -147,11 +148,18 @@ static int __init n64cart_probe(struct platform_device *pdev)
blk_queue_physical_block_size(disk->queue, 4096);
blk_queue_logical_block_size(disk->queue, 4096);
add_disk(disk);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
pr_info("n64cart: %u kb disk\n", size / 1024);
return 0;
out_cleanup_disk:
blk_cleanup_disk(disk);
out:
return err;
}
static struct platform_driver n64cart_driver = {


@ -122,15 +122,21 @@ struct nbd_device {
struct work_struct remove_work;
struct list_head list;
struct task_struct *task_recv;
struct task_struct *task_setup;
unsigned long flags;
pid_t pid; /* pid of nbd-client, if attached */
char *backend;
};
#define NBD_CMD_REQUEUED 1
/*
* This flag will be set if nbd_queue_rq() succeed, and will be checked and
* cleared in completion. Both setting and clearing of the flag are protected
* by cmd->lock.
*/
#define NBD_CMD_INFLIGHT 2
struct nbd_cmd {
struct nbd_device *nbd;
@ -217,7 +223,7 @@ static ssize_t pid_show(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
return sprintf(buf, "%d\n", nbd->pid);
}
static const struct device_attribute pid_attr = {
@ -322,7 +328,7 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
if (!nbd->task_recv)
if (!nbd->pid)
return 0;
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
@ -398,6 +404,11 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
mutex_unlock(&cmd->lock);
@ -477,7 +488,8 @@ done:
}
/*
* Send or receive packet.
* Send or receive packet. Return a positive value on success and
* negtive value on failue, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
@ -603,7 +615,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
trace_nbd_header_sent(req, handle);
if (result <= 0) {
if (result < 0) {
if (was_interrupted(result)) {
/* If we havne't sent anything we can just return BUSY,
* however if we have sent something we need to make
@ -647,7 +659,7 @@ send_pages:
skip = 0;
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result <= 0) {
if (result < 0) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
@ -681,38 +693,45 @@ out:
return 0;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
static int nbd_read_reply(struct nbd_device *nbd, int index,
struct nbd_reply *reply)
{
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
struct iov_iter to;
int result;
reply->magic = 0;
iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
return result;
}
if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
(unsigned long)ntohl(reply->magic));
return -EPROTO;
}
return 0;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
struct nbd_reply *reply)
{
struct nbd_config *config = nbd->config;
int result;
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
int ret = 0;
reply.magic = 0;
iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
if (!nbd_disconnected(config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
return ERR_PTR(result);
}
if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
(unsigned long)ntohl(reply.magic));
return ERR_PTR(-EPROTO);
}
memcpy(&handle, reply.handle, sizeof(handle));
memcpy(&handle, reply->handle, sizeof(handle));
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@ -727,6 +746,16 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
tag, cmd->status, cmd->flags);
ret = -ENOENT;
goto out;
}
if (cmd->index != index) {
dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
tag, index, cmd->index);
}
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
@ -745,9 +774,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
ret = -ENOENT;
goto out;
}
if (ntohl(reply.error)) {
if (ntohl(reply->error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
ntohl(reply->error));
cmd->status = BLK_STS_IOERR;
goto out;
}
@ -756,11 +785,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
struct iov_iter to;
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
if (result < 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
@ -769,7 +799,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
if (nbd_disconnected(config)) {
if (nbd_disconnected(nbd->config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
@ -793,24 +823,46 @@ static void recv_work(struct work_struct *work)
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue;
struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
if (IS_ERR(cmd)) {
struct nbd_sock *nsock = config->socks[args->index];
struct nbd_reply reply;
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
if (nbd_read_reply(nbd, args->index, &reply))
break;
/*
* Grab .q_usage_counter so request pool won't go away, then no
* request use-after-free is possible during nbd_handle_reply().
* If queue is frozen, there won't be any inflight requests, we
* needn't to handle the incoming garbage message.
*/
if (!percpu_ref_tryget(&q->q_usage_counter)) {
dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
__func__);
break;
}
cmd = nbd_handle_reply(nbd, args->index, &reply);
if (IS_ERR(cmd)) {
percpu_ref_put(&q->q_usage_counter);
break;
}
rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
percpu_ref_put(&q->q_usage_counter);
}
nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@ -826,6 +878,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
return true;
mutex_lock(&cmd->lock);
if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return true;
}
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@ -907,7 +963,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@ -916,7 +971,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@ -940,7 +994,6 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
blk_mq_start_request(req);
return -EIO;
}
goto again;
@ -962,7 +1015,13 @@ again:
* returns EAGAIN can be retried on a different socket.
*/
ret = nbd_send_cmd(nbd, cmd, index);
if (ret == -EAGAIN) {
/*
* Access to this flag is protected by cmd->lock, thus it's safe to set
* the flag after nbd_send_cmd() succeed to send request to server.
*/
if (!ret)
__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
else if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
@ -1199,7 +1258,7 @@ static void send_disconnects(struct nbd_device *nbd)
iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
if (ret < 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
@ -1236,7 +1295,7 @@ static void nbd_config_put(struct nbd_device *nbd)
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->task_recv = NULL;
nbd->pid = 0;
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
&config->runtime_flags)) {
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
@ -1277,7 +1336,7 @@ static int nbd_start_device(struct nbd_device *nbd)
int num_connections = config->num_connections;
int error = 0, i;
if (nbd->task_recv)
if (nbd->pid)
return -EBUSY;
if (!config->socks)
return -EINVAL;
@ -1296,7 +1355,7 @@ static int nbd_start_device(struct nbd_device *nbd)
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
nbd->task_recv = current;
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@ -1552,8 +1611,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
if (nbd->task_recv)
seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
if (nbd->pid)
seq_printf(s, "recv: %d\n", nbd->pid);
return 0;
}
@ -1757,7 +1816,9 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
add_disk(disk);
err = add_disk(disk);
if (err)
goto out_err_disk;
/*
* Now publish the device.
@ -1766,6 +1827,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
out_err_disk:
blk_cleanup_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@ -2130,7 +2193,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->task_recv) {
!nbd->pid) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
ret = -EINVAL;
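
The nbd hunks above close a set of completion races with the new
NBD_CMD_INFLIGHT bit: the submission path sets it under cmd->lock only
once the request is actually on the wire, and every path that may
complete the request (reply processing, timeout handling, queue
teardown) must atomically test-and-clear it first, so exactly one path
wins. Reduced to its locking essentials the scheme looks roughly like
this; the structures are a sketch, not the driver's own:

    #include <linux/bitops.h>
    #include <linux/mutex.h>

    #define CMD_INFLIGHT 2  /* mirrors NBD_CMD_INFLIGHT above */

    /* reduced stand-in for struct nbd_cmd */
    struct cmd {
            struct mutex lock;
            unsigned long flags;
    };

    static void mark_inflight(struct cmd *cmd)
    {
            mutex_lock(&cmd->lock);
            /* non-atomic bitop is fine: cmd->lock serializes users */
            __set_bit(CMD_INFLIGHT, &cmd->flags);
            mutex_unlock(&cmd->lock);
    }

    /* returns true for the one path allowed to complete the command */
    static bool claim_completion(struct cmd *cmd)
    {
            bool ours;

            mutex_lock(&cmd->lock);
            ours = __test_and_clear_bit(CMD_INFLIGHT, &cmd->flags);
            mutex_unlock(&cmd->lock);
            return ours;
    }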


@ -92,6 +92,10 @@ static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
@ -324,29 +328,69 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
struct nullb *nullb = dev->nullb;
struct blk_mq_tag_set *set;
if (!nullb)
static int nullb_update_nr_hw_queues(struct nullb_device *dev,
unsigned int submit_queues,
unsigned int poll_queues)
{
struct blk_mq_tag_set *set;
int ret, nr_hw_queues;
if (!dev->nullb)
return 0;
/*
* Make sure at least one queue exists for each of submit and poll.
*/
if (!submit_queues || !poll_queues)
return -EINVAL;
/*
* Make sure that null_init_hctx() does not access nullb->queues[] past
* the end of that array.
*/
if (submit_queues > nr_cpu_ids)
if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
return -EINVAL;
set = nullb->tag_set;
blk_mq_update_nr_hw_queues(set, submit_queues);
return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
/*
* Keep previous and new queue numbers in nullb_device for reference in
* the callback function null_map_queues().
*/
dev->prev_submit_queues = dev->submit_queues;
dev->prev_poll_queues = dev->poll_queues;
dev->submit_queues = submit_queues;
dev->poll_queues = poll_queues;
set = dev->nullb->tag_set;
nr_hw_queues = submit_queues + poll_queues;
blk_mq_update_nr_hw_queues(set, nr_hw_queues);
ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
if (ret) {
/* on error, revert the queue numbers */
dev->submit_queues = dev->prev_submit_queues;
dev->poll_queues = dev->prev_poll_queues;
}
return ret;
}
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
}
static int nullb_apply_poll_queues(struct nullb_device *dev,
unsigned int poll_queues)
{
return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
@@ -466,6 +510,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_size,
&nullb_device_attr_completion_nsec,
&nullb_device_attr_submit_queues,
&nullb_device_attr_poll_queues,
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
@@ -593,6 +638,9 @@ static struct nullb_device *null_alloc_dev(void)
dev->size = g_gb * 1024;
dev->completion_nsec = g_completion_nsec;
dev->submit_queues = g_submit_queues;
dev->prev_submit_queues = g_submit_queues;
dev->poll_queues = g_poll_queues;
dev->prev_poll_queues = g_poll_queues;
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
@@ -1454,12 +1502,100 @@ static bool should_requeue_request(struct request *rq)
return false;
}
static int null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
int i, qoff;
unsigned int submit_queues = g_submit_queues;
unsigned int poll_queues = g_poll_queues;
if (nullb) {
struct nullb_device *dev = nullb->dev;
/*
* Check nr_hw_queues of the tag set to see whether the expected
* number of hardware queues was prepared. If the block layer failed
* to prepare them, fall back to the previous submit and poll queue
* counts to map queues.
*/
if (set->nr_hw_queues ==
dev->submit_queues + dev->poll_queues) {
submit_queues = dev->submit_queues;
poll_queues = dev->poll_queues;
} else if (set->nr_hw_queues ==
dev->prev_submit_queues + dev->prev_poll_queues) {
submit_queues = dev->prev_submit_queues;
poll_queues = dev->prev_poll_queues;
} else {
pr_warn("tag set has unexpected nr_hw_queues: %d\n",
set->nr_hw_queues);
return -EINVAL;
}
}
for (i = 0, qoff = 0; i < set->nr_maps; i++) {
struct blk_mq_queue_map *map = &set->map[i];
switch (i) {
case HCTX_TYPE_DEFAULT:
map->nr_queues = submit_queues;
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
continue;
case HCTX_TYPE_POLL:
map->nr_queues = poll_queues;
break;
}
map->queue_offset = qoff;
qoff += map->nr_queues;
blk_mq_map_queues(map);
}
return 0;
}
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
struct nullb_cmd *cmd;
struct request *req;
req = list_first_entry(&list, struct request, queuelist);
list_del_init(&req->queuelist);
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
end_cmd(cmd);
nr++;
}
return nr;
}
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
pr_info("rq %p timed out\n", rq);
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
@@ -1480,10 +1616,11 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
@@ -1507,6 +1644,13 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
}
if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&bd->rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;
@@ -1542,6 +1686,8 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
INIT_LIST_HEAD(&nq->poll_list);
spin_lock_init(&nq->poll_lock);
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
@@ -1567,6 +1713,8 @@ static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
.poll = null_poll,
.map_queues = null_map_queues,
.init_hctx = null_init_hctx,
.exit_hctx = null_exit_hctx,
};
@@ -1663,13 +1811,17 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
int nqueues = nr_cpu_ids;
if (g_poll_queues)
nqueues += g_poll_queues;
nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
@@ -1721,9 +1873,14 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
int poll_queues;
set->ops = &null_mq_ops;
set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
g_submit_queues;
poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
if (poll_queues)
set->nr_hw_queues += poll_queues;
set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
g_hw_queue_depth;
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
@@ -1733,7 +1890,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags |= BLK_MQ_F_NO_SCHED;
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = NULL;
set->driver_data = nullb;
if (g_poll_queues)
set->nr_maps = 3;
else
set->nr_maps = 1;
if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
@@ -1753,6 +1914,13 @@ static int null_validate_conf(struct nullb_device *dev)
dev->submit_queues = nr_cpu_ids;
else if (dev->submit_queues == 0)
dev->submit_queues = 1;
dev->prev_submit_queues = dev->submit_queues;
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
else if (dev->poll_queues == 0)
dev->poll_queues = 1;
dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

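The null_blk hunks above wire up poll queues end to end: a poll_queues module parameter and configfs attribute, extra hardware queues in the tag set (nr_maps = 3), a ->map_queues() callback that splits them, and a ->poll() handler. A condensed sketch of the queue-split logic, assuming fixed submit/poll counts instead of the fallback handling the real null_map_queues() does:

#include <linux/blk-mq.h>

/* Sketch: partition nr_hw_queues into default and poll groups. */
static int sketch_map_queues(struct blk_mq_tag_set *set)
{
        unsigned int submit = 4, poll = 2;      /* assumed driver counts */
        unsigned int qoff = 0;
        int i;

        for (i = 0; i < set->nr_maps; i++) {
                struct blk_mq_queue_map *map = &set->map[i];

                switch (i) {
                case HCTX_TYPE_DEFAULT:
                        map->nr_queues = submit;
                        break;
                case HCTX_TYPE_READ:
                        map->nr_queues = 0;     /* reads use DEFAULT */
                        continue;
                case HCTX_TYPE_POLL:
                        map->nr_queues = poll;
                        break;
                }
                map->queue_offset = qoff;
                qoff += map->nr_queues;
                blk_mq_map_queues(map);
        }
        return 0;
}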

@@ -32,6 +32,9 @@ struct nullb_queue {
struct nullb_device *dev;
unsigned int requeue_selection;
struct list_head poll_list;
spinlock_t poll_lock;
struct nullb_cmd *cmds;
};
@@ -83,6 +86,9 @@ struct nullb_device {
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
unsigned int prev_submit_queues; /* number of submission queues before change */
unsigned int poll_queues; /* number of IOPOLL submission queues */
unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */

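The poll_list/poll_lock members declared above exist because polled requests have no completion interrupt: null_queue_rq() parks them on the per-queue list, and ->poll() splices the list out under the lock and completes the entries privately. The timeout handler takes the same lock to unlink a timed-out request first, so ->poll() cannot complete it concurrently. The splice-and-drain core is roughly (nq as in the driver):

        LIST_HEAD(list);

        spin_lock(&nq->poll_lock);
        list_splice_init(&nq->poll_list, &list);
        spin_unlock(&nq->poll_lock);
        /* entries on "list" are now private to this ->poll() call */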

@@ -183,8 +183,6 @@ static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
static int pcd_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc);
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
@@ -302,53 +300,6 @@ static const struct blk_mq_ops pcd_mq_ops = {
.queue_rq = pcd_queue_rq,
};
static void pcd_init_units(void)
{
struct pcd_unit *cd;
int unit;
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
struct gendisk *disk;
if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE))
continue;
disk = blk_mq_alloc_disk(&cd->tag_set, cd);
if (IS_ERR(disk)) {
blk_mq_free_tag_set(&cd->tag_set);
continue;
}
INIT_LIST_HEAD(&cd->rq_list);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
cd->present = 0;
cd->last_sense = 0;
cd->changed = 1;
cd->drive = (*drives[unit])[D_SLV];
if ((*drives[unit])[D_PRT])
pcd_drive_count++;
cd->name = &cd->info.name[0];
snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
cd->info.ops = &pcd_dops;
cd->info.handle = cd;
cd->info.speed = 0;
cd->info.capacity = 1;
cd->info.mask = 0;
disk->major = major;
disk->first_minor = unit;
disk->minors = 1;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
static int pcd_open(struct cdrom_device_info *cdi, int purpose)
{
struct pcd_unit *cd = cdi->handle;
@@ -630,10 +581,11 @@ static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DISC_OK;
}
static int pcd_identify(struct pcd_unit *cd, char *id)
static int pcd_identify(struct pcd_unit *cd)
{
int k, s;
char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
char id[18];
int k, s;
pcd_bufblk = -1;
@@ -661,108 +613,47 @@
}
/*
* returns 0, with id set if drive is detected
* -1, if drive detection failed
* returns 0, with id set if drive is detected, otherwise an error code.
*/
static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
static int pcd_probe(struct pcd_unit *cd, int ms)
{
if (ms == -1) {
for (cd->drive = 0; cd->drive <= 1; cd->drive++)
if (!pcd_reset(cd) && !pcd_identify(cd, id))
if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
} else {
cd->drive = ms;
if (!pcd_reset(cd) && !pcd_identify(cd, id))
if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
}
return -1;
return -ENODEV;
}
static void pcd_probe_capabilities(void)
static int pcd_probe_capabilities(struct pcd_unit *cd)
{
int unit, r;
char buffer[32];
char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
struct pcd_unit *cd;
char buffer[32];
int ret;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->present)
continue;
r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
if (r)
continue;
/* we should now have the cap page */
if ((buffer[11] & 1) == 0)
cd->info.mask |= CDC_CD_R;
if ((buffer[11] & 2) == 0)
cd->info.mask |= CDC_CD_RW;
if ((buffer[12] & 1) == 0)
cd->info.mask |= CDC_PLAY_AUDIO;
if ((buffer[14] & 1) == 0)
cd->info.mask |= CDC_LOCK;
if ((buffer[14] & 8) == 0)
cd->info.mask |= CDC_OPEN_TRAY;
if ((buffer[14] >> 6) == 0)
cd->info.mask |= CDC_CLOSE_TRAY;
}
}
ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
if (ret)
return ret;
static int pcd_detect(void)
{
char id[18];
int k, unit;
struct pcd_unit *cd;
/* we should now have the cap page */
if ((buffer[11] & 1) == 0)
cd->info.mask |= CDC_CD_R;
if ((buffer[11] & 2) == 0)
cd->info.mask |= CDC_CD_RW;
if ((buffer[12] & 1) == 0)
cd->info.mask |= CDC_PLAY_AUDIO;
if ((buffer[14] & 1) == 0)
cd->info.mask |= CDC_LOCK;
if ((buffer[14] & 8) == 0)
cd->info.mask |= CDC_OPEN_TRAY;
if ((buffer[14] >> 6) == 0)
cd->info.mask |= CDC_CLOSE_TRAY;
printk("%s: %s version %s, major %d, nice %d\n",
name, name, PCD_VERSION, major, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
return -1;
}
k = 0;
if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
cd = pcd;
if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
pcd_buffer, PI_PCD, verbose, cd->name)) {
if (!pcd_probe(cd, -1, id)) {
cd->present = 1;
k++;
} else
pi_release(cd->pi);
}
} else {
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (!cd->disk)
continue;
if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pcd_buffer, PI_PCD, verbose, cd->name))
continue;
if (!pcd_probe(cd, conf[D_SLV], id)) {
cd->present = 1;
k++;
} else
pi_release(cd->pi);
}
}
if (k)
return 0;
printk("%s: No CD-ROM drive found\n", name);
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;
blk_cleanup_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
}
pi_unregister_driver(par_drv);
return -1;
return 0;
}
/* I/O request processing */
@@ -999,43 +890,130 @@ static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
return 0;
}
static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
int mode, int unit, int protocol, int delay, int ms)
{
struct gendisk *disk;
int ret;
ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE);
if (ret)
return ret;
disk = blk_mq_alloc_disk(&cd->tag_set, cd);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_free_tag_set;
}
INIT_LIST_HEAD(&cd->rq_list);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
cd->present = 0;
cd->last_sense = 0;
cd->changed = 1;
cd->drive = (*drives[cd - pcd])[D_SLV];
cd->name = &cd->info.name[0];
snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
cd->info.ops = &pcd_dops;
cd->info.handle = cd;
cd->info.speed = 0;
cd->info.capacity = 1;
cd->info.mask = 0;
disk->major = major;
disk->first_minor = unit;
disk->minors = 1;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
pcd_buffer, PI_PCD, verbose, cd->name)) {
ret = -ENODEV;
goto out_free_disk;
}
ret = pcd_probe(cd, ms);
if (ret)
goto out_pi_release;
cd->present = 1;
pcd_probe_capabilities(cd);
ret = register_cdrom(cd->disk, &cd->info);
if (ret)
goto out_pi_release;
ret = add_disk(cd->disk);
if (ret)
goto out_unreg_cdrom;
return 0;
out_unreg_cdrom:
unregister_cdrom(&cd->info);
out_pi_release:
pi_release(cd->pi);
out_free_disk:
blk_cleanup_disk(cd->disk);
out_free_tag_set:
blk_mq_free_tag_set(&cd->tag_set);
return ret;
}
static int __init pcd_init(void)
{
struct pcd_unit *cd;
int unit;
int found = 0, unit;
if (disable)
return -EINVAL;
pcd_init_units();
if (pcd_detect())
return -ENODEV;
/* get the atapi capabilities page */
pcd_probe_capabilities();
if (register_blkdev(major, name)) {
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;
blk_cleanup_queue(cd->disk->queue);
blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
if (register_blkdev(major, name))
return -EBUSY;
pr_info("%s: %s version %s, major %d, nice %d\n",
name, name, PCD_VERSION, major, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
goto out_unregister_blkdev;
}
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (cd->present) {
register_cdrom(cd->disk, &cd->info);
cd->disk->private_data = cd;
add_disk(cd->disk);
for (unit = 0; unit < PCD_UNITS; unit++) {
if ((*drives[unit])[D_PRT])
pcd_drive_count++;
}
if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
found++;
} else {
for (unit = 0; unit < PCD_UNITS; unit++) {
struct pcd_unit *cd = &pcd[unit];
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
conf[D_SLV]))
found++;
}
}
if (!found) {
pr_info("%s: No CD-ROM drive found\n", name);
goto out_unregister_pi_driver;
}
return 0;
out_unregister_pi_driver:
pi_unregister_driver(par_drv);
out_unregister_blkdev:
unregister_blkdev(major, name);
return -ENODEV;
}
static void __exit pcd_exit(void)
@@ -1044,20 +1022,18 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
if (!cd->present)
continue;
if (cd->present) {
del_gendisk(cd->disk);
pi_release(cd->pi);
unregister_cdrom(&cd->info);
}
blk_cleanup_queue(cd->disk->queue);
unregister_cdrom(&cd->info);
del_gendisk(cd->disk);
pi_release(cd->pi);
blk_cleanup_disk(cd->disk);
blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
unregister_blkdev(major, name);
pi_unregister_driver(par_drv);
unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");

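The pcd rework above replaces the probe-everything pcd_detect()/pcd_init_units() pair with a single pcd_init_unit() per drive, in which every setup step has a matching undo and failures unwind in reverse order. The skeleton of that pattern, with hypothetical step names standing in for the tag-set, disk, and hardware setup:

/* Hypothetical helpers; the point is the reverse-order unwind. */
extern int setup_tagset(void), setup_disk(void), setup_hw(void);
extern void teardown_disk(void), teardown_tagset(void);

static int init_unit_sketch(void)
{
        int ret;

        ret = setup_tagset();
        if (ret)
                return ret;
        ret = setup_disk();
        if (ret)
                goto out_free_tagset;
        ret = setup_hw();               /* pi_init() + probe + add_disk() */
        if (ret)
                goto out_free_disk;
        return 0;

out_free_disk:
        teardown_disk();
out_free_tagset:
        teardown_tagset();
        return ret;
}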

@@ -875,9 +875,27 @@ static const struct blk_mq_ops pd_mq_ops = {
.queue_rq = pd_queue_rq,
};
static void pd_probe_drive(struct pd_unit *disk)
static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
int mode, int unit, int protocol, int delay)
{
int index = disk - pd;
int *parm = *drives[index];
struct gendisk *p;
int ret;
disk->pi = &disk->pia;
disk->access = 0;
disk->changed = 1;
disk->capacity = 0;
disk->drive = parm[D_SLV];
snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
disk->alt_geom = parm[D_GEO];
disk->standby = parm[D_SBY];
INIT_LIST_HEAD(&disk->rq_list);
if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
pd_scratch, PI_PD, verbose, disk->name))
return -ENXIO;
memset(&disk->tag_set, 0, sizeof(disk->tag_set));
disk->tag_set.ops = &pd_mq_ops;
@@ -887,14 +905,14 @@ static void pd_probe_drive(struct pd_unit *disk)
disk->tag_set.queue_depth = 2;
disk->tag_set.numa_node = NUMA_NO_NODE;
disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
if (blk_mq_alloc_tag_set(&disk->tag_set))
return;
ret = blk_mq_alloc_tag_set(&disk->tag_set);
if (ret)
goto pi_release;
p = blk_mq_alloc_disk(&disk->tag_set, disk);
if (IS_ERR(p)) {
blk_mq_free_tag_set(&disk->tag_set);
return;
ret = PTR_ERR(p);
goto free_tag_set;
}
disk->gd = p;
@@ -905,102 +923,88 @@ static void pd_probe_drive(struct pd_unit *disk)
p->minors = 1 << PD_BITS;
p->events = DISK_EVENT_MEDIA_CHANGE;
p->private_data = disk;
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
if (disk->drive == -1) {
for (disk->drive = 0; disk->drive <= 1; disk->drive++)
if (pd_special_command(disk, pd_identify) == 0)
return;
} else if (pd_special_command(disk, pd_identify) == 0)
return;
disk->gd = NULL;
for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
ret = pd_special_command(disk, pd_identify);
if (ret == 0)
break;
}
} else {
ret = pd_special_command(disk, pd_identify);
}
if (ret)
goto put_disk;
set_capacity(disk->gd, disk->capacity);
ret = add_disk(disk->gd);
if (ret)
goto cleanup_disk;
return 0;
cleanup_disk:
blk_cleanup_disk(disk->gd);
put_disk:
put_disk(p);
disk->gd = NULL;
free_tag_set:
blk_mq_free_tag_set(&disk->tag_set);
pi_release:
pi_release(disk->pi);
return ret;
}
static int pd_detect(void)
static int __init pd_init(void)
{
int found = 0, unit, pd_drive_count = 0;
struct pd_unit *disk;
for (unit = 0; unit < PD_UNITS; unit++) {
int *parm = *drives[unit];
struct pd_unit *disk = pd + unit;
disk->pi = &disk->pia;
disk->access = 0;
disk->changed = 1;
disk->capacity = 0;
disk->drive = parm[D_SLV];
snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
disk->alt_geom = parm[D_GEO];
disk->standby = parm[D_SBY];
if (parm[D_PRT])
pd_drive_count++;
INIT_LIST_HEAD(&disk->rq_list);
}
if (disable)
return -ENODEV;
if (register_blkdev(major, name))
return -ENODEV;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PD_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
return -1;
goto out_unregister_blkdev;
}
for (unit = 0; unit < PD_UNITS; unit++) {
int *parm = *drives[unit];
if (parm[D_PRT])
pd_drive_count++;
}
if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
disk = pd;
if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
PI_PD, verbose, disk->name)) {
pd_probe_drive(disk);
if (!disk->gd)
pi_release(disk->pi);
}
if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
found++;
} else {
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
int *parm = *drives[unit];
if (!parm[D_PRT])
continue;
if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
parm[D_UNI], parm[D_PRO], parm[D_DLY],
pd_scratch, PI_PD, verbose, disk->name)) {
pd_probe_drive(disk);
if (!disk->gd)
pi_release(disk->pi);
}
}
}
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
if (disk->gd) {
set_capacity(disk->gd, disk->capacity);
add_disk(disk->gd);
found = 1;
if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
parm[D_UNI], parm[D_PRO], parm[D_DLY]))
found++;
}
}
if (!found) {
printk("%s: no valid drive found\n", name);
pi_unregister_driver(par_drv);
goto out_pi_unregister_driver;
}
return found;
}
static int __init pd_init(void)
{
if (disable)
goto out1;
if (register_blkdev(major, name))
goto out1;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PD_VERSION, major, cluster, nice);
if (!pd_detect())
goto out2;
return 0;
out2:
out_pi_unregister_driver:
pi_unregister_driver(par_drv);
out_unregister_blkdev:
unregister_blkdev(major, name);
out1:
return -ENODEV;
}


@@ -214,7 +214,6 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
@@ -285,45 +284,6 @@ static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
static void __init pf_init_units(void)
{
struct pf_unit *pf;
int unit;
pf_drive_count = 0;
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
struct gendisk *disk;
if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE))
continue;
disk = blk_mq_alloc_disk(&pf->tag_set, pf);
if (IS_ERR(disk)) {
blk_mq_free_tag_set(&pf->tag_set);
continue;
}
INIT_LIST_HEAD(&pf->rq_list);
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
pf->pi = &pf->pia;
pf->media_status = PF_NM;
pf->drive = (*drives[unit])[D_SLV];
pf->lun = (*drives[unit])[D_LUN];
snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
disk->major = major;
disk->first_minor = unit;
disk->minors = 1;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
}
static int pf_open(struct block_device *bdev, fmode_t mode)
{
struct pf_unit *pf = bdev->bd_disk->private_data;
@@ -691,9 +651,9 @@ static int pf_identify(struct pf_unit *pf)
return 0;
}
/* returns 0, with id set if drive is detected
-1, if drive detection failed
*/
/*
* returns 0, with id set if drive is detected, otherwise an error code.
*/
static int pf_probe(struct pf_unit *pf)
{
if (pf->drive == -1) {
@@ -715,60 +675,7 @@ static int pf_probe(struct pf_unit *pf)
if (!pf_identify(pf))
return 0;
}
return -1;
}
static int pf_detect(void)
{
struct pf_unit *pf = units;
int k, unit;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PF_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
return -1;
}
k = 0;
if (pf_drive_count == 0) {
if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
verbose, pf->name)) {
if (!pf_probe(pf) && pf->disk) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
} else
for (unit = 0; unit < PF_UNITS; unit++, pf++) {
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pf_scratch, PI_PF, verbose, pf->name)) {
if (pf->disk && !pf_probe(pf)) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
}
if (k)
return 0;
printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
}
pi_unregister_driver(par_drv);
return -1;
return -ENODEV;
}
/* The i/o request engine */
@@ -1014,61 +921,134 @@ static void do_pf_write_done(void)
next_request(0);
}
static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
int mode, int unit, int protocol, int delay, int ms)
{
struct gendisk *disk;
int ret;
ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE);
if (ret)
return ret;
disk = blk_mq_alloc_disk(&pf->tag_set, pf);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_free_tag_set;
}
disk->major = major;
disk->first_minor = pf - units;
disk->minors = 1;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = pf;
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
INIT_LIST_HEAD(&pf->rq_list);
pf->disk = disk;
pf->pi = &pf->pia;
pf->media_status = PF_NM;
pf->drive = (*drives[disk->first_minor])[D_SLV];
pf->lun = (*drives[disk->first_minor])[D_LUN];
snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
pf_scratch, PI_PF, verbose, pf->name)) {
ret = -ENODEV;
goto out_free_disk;
}
ret = pf_probe(pf);
if (ret)
goto out_pi_release;
ret = add_disk(disk);
if (ret)
goto out_pi_release;
pf->present = 1;
return 0;
out_pi_release:
pi_release(pf->pi);
out_free_disk:
blk_cleanup_disk(pf->disk);
out_free_tag_set:
blk_mq_free_tag_set(&pf->tag_set);
return ret;
}
static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
int unit;
int found = 0, unit;
if (disable)
return -EINVAL;
pf_init_units();
if (pf_detect())
return -ENODEV;
pf_busy = 0;
if (register_blkdev(major, name)) {
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
}
if (register_blkdev(major, name))
return -EBUSY;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PF_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
goto out_unregister_blkdev;
}
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
struct gendisk *disk = pf->disk;
if (!pf->present)
continue;
disk->private_data = pf;
add_disk(disk);
for (unit = 0; unit < PF_UNITS; unit++) {
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
pf = units;
if (pf_drive_count == 0) {
if (!pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
found++;
} else {
for (unit = 0; unit < PF_UNITS; unit++, pf++) {
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (!pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
verbose))
found++;
}
}
if (!found) {
printk("%s: No ATAPI disk detected\n", name);
goto out_unregister_pi_driver;
}
pf_busy = 0;
return 0;
out_unregister_pi_driver:
pi_unregister_driver(par_drv);
out_unregister_blkdev:
unregister_blkdev(major, name);
return -ENODEV;
}
static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
if (!pf->present)
continue;
if (pf->present)
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
del_gendisk(pf->disk);
blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
if (pf->present)
pi_release(pf->pi);
pi_release(pf->pi);
}
unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");


@@ -2728,7 +2728,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
add_disk(disk);
ret = add_disk(disk);
if (ret)
goto out_mem2;
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);


@@ -7054,7 +7054,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rc)
goto err_out_image_lock;
device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
if (rc)
goto err_out_cleanup_disk;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -7068,6 +7070,8 @@ out:
module_put(THIS_MODULE);
return rc;
err_out_cleanup_disk:
rbd_free_disk(rbd_dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);


@@ -1384,8 +1384,10 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
}
static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
int err;
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
dev->gd->minors = 1 << RNBD_PART_BITS;
@@ -1410,7 +1412,11 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
if (!dev->rotational)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
add_disk(dev->gd);
err = add_disk(dev->gd);
if (err)
blk_cleanup_disk(dev->gd);
return err;
}
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
@@ -1426,8 +1432,7 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev);
rnbd_clt_setup_gen_disk(dev, idx);
return 0;
return rnbd_clt_setup_gen_disk(dev, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,


@@ -935,7 +935,9 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->size8 = 0;
}
rsxx_attach_dev(card);
st = rsxx_attach_dev(card);
if (st)
goto failed_create_dev;
/************* Setup Debugfs *************/
rsxx_debugfs_dev_new(card);


@@ -191,6 +191,8 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
int err = 0;
mutex_lock(&card->dev_lock);
/* The block device requires the stripe size from the config. */
@@ -199,13 +201,17 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
card->bdev_attached = 1;
err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
if (err == 0)
card->bdev_attached = 1;
}
mutex_unlock(&card->dev_lock);
return 0;
if (err)
blk_cleanup_disk(card->gendisk);
return err;
}
void rsxx_detach_dev(struct rsxx_cardinfo *card)


@@ -185,6 +185,7 @@ struct floppy_state {
int track;
int ref_count;
bool registered;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
@@ -772,6 +773,20 @@ static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
static void swim_cleanup_floppy_disk(struct floppy_state *fs)
{
struct gendisk *disk = fs->disk;
if (!disk)
return;
if (fs->registered)
del_gendisk(fs->disk);
blk_cleanup_disk(disk);
blk_mq_free_tag_set(&fs->tag_set);
}
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -828,7 +843,10 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
err = add_disk(swd->unit[drive].disk);
if (err)
goto exit_put_disks;
swd->unit[drive].registered = true;
}
return 0;
@@ -836,12 +854,7 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
do {
struct gendisk *disk = swd->unit[drive].disk;
if (!disk)
continue;
blk_cleanup_disk(disk);
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
swim_cleanup_floppy_disk(&swd->unit[drive]);
} while (drive--);
return err;
}
@@ -910,12 +923,8 @@ static int swim_remove(struct platform_device *dev)
int drive;
struct resource *res;
for (drive = 0; drive < swd->floppy_count; drive++) {
del_gendisk(swd->unit[drive].disk);
blk_cleanup_queue(swd->unit[drive].disk->queue);
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
put_disk(swd->unit[drive].disk);
}
for (drive = 0; drive < swd->floppy_count; drive++)
swim_cleanup_floppy_disk(&swd->unit[drive]);
unregister_blkdev(FLOPPY_MAJOR, "fd");

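The swim conversion follows the same add_disk() pattern, with one design point worth noting: cleanup is centralized in swim_cleanup_floppy_disk(), keyed off the new per-unit registered flag, so the error path in swim_floppy_init() and the normal swim_remove() path share one helper that only calls del_gendisk() for disks that actually made it past add_disk().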

@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -1229,7 +1230,9 @@ static int swim3_attach(struct macio_dev *mdev,
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
add_disk(disk);
rc = add_disk(disk);
if (rc)
goto out_cleanup_disk;
disks[floppy_count++] = disk;
return 0;


@@ -297,6 +297,7 @@ struct carm_host {
struct work_struct fsm_task;
int probe_err;
struct completion probe_comp;
};
@@ -1181,8 +1182,11 @@ static void carm_fsm_task (struct work_struct *work)
struct gendisk *disk = port->disk;
set_capacity(disk, port->capacity);
add_disk(disk);
activated++;
host->probe_err = add_disk(disk);
if (!host->probe_err)
activated++;
else
break;
}
printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
@@ -1192,11 +1196,9 @@ static void carm_fsm_task (struct work_struct *work)
reschedule = 1;
break;
}
case HST_PROBE_FINISHED:
complete(&host->probe_comp);
break;
case HST_ERROR:
/* FIXME: TODO */
break;
@@ -1507,7 +1509,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_irq;
DPRINTK("waiting for probe_comp\n");
host->probe_err = -ENODEV;
wait_for_completion(&host->probe_comp);
if (host->probe_err) {
rc = host->probe_err;
goto err_out_free_irq;
}
printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,

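sx8 is the one driver here where add_disk() runs in an asynchronous FSM worker rather than in probe context, so the result is handed back through a completion. A sketch of that handshake (struct and function names are illustrative):

#include <linux/blkdev.h>
#include <linux/completion.h>

struct example_host {
        int probe_err;
        struct completion probe_comp;
};

/* Async worker: record the result, then wake the prober. */
static void example_fsm_work(struct example_host *host,
                             struct gendisk *disk)
{
        host->probe_err = add_disk(disk);
        complete(&host->probe_comp);
}

/* Probe path: default to failure in case the FSM never gets there. */
static int example_probe_wait(struct example_host *host)
{
        host->probe_err = -ENODEV;
        wait_for_completion(&host->probe_comp);
        return host->probe_err;
}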

@@ -2386,7 +2386,13 @@ static void blkfront_connect(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i)
kick_pending_request_queues(rinfo);
device_add_disk(&info->xbdev->dev, info->gd, NULL);
err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
if (err) {
blk_cleanup_disk(info->gd);
blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
goto fail;
}
info->is_ready = 1;
return;


@@ -805,9 +805,14 @@ static int probe_gdrom(struct platform_device *devptr)
err = -ENOMEM;
goto probe_fail_free_irqs;
}
add_disk(gd.disk);
err = add_disk(gd.disk);
if (err)
goto probe_fail_add_disk;
return 0;
probe_fail_add_disk:
kfree(gd.toc);
probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);


@@ -178,7 +178,6 @@
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -190,6 +189,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -395,8 +395,6 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
char backing_dev_name[BDEVNAME_SIZE];
};
enum alloc_reserve {
@@ -470,8 +468,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
char cache_dev_name[BDEVNAME_SIZE];
};
struct gc_stat {


@@ -43,9 +43,9 @@ static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
#define KEY_MAX_U64S 8
KEY_FIELD(KEY_PTRS, high, 60, 3)
KEY_FIELD(HEADER_SIZE, high, 58, 2)
KEY_FIELD(__PAD0, high, 58, 2)
KEY_FIELD(KEY_CSUM, high, 56, 2)
KEY_FIELD(KEY_PINNED, high, 55, 1)
KEY_FIELD(__PAD1, high, 55, 1)
KEY_FIELD(KEY_DIRTY, high, 36, 1)
KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)


@@ -2,10 +2,10 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */
/*


@@ -141,7 +141,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
uint64_t crc = b->key.ptr[0];
void *data = (void *) i + 8, *end = bset_bkey_last(i);
crc = bch_crc64_update(crc, data, end - data);
crc = crc64_be(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}


@@ -127,21 +127,20 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
void *p1 = bvec_kmap_local(&bv);
void *p2;
cbv = bio_iter_iovec(check, citer);
p2 = page_address(cbv.bv_page);
p2 = bvec_kmap_local(&cbv);
cache_set_err_on(memcmp(p1 + bv.bv_offset,
p2 + bv.bv_offset,
bv.bv_len),
cache_set_err_on(memcmp(p1, p2, bv.bv_len),
dc->disk.c,
"verify failed at dev %s sector %llu",
dc->backing_dev_name,
"verify failed at dev %pg sector %llu",
dc->bdev,
(uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
kunmap_local(p2);
kunmap_local(p1);
bio_advance_iter(check, &citer, bv.bv_len);
}

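The bch_data_verify() hunk above also switches from kmap_atomic() to bvec_kmap_local(), which maps the bvec's page and already applies bv_offset, so the manual "+ bv.bv_offset" arithmetic drops out. A self-contained sketch of the comparison idiom (assuming both bvecs have the same length):

#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: compare two bvecs; unmap in reverse order of mapping. */
static bool bvecs_equal(struct bio_vec *a, struct bio_vec *b)
{
        void *p1 = bvec_kmap_local(a);
        void *p2 = bvec_kmap_local(b);
        bool eq = memcmp(p1, p2, a->bv_len) == 0;

        kunmap_local(p2);
        kunmap_local(p1);
        return eq;
}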

@@ -6,7 +6,7 @@
* Copyright 2020 Coly Li <colyli@suse.de>
*
*/
#include <linux/bcache.h>
#include "bcache_ondisk.h"
#include "bcache.h"
#include "features.h"


@@ -2,10 +2,11 @@
#ifndef _BCACHE_FEATURES_H
#define _BCACHE_FEATURES_H
#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "bcache_ondisk.h"
#define BCH_FEATURE_COMPAT 0
#define BCH_FEATURE_RO_COMPAT 1
#define BCH_FEATURE_INCOMPAT 2


@@ -65,15 +65,15 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
dc->backing_dev_name);
pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
dc->bdev);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
pr_err("%s: IO error on backing device, unrecoverable\n",
dc->backing_dev_name);
pr_err("%pg: IO error on backing device, unrecoverable\n",
dc->bdev);
else
bch_cached_dev_error(dc);
}
@@ -123,13 +123,13 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
pr_err("%s: IO error on %s%s\n",
ca->cache_dev_name, m,
pr_err("%pg: IO error on %s%s\n",
ca->bdev, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s\n",
ca->cache_dev_name, m);
"%pg: too many IO errors %s\n",
ca->bdev, m);
}
}


@@ -46,7 +46,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
csum = bch_crc64_update(csum, d, bv.bv_len);
csum = crc64_be(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@@ -651,8 +651,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
pr_err("Can't flush %s: returned bi_status %i\n",
dc->backing_dev_name, bio->bi_status);
pr_err("Can't flush %pg: returned bi_status %i\n",
dc->bdev, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;


@@ -1026,8 +1026,8 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
pr_err("%s: device offline for %d seconds\n",
dc->backing_dev_name,
pr_err("%pg: device offline for %d seconds\n",
dc->bdev,
BACKING_DEV_OFFLINE_TIMEOUT);
pr_err("%s: disable I/O request due to backing device offline\n",
dc->disk.name);
@@ -1058,15 +1058,13 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
pr_err("I/O disabled on cached dev %s\n",
dc->backing_dev_name);
pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
ret = -EIO;
goto out;
}
if (atomic_xchg(&dc->running, 1)) {
pr_info("cached dev %s is running already\n",
dc->backing_dev_name);
pr_info("cached dev %pg is running already\n", dc->bdev);
ret = -EBUSY;
goto out;
}
@@ -1082,7 +1080,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
closure_sync(&cl);
}
add_disk(d->disk);
ret = add_disk(d->disk);
if (ret)
goto out;
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/*
* won't show up in the uevent file, use udevadm monitor -e instead
@@ -1154,16 +1154,16 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
calc_cached_dev_sectors(dc->disk.c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
mutex_unlock(&bch_register_lock);
pr_info("Caching disabled for %s\n", dc->backing_dev_name);
pr_info("Caching disabled for %pg\n", dc->bdev);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1203,29 +1203,27 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
pr_err("Can't attach %s: already attached\n",
dc->backing_dev_name);
pr_err("Can't attach %pg: already attached\n", dc->bdev);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
pr_err("Can't attach %s: shutting down\n",
dc->backing_dev_name);
pr_err("Can't attach %pg: shutting down\n", dc->bdev);
return -EINVAL;
}
if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
pr_err("Couldn't attach %pg: block size less than set's block size\n",
dc->bdev);
return -EINVAL;
}
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
pr_err("Tried to attach %s but duplicate UUID already attached\n",
dc->backing_dev_name);
pr_err("Tried to attach %pg but duplicate UUID already attached\n",
dc->bdev);
return -EINVAL;
}
@@ -1243,15 +1241,13 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
pr_err("Couldn't find uuid for %s in set\n",
dc->backing_dev_name);
pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
pr_err("Not caching %s, no room for UUID\n",
dc->backing_dev_name);
pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
return -EINVAL;
}
}
@@ -1319,8 +1315,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
pr_err("Couldn't run cached device %s\n",
dc->backing_dev_name);
pr_err("Couldn't run cached device %pg\n", dc->bdev);
return ret;
}
@@ -1336,8 +1331,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
pr_info("Caching %s as %s on set %pU\n",
dc->backing_dev_name,
pr_info("Caching %pg as %s on set %pU\n",
dc->bdev,
dc->disk.disk->disk_name,
dc->disk.c->set_uuid);
return 0;
@@ -1461,7 +1456,6 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct cache_set *c;
int ret = -ENOMEM;
bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
@@ -1476,7 +1470,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
pr_info("registered backing device %s\n", dc->backing_dev_name);
pr_info("registered backing device %pg\n", dc->bdev);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1493,7 +1487,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
pr_notice("error %s: %s\n", dc->backing_dev_name, err);
pr_notice("error %pg: %s\n", dc->bdev, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1534,10 +1528,11 @@ static void flash_dev_flush(struct closure *cl)
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
int err = -ENOMEM;
struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
GFP_KERNEL);
if (!d)
return -ENOMEM;
goto err_ret;
closure_init(&d->cl, NULL);
set_closure_fn(&d->cl, flash_dev_flush, system_wq);
@@ -1551,9 +1546,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
err = add_disk(d->disk);
if (err)
goto err;
if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
if (err)
goto err;
bcache_device_link(d, c, "volume");
@@ -1567,7 +1565,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
return 0;
err:
kobject_put(&d->kobj);
return -ENOMEM;
err_ret:
return err;
}
static int flash_devs_run(struct cache_set *c)
@@ -1621,8 +1620,8 @@ bool bch_cached_dev_error(struct cached_dev *dc)
/* make others know io_disable is true earlier */
smp_mb();
pr_err("stop %s: too many IO errors on backing device %s\n",
dc->disk.disk->disk_name, dc->backing_dev_name);
pr_err("stop %s: too many IO errors on backing device %pg\n",
dc->disk.disk->disk_name, dc->bdev);
bcache_device_stop(&dc->disk);
return true;
@@ -2338,7 +2337,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
pr_notice("error %s: %s\n", ca->cache_dev_name, err);
pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2348,7 +2347,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
const char *err = NULL; /* must be set for any error case */
int ret = 0;
bdevname(bdev, ca->cache_dev_name);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -2390,14 +2388,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
pr_info("registered cache device %s\n", ca->cache_dev_name);
pr_info("registered cache device %pg\n", ca->bdev);
out:
kobject_put(&ca->kobj);
err:
if (err)
pr_notice("error %s: %s\n", ca->cache_dev_name, err);
pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2617,8 +2615,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
if (!dc) {
ret = -ENOMEM;
err = "cannot allocate memory";
goto out_put_sb_page;
}
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_disk, bdev, dc);
@@ -2629,11 +2630,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
if (!ca) {
ret = -ENOMEM;
err = "cannot allocate memory";
goto out_put_sb_page;
}
/* blkdev_put() will be called in bch_cache_release() */
if (register_cache(sb, sb_disk, bdev, ca) != 0)
ret = register_cache(sb, sb_disk, bdev, ca);
if (ret)
goto out_free_sb;
}
@@ -2750,7 +2755,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
* The reason bch_register_lock is not held to call
* bch_cache_set_stop() and bcache_device_stop() is to
* avoid potential deadlock during reboot, because cache
* set or bcache device stopping process will acqurie
* set or bcache device stopping process will acquire
* bch_register_lock too.
*
* We are safe here because bcache_is_reboot sets to

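The string churn above is all one change: the %pg printk format specifier takes a struct block_device * and prints its name directly, so bcache no longer needs to snapshot bdevname() into the backing_dev_name/cache_dev_name buffers at registration time. Usage is just:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Sketch: %pg formats the device name, e.g. "sdb" or "sdb1". */
static void report_io_error(struct block_device *bdev)
{
        pr_err("I/O error on %pg\n", bdev);
}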

@@ -271,7 +271,7 @@ SHOW(__bch_cached_dev)
}
if (attr == &sysfs_backing_dev_name) {
snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
strcat(buf, "\n");
return strlen(buf);
}


@@ -51,13 +51,27 @@ STORE(fn) \
#define sysfs_printf(file, fmt, ...) \
do { \
if (attr == &sysfs_ ## file) \
return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \
return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \
} while (0)
#define sysfs_print(file, var) \
do { \
if (attr == &sysfs_ ## file) \
return snprint(buf, PAGE_SIZE, var); \
return sysfs_emit(buf, \
__builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \
__builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \
__builtin_types_compatible_p(typeof(var), unsigned long)\
? "%lu\n" : \
__builtin_types_compatible_p(typeof(var), int64_t) \
? "%lli\n" : \
__builtin_types_compatible_p(typeof(var), uint64_t) \
? "%llu\n" : \
__builtin_types_compatible_p(typeof(var), const char *) \
? "%s\n" : "%i\n", var); \
} while (0)
#define sysfs_hprint(file, val) \

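The sysfs_print() macro above now emits through sysfs_emit() instead of raw snprintf(). For a plain show() function the conversion looks like this (generic kobject form; bcache wraps it in its SHOW() macros):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* sysfs_emit() verifies that buf is the page-sized, page-aligned
 * sysfs buffer and returns the number of bytes written. */
static ssize_t example_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%u\n", 42U);
}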

@@ -340,23 +340,6 @@ static inline int bch_strtoul_h(const char *cp, long *res)
_r; \
})
#define snprint(buf, size, var) \
snprintf(buf, size, \
__builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \
__builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \
__builtin_types_compatible_p(typeof(var), unsigned long)\
? "%lu\n" : \
__builtin_types_compatible_p(typeof(var), int64_t) \
? "%lli\n" : \
__builtin_types_compatible_p(typeof(var), uint64_t) \
? "%llu\n" : \
__builtin_types_compatible_p(typeof(var), const char *) \
? "%s\n" : "%i\n", var)
ssize_t bch_hprint(char *buf, int64_t v);
bool bch_is_zero(const char *p, size_t n);
@@ -548,14 +531,6 @@ static inline uint64_t bch_crc64(const void *p, size_t len)
return crc ^ 0xffffffffffffffffULL;
}
static inline uint64_t bch_crc64_update(uint64_t crc,
const void *p,
size_t len)
{
crc = crc64_be(crc, p, len);
return crc;
}
/*
* A stepwise-linear pseudo-exponential. This returns 1 << (x >>
* frac_bits), with the less-significant bits filled in by linear

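With the bch_crc64_update() wrapper above deleted, callers use the library routine directly; it is the same big-endian CRC64 from lib/crc64.c:

#include <linux/crc64.h>

/* Sketch: fold a buffer into a running big-endian CRC64. */
static u64 checksum_update(u64 crc, const void *data, size_t len)
{
        return crc64_be(crc, data, len);
}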

@@ -2078,7 +2078,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (r)
return r;
add_disk(md->disk);
r = add_disk(md->disk);
if (r)
return r;
r = dm_sysfs_init(md);
if (r) {


@@ -354,7 +354,7 @@ static bool create_on_open = true;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
void md_new_event(void)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
@@ -2886,7 +2886,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_new_event(mddev);
md_new_event();
md_wakeup_thread(mddev->thread);
return 0;
}
@@ -2976,7 +2976,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* -write_error - clears WriteErrorSeen
* {,-}failfast - set/clear FailFast
*/
struct mddev *mddev = rdev->mddev;
int err = -EINVAL;
bool need_update_sb = false;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
@@ -2991,7 +2995,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
err = 0;
if (mddev_is_clustered(mddev))
err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -3002,16 +3005,18 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
md_new_event(mddev);
md_new_event();
}
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
mddev_create_serial_pool(rdev->mddev, rdev, false);
need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
@@ -3037,9 +3042,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "failfast")) {
set_bit(FailFast, &rdev->flags);
need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-failfast")) {
clear_bit(FailFast, &rdev->flags);
need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
@@ -3118,6 +3125,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(ExternalBbl, &rdev->flags);
err = 0;
}
if (need_update_sb)
md_update_sb(mddev, 1);
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
@@ -4099,7 +4108,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify_dirent_safe(mddev->sysfs_level);
md_new_event(mddev);
md_new_event();
rv = len;
out_unlock:
mddev_unlock(mddev);
@@ -4620,7 +4629,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
export_rdev(rdev);
mddev_unlock(mddev);
if (!err)
md_new_event(mddev);
md_new_event();
return err ? err : len;
}
@@ -5490,6 +5499,10 @@ static struct attribute *md_default_attrs[] = {
NULL,
};
static const struct attribute_group md_default_group = {
.attrs = md_default_attrs,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
@@ -5512,6 +5525,12 @@ static const struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
static const struct attribute_group *md_attr_groups[] = {
&md_default_group,
&md_bitmap_group,
NULL,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -5587,7 +5606,7 @@ static const struct sysfs_ops md_sysfs_ops = {
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
.default_groups = md_attr_groups,
};
int mdp_major = 0;
@@ -5596,7 +5615,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5663,7 +5681,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
goto abort;
goto out_unlock_disks_mutex;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5676,7 +5694,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
goto abort;
goto out_unlock_disks_mutex;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5700,27 +5718,25 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
add_disk(disk);
error = add_disk(disk);
if (error)
goto out_cleanup_disk;
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
pr_debug("md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
pr_debug("pointless warning\n");
abort:
if (error)
goto out_del_gendisk;
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
goto out_unlock_disks_mutex;
out_del_gendisk:
del_gendisk(disk);
out_cleanup_disk:
blk_cleanup_disk(disk);
out_unlock_disks_mutex:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
}
mddev_put(mddev);
return error;
}
@@ -6034,7 +6050,7 @@ int md_run(struct mddev *mddev)
if (mddev->sb_flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
md_new_event();
return 0;
bitmap_abort:
@@ -6424,7 +6440,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
md_new_event(mddev);
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -6928,7 +6944,7 @@ kick_rdev:
md_wakeup_thread(mddev->thread);
else
md_update_sb(mddev, 1);
md_new_event(mddev);
md_new_event();
return 0;
busy:
@@ -7001,7 +7017,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
md_new_event();
return 0;
abort_export:
@@ -7975,7 +7991,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event(mddev);
md_new_event();
}
EXPORT_SYMBOL(md_error);
@@ -8859,7 +8875,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event(mddev);
md_new_event();
update_time = jiffies;
blk_start_plug(&plug);
@@ -8930,7 +8946,7 @@ void md_do_sync(struct md_thread *thread)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
md_new_event();
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -9154,7 +9170,7 @@ static int remove_and_add_spares(struct mddev *mddev,
sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
md_new_event();
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9188,7 +9204,7 @@ static void md_start_sync(struct work_struct *ws)
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
md_new_event();
}
/*
@@ -9447,7 +9463,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
md_new_event();
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}

diff --git a/drivers/md/md.h b/drivers/md/md.h
@@ -731,7 +731,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
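
The prototype change above works because md_new_event() never used its mddev argument; the event count it maintains is global. For reference, the body over in md.c (not shown in this hunk) is essentially:

    void md_new_event(void)
    {
        atomic_inc(&md_event_count);    /* global event counter */
        wake_up(&md_event_waiters);     /* wake poll()ers of /proc/mdstat */
    }
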

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
@@ -1496,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!r1_bio->bios[i])
continue;
if (first_clone) {
if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
@@ -1529,13 +1529,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = mbio;
mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
if (test_bit(FailFast, &rdev->flags) &&
!test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
@@ -1546,7 +1545,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
mbio->bi_bdev = (void *)rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
@@ -4647,7 +4647,7 @@ out:
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
md_new_event(mddev);
md_new_event();
return 0;
abort:

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
@@ -7732,10 +7732,7 @@ static int raid5_run(struct mddev *mddev)
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
/* Round up to power of 2, as discard handling
* currently assumes that */
while ((stripe-1) & stripe)
stripe = (stripe | (stripe-1)) + 1;
stripe = roundup_pow_of_two(stripe);
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
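
roundup_pow_of_two() (from <linux/log2.h>) computes exactly what the removed open-coded loop did. A worked example, assuming 4 KiB pages and three data disks:

    /* stripe = 3 * PAGE_SIZE = 12288 (0x3000)
     * old loop, one iteration: (0x3000 | 0x2fff) + 1 = 0x4000 = 16384,
     * and (16384 - 1) & 16384 == 0 terminates it.
     * roundup_pow_of_two(12288) likewise yields 16384.
     */
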
@@ -8282,7 +8279,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
md_new_event(mddev);
md_new_event();
return 0;
}

diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
@@ -384,7 +384,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->readonly)
set_disk_ro(gd, 1);
device_add_disk(&new->mtd->dev, gd, NULL);
ret = device_add_disk(&new->mtd->dev, gd, NULL);
if (ret)
goto out_cleanup_disk;
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -393,6 +395,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
out_cleanup_disk:
blk_cleanup_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -203,7 +203,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
"Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
"Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
@@ -815,6 +815,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -866,6 +867,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_IOERR;
}
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -882,6 +884,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
memset(cmnd, 0, sizeof(*cmnd));
if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
return nvme_setup_discard(ns, req, cmnd);
@@ -915,9 +919,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd->rw.opcode = op;
cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.rsvd2 = 0;
cmnd->rw.metadata = 0;
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->rw.reftag = 0;
cmnd->rw.apptag = 0;
cmnd->rw.appmask = 0;
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
@@ -974,10 +984,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP)) {
if (!(req->rq_flags & RQF_DONTPREP))
nvme_clear_nvme_request(req);
memset(cmd, 0, sizeof(*cmd));
}
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -2593,6 +2601,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
static ssize_t nvme_subsys_show_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
switch (subsys->subtype) {
case NVME_NQN_DISC:
return sysfs_emit(buf, "discovery\n");
case NVME_NQN_NVME:
return sysfs_emit(buf, "nvm\n");
default:
return sysfs_emit(buf, "reserved\n");
}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2613,6 +2639,7 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
@@ -2683,6 +2710,21 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
/* Versions prior to 1.4 don't necessarily report a valid type */
if (id->cntrltype == NVME_CTRL_DISC ||
!strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
subsys->subtype = NVME_NQN_DISC;
else
subsys->subtype = NVME_NQN_NVME;
if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
dev_err(ctrl->device,
"Subsystem %s is not a discovery controller",
subsys->subnqn);
kfree(subsys);
return -EINVAL;
}
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_ERR, NULL }
};
@@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *

diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
@@ -67,6 +67,7 @@ enum {
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
};
/**
@@ -178,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsys)
return ctrl->opts->subsysnqn;
return ctrl->subsys->subnqn;
}
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
@@ -16,6 +16,7 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
int i;
for (i = 0; i < set->nr_maps; i++) {
struct blk_mq_queue_map *map = &set->map[i];
if (!map->nr_queues) {
WARN_ON(i == HCTX_TYPE_DEFAULT);
continue;
}
/* Call LLDD map queue functionality if defined */
if (ctrl->lport->ops->map_queues)
ctrl->lport->ops->map_queues(&ctrl->lport->localport,
map);
else
blk_mq_map_queues(map);
}
return 0;
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
.map_queues = nvme_fc_map_queues,
};
static int
@@ -3572,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
@@ -105,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->head->disk)
kblockd_schedule_work(&ns->head->requeue_work);
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (ctrl->state == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@@ -143,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
if (nvme_mpath_clear_current_path(ns))
kblockd_schedule_work(&ns->head->requeue_work);
list_for_each_entry(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
up_read(&ctrl->namespaces_rwsem);
mutex_unlock(&ctrl->scan_lock);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -506,13 +508,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
int rc;
if (!head->disk)
return;
/*
* test_and_set_bit() is used because it is protecting against two nvme
* paths simultaneously calling device_add_disk() on the same namespace
* head.
*/
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
rc = device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
if (rc) {
clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
return;
}
nvme_add_ns_head_cdev(head);
}
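
The hunk above pairs the existing test_and_set_bit() gate with a clear_bit() rollback, so that a failed device_add_disk() can be retried when another path comes up. The idiom in isolation (hypothetical helper, not driver code):

    #include <linux/bitops.h>

    static int demo_once(unsigned long *flags, int (*setup)(void))
    {
        int rc;

        if (test_and_set_bit(0, flags))
            return 0;               /* another caller already won the race */

        rc = setup();
        if (rc)
            clear_bit(0, flags);    /* roll back so a later caller retries */
        return rc;
    }

Note that the rollback must target the same flags word the gate was taken on: here head->flags, not ns->flags.
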
@@ -550,7 +562,7 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return -EINVAL;
nr_nsids = le32_to_cpu(desc->nnsids);
nsid_buf_size = nr_nsids * sizeof(__le32);
nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;

diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -373,6 +373,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -245,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
unsigned int mem_size = nvme_dbbuf_size(dev);
if (dev->dbbuf_dbs)
if (dev->dbbuf_dbs) {
/*
* Clear the dbbuf memory so the driver doesn't observe stale
* values from the previous instantiation.
*/
memset(dev->dbbuf_dbs, 0, mem_size);
memset(dev->dbbuf_eis, 0, mem_size);
return 0;
}
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,
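
The hunk above lets controller resets reuse the shadow doorbell buffers allocated on a previous reset, scrubbing them instead of reallocating. The allocate-or-scrub shape, reduced to a hypothetical helper:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static void *get_coherent_buf(struct device *dev, void **cached,
                                  size_t size, dma_addr_t *handle)
    {
        if (*cached) {
            memset(*cached, 0, size);   /* drop state from the last run */
            return *cached;
        }
        *cached = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
        return *cached;
    }
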

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
@@ -1096,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1112,6 +1114,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
dev_warn(ctrl->ctrl.device,
"ctrl sqsize %u > max queue size %u, clamping down\n",
ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
}
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@@ -2386,7 +2395,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -2585,7 +2585,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);

diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
@@ -233,6 +233,8 @@ out_free:
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
memset(c, 0, sizeof(*c));
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
@@ -264,7 +264,7 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
desc->state = req->port->ana_state[grpid];
memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
return struct_size(desc, nsids, count);
}
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
@@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
GFP_KERNEL);
if (!desc)
goto out;
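
Both conversions above use the <linux/overflow.h> helpers: struct_size() covers a header plus its trailing flexible array, flex_array_size() just the array, and both saturate rather than wrap on overflow. The idiom with an illustrative flexible-array struct (not the real ANA descriptor):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_desc {
        __le32 nnsids;
        __le32 nsids[];                 /* flexible array member */
    };

    static struct demo_desc *demo_alloc(u32 count)
    {
        /* sizeof(*d) + count * sizeof(d->nsids[0]), overflow-checked */
        struct demo_desc *d = kmalloc(struct_size(d, nsids, count),
                                      GFP_KERNEL);

        if (d)
            d->nnsids = cpu_to_le32(count);
        return d;
    }
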
@@ -374,13 +374,19 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->rab = 6;
if (nvmet_is_disc_subsys(ctrl->subsys))
id->cntrltype = NVME_CTRL_DISC;
else
id->cntrltype = NVME_CTRL_IO;
/*
* XXX: figure out how we can assign an IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
/* we support multiple ports, multiple hosts and ANA: */
id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
@@ -536,7 +542,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
@@ -1008,7 +1014,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req);

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
@@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n",
nvmet_disc_subsys->subsysnqn);
}
static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
char *subsysnqn;
int len;
len = strcspn(page, "\n");
if (!len)
return -EINVAL;
subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
if (!subsysnqn)
return -ENOMEM;
/*
* The discovery NQN must be different from the subsystem NQN.
*/
if (!strcmp(subsysnqn, subsys->subsysnqn)) {
kfree(subsysnqn);
return -EBUSY;
}
down_write(&nvmet_config_sem);
kfree(nvmet_disc_subsys->subsysnqn);
nvmet_disc_subsys->subsysnqn = subsysnqn;
up_write(&nvmet_config_sem);
return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
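
The new store handler follows the usual configfs pattern: strcspn() bounds the value at the first newline, kmemdup_nul() produces a NUL-terminated copy, and validation happens before the old value is swapped out under nvmet_config_sem. The first two steps as a generic helper (hypothetical):

    #include <linux/string.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    static char *dup_first_line(const char *page)
    {
        size_t len = strcspn(page, "\n");   /* length up to first newline */

        if (!len)
            return ERR_PTR(-EINVAL);        /* reject empty input */

        return kmemdup_nul(page, len, GFP_KERNEL) ?: ERR_PTR(-ENOMEM);
    }
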
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
@@ -1553,6 +1592,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* should verify iosqes,iocqes are zeroed, however that
* would break backwards compatibility, so don't enforce it.
*/
if (ctrl->subsys->type != NVME_NQN_DISC &&
if (!nvmet_is_disc_subsys(ctrl->subsys) &&
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
ctrl->csts = NVME_CSTS_CFS;
@@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (ctrl->ops->get_max_queue_size)
ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
else
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
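
The "- 1" in both branches above exists because CAP.MQES is a zero's-based field: a transport whose get_max_queue_size() reports 128 must advertise MQES = 127. As a one-line sketch (hypothetical helper):

    /* encode a queue-entry limit into the zero's-based CAP.MQES field */
    static inline u64 demo_cap_mqes(u16 max_queue_entries)
    {
        return max_queue_entries - 1;
    }
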
@@ -1278,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_init(&ctrl->lock);
ctrl->port = req->port;
ctrl->ops = req->ops;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1405,13 +1409,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
ctrl->ops = req->ops;
/*
* Discovery controllers may use some arbitrary high value
* in order to clean up stale discovery sessions
*/
if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@@ -1491,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
!strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
@@ -1538,6 +1541,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->max_qid = NVMET_NR_QUEUES;
break;
case NVME_NQN_DISC:
case NVME_NQN_CURR:
subsys->max_qid = 0;
break;
default:

diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
@@ -146,7 +146,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
size_t entries = 0;
size_t entries = 1;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
@@ -171,6 +171,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u32 numrec = 0;
u16 status = 0;
void *buffer;
char traddr[NVMF_TRADDR_SIZE];
if (!nvmet_check_transfer_len(req, data_len))
return;
@@ -203,15 +204,19 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
hdr = buffer;
list_for_each_entry(p, &req->port->subsystems, entry) {
char traddr[NVMF_TRADDR_SIZE];
nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
nvmet_disc_subsys->subsysnqn,
traddr, NVME_NQN_CURR, numrec);
numrec++;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
p->subsys->subsysnqn, traddr,
NVME_NQN_NVME, numrec);
@@ -268,6 +273,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
id->cntrltype = NVME_CTRL_DISC;
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
@@ -387,7 +394,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
@@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
@@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -576,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
return req->sq->ctrl->subsys;
}
static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
return subsys->type != NVME_NQN_NVME;
}
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
@@ -1819,12 +1819,36 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
struct nvmet_rdma_queue *queue, *tmp;
struct nvmet_port *nport = port->nport;
mutex_lock(&nvmet_rdma_queue_mutex);
list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
queue_list) {
if (queue->port != nport)
continue;
list_del_init(&queue->queue_list);
__nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
}
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
if (cm_id)
rdma_destroy_id(cm_id);
/*
* Destroy the remaining queues, which do not belong to any
* controller yet. Doing it here, after the RDMA-CM ID has been
* destroyed, guarantees that no new queue will be created.
*/
nvmet_rdma_destroy_port_queues(port);
}
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
@@ -1976,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
return NVMET_RDMA_MAX_MDTS;
}
static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
return NVME_RDMA_MAX_QUEUE_SIZE;
}
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1987,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
.get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
@@ -1740,6 +1740,17 @@ err_port:
return ret;
}
static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
struct nvmet_tcp_queue *queue;
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
if (queue->port == port)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
}
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1749,6 +1760,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
/*
* Destroy the remaining queues, which do not belong to any
* controller yet.
*/
nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
@@ -2077,12 +2077,15 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
return;
dasd_path_clear_all_verify(device);
dasd_path_clear_all_fcsec(device);
rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
if (rc) {
dasd_path_add_tbvpm(device, tbvpm);
dasd_path_add_fcsecpm(device, fcsecpm);
dasd_device_set_timer(device, 50);
} else {
dasd_path_clear_all_verify(device);
dasd_path_clear_all_fcsec(device);
}
};

diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
@@ -201,7 +201,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
struct ccw1 *ccw;
struct dasd_ccw_req *dctl_cqr;
dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
dctl_cqr = dasd_alloc_erp_request(erp->magic, 1,
sizeof(struct DCTL_data),
device);
if (IS_ERR(dctl_cqr)) {
@@ -1652,7 +1652,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
}
/* Build new ERP request including DE/LO */
erp = dasd_alloc_erp_request((char *) &cqr->magic,
erp = dasd_alloc_erp_request(cqr->magic,
2 + 1,/* DE/LO + TIC */
sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data), device);
@@ -2388,7 +2388,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
}
/* allocate additional request block */
erp = dasd_alloc_erp_request((char *) &cqr->magic,
erp = dasd_alloc_erp_request(cqr->magic,
cplength, datasize, device);
if (IS_ERR(erp)) {
if (cqr->retries <= 0) {

diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
@@ -560,8 +560,8 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
return -EINVAL;
}
pfxdata->format = format;
pfxdata->base_address = basepriv->ned->unit_addr;
pfxdata->base_lss = basepriv->ned->ID;
pfxdata->base_address = basepriv->conf.ned->unit_addr;
pfxdata->base_lss = basepriv->conf.ned->ID;
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -736,32 +736,30 @@ dasd_eckd_cdl_reclen(int recid)
return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
int count;
struct dasd_uid *uid;
uid = &private->uid;
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
memcpy(uid->vendor, conf->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
memcpy(uid->serial, &private->ned->serial,
memcpy(uid->serial, &conf->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
uid->ssid = private->gneq->subsystemID;
uid->real_unit_addr = private->ned->unit_addr;
if (private->sneq) {
uid->type = private->sneq->sua_flags;
uid->ssid = conf->gneq->subsystemID;
uid->real_unit_addr = conf->ned->unit_addr;
if (conf->sneq) {
uid->type = conf->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
uid->base_unit_addr = private->sneq->base_unit_addr;
uid->base_unit_addr = conf->sneq->base_unit_addr;
} else {
uid->type = UA_BASE_DEVICE;
}
if (private->vdsneq) {
if (conf->vdsneq) {
for (count = 0; count < 16; count++) {
sprintf(uid->vduit+2*count, "%02x",
private->vdsneq->uit[count]);
conf->vdsneq->uit[count]);
}
}
}
@@ -776,10 +774,10 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
if (!private->conf.ned || !private->conf.gneq)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
create_uid(private);
create_uid(&private->conf, &private->uid);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -803,14 +801,15 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
* return 0 for match
*/
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
struct dasd_eckd_private *private)
struct dasd_conf *path_conf)
{
struct dasd_uid device_uid;
struct dasd_uid path_uid;
create_uid(private);
create_uid(path_conf, &path_uid);
dasd_eckd_get_uid(device, &device_uid);
return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
@@ -946,34 +945,34 @@ out_error:
return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
struct dasd_sneq *sneq;
int i, count;
private->ned = NULL;
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
count = private->conf_len / sizeof(struct dasd_sneq);
sneq = (struct dasd_sneq *)private->conf_data;
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
count = conf->len / sizeof(struct dasd_sneq);
sneq = (struct dasd_sneq *)conf->data;
for (i = 0; i < count; ++i) {
if (sneq->flags.identifier == 1 && sneq->format == 1)
private->sneq = sneq;
conf->sneq = sneq;
else if (sneq->flags.identifier == 1 && sneq->format == 4)
private->vdsneq = (struct vd_sneq *)sneq;
conf->vdsneq = (struct vd_sneq *)sneq;
else if (sneq->flags.identifier == 2)
private->gneq = (struct dasd_gneq *)sneq;
conf->gneq = (struct dasd_gneq *)sneq;
else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
private->ned = (struct dasd_ned *)sneq;
conf->ned = (struct dasd_ned *)sneq;
sneq++;
}
if (!private->ned || !private->gneq) {
private->ned = NULL;
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
if (!conf->ned || !conf->gneq) {
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
return -EINVAL;
}
return 0;
@@ -1016,9 +1015,9 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
* with the new one if this points to the same data
*/
cdp = device->path[chp].conf_data;
if (private->conf_data == cdp) {
private->conf_data = (void *)conf_data;
dasd_eckd_identify_conf_parts(private);
if (private->conf.data == cdp) {
private->conf.data = (void *)conf_data;
dasd_eckd_identify_conf_parts(&private->conf);
}
ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
@@ -1036,8 +1035,8 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
private->conf_data = NULL;
private->conf_len = 0;
private->conf.data = NULL;
private->conf.len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
device->path[i].conf_data = NULL;
@@ -1071,15 +1070,55 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
char *print_uid, size_t len)
{
struct dasd_uid uid;
create_uid(conf, &uid);
if (strlen(uid.vduit) > 0)
snprintf(print_uid, len,
"%s.%s.%04x.%02x.%s",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr, uid.vduit);
else
snprintf(print_uid, len,
"%s.%s.%04x.%02x",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
char print_path_uid[60], print_device_uid[60];
struct dasd_conf path_conf;
path_conf.data = conf_data;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf))
return 1;
if (dasd_eckd_compare_path_uid(device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_path_uid, sizeof(print_path_uid));
dasd_eckd_get_uid_string(&private->conf, print_device_uid, sizeof(print_device_uid));
dev_err(&device->cdev->dev,
"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
lpm, print_path_uid, print_device_uid);
return 1;
}
return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
struct dasd_eckd_private *private;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
@@ -1109,11 +1148,11 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
if (!conf_data_saved) {
/* initially clear previously stored conf_data */
dasd_eckd_clear_conf_data(device);
private->conf_data = conf_data;
private->conf_len = conf_len;
if (dasd_eckd_identify_conf_parts(private)) {
private->conf_data = NULL;
private->conf_len = 0;
private->conf.data = conf_data;
private->conf.len = conf_len;
if (dasd_eckd_identify_conf_parts(&private->conf)) {
private->conf.data = NULL;
private->conf.len = 0;
kfree(conf_data);
continue;
}
@@ -1123,59 +1162,11 @@
*/
dasd_eckd_generate_uid(device);
conf_data_saved++;
} else {
path_private.conf_data = conf_data;
path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(
&path_private)) {
path_private.conf_data = NULL;
path_private.conf_len = 0;
kfree(conf_data);
continue;
}
if (dasd_eckd_compare_path_uid(
device, &path_private)) {
uid = &path_private.uid;
if (strlen(uid->vduit) > 0)
snprintf(print_path_uid,
sizeof(print_path_uid),
"%s.%s.%04x.%02x.%s",
uid->vendor, uid->serial,
uid->ssid, uid->real_unit_addr,
uid->vduit);
else
snprintf(print_path_uid,
sizeof(print_path_uid),
"%s.%s.%04x.%02x",
uid->vendor, uid->serial,
uid->ssid,
uid->real_unit_addr);
uid = &private->uid;
if (strlen(uid->vduit) > 0)
snprintf(print_device_uid,
sizeof(print_device_uid),
"%s.%s.%04x.%02x.%s",
uid->vendor, uid->serial,
uid->ssid, uid->real_unit_addr,
uid->vduit);
else
snprintf(print_device_uid,
sizeof(print_device_uid),
"%s.%s.%04x.%02x",
uid->vendor, uid->serial,
uid->ssid,
uid->real_unit_addr);
dev_err(&device->cdev->dev,
"Not all channel paths lead to "
"the same device, path %02X leads to "
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
path_err = -EINVAL;
dasd_path_add_cablepm(device, lpm);
continue;
}
path_private.conf_data = NULL;
path_private.conf_len = 0;
} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
dasd_path_add_cablepm(device, lpm);
path_err = -EINVAL;
kfree(conf_data);
continue;
}
pos = pathmask_to_pos(lpm);
@@ -1197,8 +1188,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
}
dasd_eckd_read_fc_security(device);
return path_err;
}
@@ -1213,7 +1202,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
/* is transport mode supported? */
fcx_in_css = css_general_characteristics.fcx;
fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
@@ -1282,9 +1271,9 @@ static int rebuild_device_uid(struct dasd_device *device,
"returned error %d", rc);
break;
}
memcpy(private->conf_data, data->rcd_buffer,
memcpy(private->conf.data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
if (dasd_eckd_identify_conf_parts(private)) {
if (dasd_eckd_identify_conf_parts(&private->conf)) {
rc = -ENODEV;
} else /* first valid path is enough */
break;
@@ -1299,11 +1288,10 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
static void dasd_eckd_path_available_action(struct dasd_device *device,
struct pe_handler_work_data *data)
{
struct dasd_eckd_private path_private;
struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
struct dasd_conf path_conf;
unsigned long flags;
char print_uid[60];
int rc, pos;
@@ -1367,11 +1355,11 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
*/
memcpy(&path_rcd_buf, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
path_private.conf_data = (void *) &path_rcd_buf;
path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_private)) {
path_private.conf_data = NULL;
path_private.conf_len = 0;
path_conf.data = (void *)&path_rcd_buf;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf)) {
path_conf.data = NULL;
path_conf.len = 0;
continue;
}
@@ -1382,7 +1370,7 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
* the first working path UID will be used as device UID
*/
if (dasd_path_get_opm(device) &&
dasd_eckd_compare_path_uid(device, &path_private)) {
dasd_eckd_compare_path_uid(device, &path_conf)) {
/*
* the comparison was not successful
* rebuild the device UID with at least one
@@ -1396,20 +1384,8 @@
*/
if (rebuild_device_uid(device, data) ||
dasd_eckd_compare_path_uid(
device, &path_private)) {
uid = &path_private.uid;
if (strlen(uid->vduit) > 0)
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x.%s",
uid->vendor, uid->serial,
uid->ssid, uid->real_unit_addr,
uid->vduit);
else
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x",
uid->vendor, uid->serial,
uid->ssid,
uid->real_unit_addr);
device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_uid, sizeof(print_uid));
dev_err(&device->cdev->dev,
"The newly added channel path %02X "
"will not be used because it leads "
@@ -1427,6 +1403,14 @@
if (conf_data) {
memcpy(conf_data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
} else {
/*
* the path is operational, but its config data could not
* be stored due to a low-memory condition; add it to the
* error path mask and schedule a path verification later
* so that the path can be added again
*/
epm |= lpm;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
@@ -1447,7 +1431,10 @@
}
dasd_path_add_nppm(device, npm);
dasd_path_add_ppm(device, ppm);
dasd_path_add_tbvpm(device, epm);
if (epm) {
dasd_path_add_tbvpm(device, epm);
dasd_device_set_timer(device, 50);
}
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -1625,8 +1612,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
prssdp = cqr->data;
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
prssdp->lss = private->ned->ID;
prssdp->volume = private->ned->unit_addr;
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
@@ -2085,11 +2072,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->path_thrhld = DASD_ECKD_PATH_THRHLD;
device->path_interval = DASD_ECKD_PATH_INTERVAL;
if (private->gneq) {
if (private->conf.gneq) {
value = 1;
for (i = 0; i < private->gneq->timeout.value; i++)
for (i = 0; i < private->conf.gneq->timeout.value; i++)
value = 10 * value;
value = value * private->gneq->timeout.number;
value = value * private->conf.gneq->timeout.number;
/* do not accept useless values */
if (value != 0 && value <= DASD_EXPIRES_MAX)
device->default_expires = value;
@@ -2121,6 +2108,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err3;
dasd_eckd_read_fc_security(device);
dasd_path_create_kobjects(device);
/* Read Feature Codes */
@@ -2195,10 +2183,10 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
return;
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
private->conf.ned = NULL;
private->conf.sneq = NULL;
private->conf.vdsneq = NULL;
private->conf.gneq = NULL;
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
}
@@ -3750,8 +3738,8 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
* subset.
*/
ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
ras_data->lss = private->ned->ID;
ras_data->dev_addr = private->ned->unit_addr;
ras_data->lss = private->conf.ned->ID;
ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
if (by_extent) {
@@ -4293,8 +4281,8 @@ static int prepare_itcw(struct itcw *itcw,
memset(&pfxdata, 0, sizeof(pfxdata));
pfxdata.format = 1; /* PFX with LRE */
pfxdata.base_address = basepriv->ned->unit_addr;
pfxdata.base_lss = basepriv->ned->ID;
pfxdata.base_address = basepriv->conf.ned->unit_addr;
pfxdata.base_lss = basepriv->conf.ned->ID;
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -4963,9 +4951,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
info->confdata_size = min((unsigned long)private->conf_len,
sizeof(info->configuration_data));
memcpy(info->configuration_data, private->conf_data,
info->confdata_size = min_t(unsigned long, private->conf.len,
sizeof(info->configuration_data));
memcpy(info->configuration_data, private->conf.data,
info->confdata_size);
return 0;
}
@@ -5808,6 +5796,8 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
if (rc)
goto out_err;
dasd_eckd_read_fc_security(device);
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
@@ -5820,15 +5810,7 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
if (strlen(uid.vduit) > 0)
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
uid.ssid, uid.base_unit_addr, uid.vduit);
else
snprintf(print_uid, sizeof(print_uid),
"%s.%s.%04x.%02x", uid.vendor, uid.serial,
uid.ssid, uid.base_unit_addr);
dasd_eckd_get_uid_string(&private->conf, print_uid, sizeof(print_uid));
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
@@ -5966,8 +5948,8 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
prssdp->lss = private->ned->ID;
prssdp->volume = private->ned->unit_addr;
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;

diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
@@ -658,16 +658,19 @@ struct dasd_conf_data {
struct dasd_gneq gneq;
} __packed;
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
struct dasd_conf {
u8 *data;
int len;
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
};
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
struct dasd_conf conf;
struct eckd_count count_area[5];
int init_cqr_status;

diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
@@ -24,7 +24,7 @@
#include "dasd_int.h"
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
dasd_alloc_erp_request(unsigned int magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
@@ -33,8 +33,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
int size;
/* Sanity checks */
BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
@@ -62,7 +62,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
strncpy((char *) &cqr->magic, magic, 4);
cqr->magic = magic;
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);

diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
@@ -887,7 +887,7 @@ void dasd_proc_exit(void);
/* externals in dasd_erp.c */
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
struct dasd_ccw_req *dasd_alloc_erp_request(unsigned int, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
@@ -1305,6 +1305,15 @@ static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
dasd_path_preferred(device, chp);
}
static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_fcsec(device, chp);
}
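
dasd path masks are eight bits wide with the most significant bit naming channel path 0, hence the pm & (0x80 >> chp) test in the new helper and its siblings above it in the file. A worked check (hypothetical):

    #include <linux/printk.h>
    #include <linux/types.h>

    /* pm = 0xa0 = 1010 0000b selects channel paths 0 and 2 */
    static void demo_walk_paths(__u8 pm)
    {
        int chp;

        for (chp = 0; chp < 8; chp++)
            if (pm & (0x80 >> chp))
                pr_info("channel path %d is set\n", chp);
    }
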
/*
* set functions for path masks
* the existing path mask will be replaced by the given path mask

diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
@@ -650,8 +650,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
/**
* dasd_biodasdinfo() - fill out the dasd information structure
* @disk [in]: pointer to gendisk structure that references a DASD
* @info [out]: pointer to the dasd_information2_t structure
* @disk: [in] pointer to gendisk structure that references a DASD
* @info: [out] pointer to the dasd_information2_t structure
*
* Provide access to DASD specific information.
* The gendisk structure is checked if it belongs to the DASD driver by

diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -642,6 +644,18 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
}
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
struct blk_mq_queue_map *map)
{
struct scsi_qla_host *vha = lport->private;
int rc;
rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
if (rc)
ql_log(ql_log_warn, vha, 0x21de,
"pci map queue failed 0x%x", rc);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@@ -676,6 +690,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = 8,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,

diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
@@ -7,6 +7,7 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
/*
@@ -497,6 +498,8 @@ struct nvme_fc_port_template {
int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *ls_rsp);
void (*map_queues)(struct nvme_fc_local_port *localport,
struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -779,6 +782,10 @@ struct nvmet_fc_target_port {
* LS received.
* Entrypoint is Mandatory.
*
* @map_queues: This function lets the driver expose the queue mapping
* to the block layer.
* Entrypoint is Optional.
*
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call

diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
#define NVME_RDMA_MAX_QUEUE_SIZE 128
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};

diff --git a/include/linux/nvme.h b/include/linux/nvme.h
@@ -27,8 +27,20 @@
#define NVME_NSID_ALL 0xffffffff
enum nvme_subsys_type {
NVME_NQN_DISC = 1, /* Discovery type target subsystem */
NVME_NQN_NVME = 2, /* NVME type target subsystem */
/* Referral to another discovery type target subsystem */
NVME_NQN_DISC = 1,
/* NVME type target subsystem */
NVME_NQN_NVME = 2,
/* Current discovery type target subsystem */
NVME_NQN_CURR = 3,
};
enum nvme_ctrl_type {
NVME_CTRL_IO = 1, /* I/O controller */
NVME_CTRL_DISC = 2, /* Discovery controller */
NVME_CTRL_ADMIN = 3, /* Administrative controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -244,7 +256,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
__u8 rsvd100[28];
__u8 rsvd100[11];
__u8 cntrltype;
__u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@@ -312,6 +326,7 @@ struct nvme_id_ctrl {
};
enum {
NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
@@ -1303,6 +1318,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
/* Discovery log page entry flags (EFLAGS): */
enum {
NVME_DISC_EFLAGS_EPCSD = (1 << 1),
NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
};
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -1312,7 +1333,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
__u8 resv8[22];
__le16 eflags;
__u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
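
The two-byte EFLAGS field is carved out of the old 22-byte reserved area (2 + 20 = 22), so the discovery log page entry keeps its 1024-byte size. A compile-time check one could add (hypothetical, not part of the patch):

    #include <linux/build_bug.h>

    static_assert(sizeof(struct nvmf_disc_rsp_page_entry) == 1024,
                  "discovery log entry layout changed size");
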