block-5.11-2021-01-29

Merge tag 'block-5.11-2021-01-29' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "All over the place fixes for this release:

   - blk-cgroup iteration teardown resched fix (Baolin)

   - NVMe pull request from Christoph:
        - add another Write Zeroes quirk (Chaitanya Kulkarni)
        - handle a no path available corner case (Daniel Wagner)
        - use the proper RCU aware list_add helper (Chao Leng)

   - bcache regression fix (Coly)

   - bdev->bd_size_lock IRQ fix. This will be fixed in drivers for 5.12,
     but for now, we'll make it IRQ safe (Damien)

   - null_blk zoned init fix (Damien)

   - add_partition() error handling fix (Dinghao)

   - s390 dasd kobject fix (Jan)

   - nbd fix for freezing queue while adding connections (Josef)

   - tag queueing regression fix (Ming)

   - revert of a patch that inadvertently meant that we regressed write
     performance on raid (Maxim)"

* tag 'block-5.11-2021-01-29' of git://git.kernel.dk/linux-block:
  null_blk: cleanup zoned mode initialization
  nvme-core: use list_add_tail_rcu instead of list_add_tail for nvme_init_ns_head
  nvme-multipath: Early exit if no path is available
  nvme-pci: add the DISABLE_WRITE_ZEROES quirk for a SPCC device
  bcache: only check feature sets when sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES
  block: fix bd_size_lock use
  blk-cgroup: Use cond_resched() when destroy blkgs
  Revert "block: simplify set_init_blocksize" to regain lost performance
  nbd: freeze the queue while we're adding connections
  s390/dasd: Fix inconsistent kobject removal
  block: Fix an error handling in add_partition
  blk-mq: test QUEUE_FLAG_HCTX_ACTIVE for sbitmap_shared in hctx_may_queue
Merged by Linus Torvalds, 2021-01-29 13:50:06 -08:00, as commit 2ba1c4d1a4.
14 changed files with 75 additions and 29 deletions.

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1016,6 +1016,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
+        might_sleep();
+
         spin_lock_irq(&blkcg->lock);
 
         while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1023,14 +1025,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                                                 struct blkcg_gq, blkcg_node);
                 struct request_queue *q = blkg->q;
 
-                if (spin_trylock(&q->queue_lock)) {
-                        blkg_destroy(blkg);
-                        spin_unlock(&q->queue_lock);
-                } else {
+                if (need_resched() || !spin_trylock(&q->queue_lock)) {
+                        /*
+                         * Given that the system can accumulate a huge number
+                         * of blkgs in pathological cases, check to see if we
+                         * need to reschedule to avoid softlockup.
+                         */
                         spin_unlock_irq(&blkcg->lock);
-                        cpu_relax();
+                        cond_resched();
                         spin_lock_irq(&blkcg->lock);
+                        continue;
                 }
+
+                blkg_destroy(blkg);
+                spin_unlock(&q->queue_lock);
         }
 
         spin_unlock_irq(&blkcg->lock);
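
The shape of the fix is generic: try the inner lock, and on contention or a
pending reschedule drop the outer lock, yield, and retry the same item. A
minimal userspace sketch of that back-off loop, with pthread_mutex_trylock()
and sched_yield() standing in for the kernel's spin_trylock() and
cond_resched() (all names illustrative):

    #include <pthread.h>
    #include <sched.h>
    #include <stdlib.h>

    struct item {
        struct item *next;
        pthread_mutex_t lock;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *head;

    static void destroy_all(void)
    {
        pthread_mutex_lock(&list_lock);
        while (head) {
            struct item *it = head;

            if (pthread_mutex_trylock(&it->lock) != 0) {
                /* Contended: drop the outer lock, let the scheduler
                 * run someone else, then retry the same item. */
                pthread_mutex_unlock(&list_lock);
                sched_yield();
                pthread_mutex_lock(&list_lock);
                continue;
            }
            head = it->next;
            pthread_mutex_unlock(&it->lock);
            free(it);
        }
        pthread_mutex_unlock(&list_lock);
    }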

--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -304,7 +304,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                 struct request_queue *q = hctx->queue;
                 struct blk_mq_tag_set *set = q->tag_set;
 
-                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                         return true;
                 users = atomic_read(&set->active_queues_shared_sbitmap);
         } else {
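
The one-liner is easy to misread: BLK_MQ_S_TAG_ACTIVE is a bit number defined
for hctx->state, not for q->queue_flags, so the old test silently read an
unrelated bit of the flags word. The failure mode in isolation (bit values
made up for illustration, not the kernel's):

    #include <assert.h>

    enum { STATE_TAG_ACTIVE = 1 };        /* bit defined for a "state" word */
    enum { QUEUE_FLAG_HCTX_ACTIVE = 13 }; /* bit defined for a "flags" word */

    int main(void)
    {
        unsigned long queue_flags = 1UL << QUEUE_FLAG_HCTX_ACTIVE;

        /* Wrong pairing: bit 1 of queue_flags is some other flag. */
        assert(!(queue_flags & (1UL << STATE_TAG_ACTIVE)));
        /* Correct pairing: the bit actually defined for this word. */
        assert(queue_flags & (1UL << QUEUE_FLAG_HCTX_ACTIVE));
        return 0;
    }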

--- a/block/genhd.c
+++ b/block/genhd.c
@@ -45,10 +45,11 @@ static void disk_release_events(struct gendisk *disk);
 
 void set_capacity(struct gendisk *disk, sector_t sectors)
 {
         struct block_device *bdev = disk->part0;
+        unsigned long flags;
 
-        spin_lock(&bdev->bd_size_lock);
+        spin_lock_irqsave(&bdev->bd_size_lock, flags);
         i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-        spin_unlock(&bdev->bd_size_lock);
+        spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 EXPORT_SYMBOL(set_capacity);
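
spin_lock_irqsave() is used rather than spin_lock_irq() because some callers
already run with interrupts disabled: the flags argument captures the
caller's interrupt state and spin_unlock_irqrestore() puts it back exactly as
found. A userspace analogue of that save/restore discipline, with a blocked
signal playing the part of disabled interrupts (illustrative names):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;
    static long long dev_size;

    static void set_size(long long new_size)
    {
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        /* "irqsave": block the signal, remembering the caller's mask. */
        pthread_sigmask(SIG_BLOCK, &block, &old);
        pthread_mutex_lock(&size_lock);

        dev_size = new_size;

        pthread_mutex_unlock(&size_lock);
        /* "irqrestore": put back whatever mask was in effect before. */
        pthread_sigmask(SIG_SETMASK, &old, NULL);
    }

    int main(void)
    {
        set_size(1LL << 30);
        printf("%lld\n", dev_size);
        return 0;
    }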

--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -88,9 +88,11 @@ static int (*check_part[])(struct parsed_partitions *) = {
 
 static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
-        spin_lock(&bdev->bd_size_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&bdev->bd_size_lock, flags);
         i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-        spin_unlock(&bdev->bd_size_lock);
+        spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 
 static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
@@ -384,7 +386,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
 
         err = blk_alloc_devt(bdev, &devt);
         if (err)
-                goto out_bdput;
+                goto out_put;
         pdev->devt = devt;
 
         /* delay uevent until 'holders' subdir is created */
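
The second hunk is the classic goto-unwind rule: the error path must jump to
the label that undoes everything acquired so far, and once the partition
device has been initialized that means releasing through the device
reference, not the earlier bdput-only label. The idiom in a self-contained
form (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
        char *meta, *data;
        FILE *log;

        meta = malloc(64);
        if (!meta)
            goto out;
        data = malloc(4096);
        if (!data)
            goto out_free_meta;
        log = fopen("/tmp/example.log", "w");
        if (!log)
            goto out_free_data;   /* not out_free_meta: would leak data */

        fclose(log);
        free(data);
        free(meta);
        return 0;

    out_free_data:
        free(data);
    out_free_meta:
        free(meta);
    out:
        return -1;
    }

    int main(void) { return setup(); }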

--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1022,6 +1022,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
         if (!sock)
                 return err;
 
+        /*
+         * We need to make sure we don't get any errant requests while we're
+         * reallocating the ->socks array.
+         */
+        blk_mq_freeze_queue(nbd->disk->queue);
+
         if (!netlink && !nbd->task_setup &&
             !test_bit(NBD_RT_BOUND, &config->runtime_flags))
                 nbd->task_setup = current;
@@ -1060,10 +1066,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
         nsock->cookie = 0;
         socks[config->num_connections++] = nsock;
         atomic_inc(&config->live_connections);
+        blk_mq_unfreeze_queue(nbd->disk->queue);
 
         return 0;
 
 put_socket:
+        blk_mq_unfreeze_queue(nbd->disk->queue);
         sockfd_put(sock);
         return err;
 }
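
blk_mq_freeze_queue() waits for requests already in flight and holds off new
ones, so nothing in the I/O path can observe the ->socks array mid-realloc;
note that the error path must unfreeze as well. A userspace sketch of the
same discipline, with a rwlock standing in for the queue freeze (illustrative
names, not the nbd code):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_rwlock_t frozen = PTHREAD_RWLOCK_INITIALIZER;
    static int *socks;
    static int num_socks;

    static int add_sock(int fd)
    {
        int *bigger;

        pthread_rwlock_wrlock(&frozen);          /* "freeze": exclude I/O */
        bigger = realloc(socks, (num_socks + 1) * sizeof(*bigger));
        if (!bigger) {
            pthread_rwlock_unlock(&frozen);      /* unfreeze on error too */
            return -1;
        }
        socks = bigger;
        socks[num_socks++] = fd;
        pthread_rwlock_unlock(&frozen);          /* "unfreeze" */
        return 0;
    }

    static int pick_sock(unsigned int i)         /* the "I/O path" */
    {
        int fd;

        pthread_rwlock_rdlock(&frozen);
        fd = num_socks ? socks[i % num_socks] : -1;
        pthread_rwlock_unlock(&frozen);
        return fd;
    }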

--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -6,7 +6,10 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+        return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
 
 static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
 {
@@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
                 return -EINVAL;
         }
 
-        zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
-        dev_capacity_sects = MB_TO_SECTS(dev->size);
-        dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
-        dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
-        if (dev_capacity_sects & (dev->zone_size_sects - 1))
-                dev->nr_zones++;
+        zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+        dev_capacity_sects = mb_to_sects(dev->size);
+        dev->zone_size_sects = mb_to_sects(dev->zone_size);
+        dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+                >> ilog2(dev->zone_size_sects);
 
         dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
                                     GFP_KERNEL | __GFP_ZERO);
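
Since the zone size is validated to be a power of two, round_up() followed by
the shift yields exactly what the old shift-then-conditional-increment did;
the cleanup changes no behavior. A quick standalone check of that equivalence
(for example, a 600 MB device with 256 MB zones gives 3 zones either way;
round_up_p2() is the usual power-of-two formulation):

    #include <assert.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9
    #define SZ_1M (1UL << 20)

    static uint64_t mb_to_sects(unsigned long mb)
    {
        return ((uint64_t)mb * SZ_1M) >> SECTOR_SHIFT;
    }

    static uint64_t round_up_p2(uint64_t x, uint64_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        uint64_t cap = mb_to_sects(600);
        uint64_t zone = mb_to_sects(256);    /* power of two */
        unsigned int ilog2_zone = 63 - __builtin_clzll(zone);

        uint64_t old_way = cap >> ilog2_zone;
        if (cap & (zone - 1))
            old_way++;                       /* partial last zone */

        uint64_t new_way = round_up_p2(cap, zone) >> ilog2_zone;

        assert(old_way == new_way && new_way == 3);
        return 0;
    }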

--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -33,6 +33,8 @@
 #define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+        if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+                return 0; \
         return (((sb)->feature_compat & \
                 BCH##_FEATURE_COMPAT_##flagname) != 0); \
 } \
@@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
 #define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+        if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+                return 0; \
         return (((sb)->feature_ro_compat & \
                 BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
 } \
@@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
 #define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+        if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+                return 0; \
         return (((sb)->feature_incompat & \
                 BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
 } \
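
The gate matters because superblocks older than
BCACHE_SB_VERSION_CDEV_WITH_FEATURES never wrote the feature fields, so
whatever bytes happen to sit there must not be interpreted as flags. The
gated-accessor pattern the macros generate, reduced to a standalone example
(version constant and bit values made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define SB_VERSION_WITH_FEATURES 5

    struct sb {
        uint32_t version;
        uint64_t feature_compat;
    };

    #define FEATURE_FUNCS(name, bit)                                 \
    static inline int has_feature_##name(const struct sb *sb)        \
    {                                                                 \
        if (sb->version < SB_VERSION_WITH_FEATURES)                   \
            return 0; /* field predates this version: garbage */      \
        return (sb->feature_compat & (1ULL << (bit))) != 0;           \
    }

    FEATURE_FUNCS(large_bucket, 0)

    int main(void)
    {
        /* An old superblock whose feature field holds stale bytes. */
        struct sb old = { .version = 3, .feature_compat = 0xdeadbeef };

        assert(!has_feature_large_bucket(&old));
        return 0;
    }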

--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3829,7 +3829,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                 }
         }
 
-        list_add_tail(&ns->siblings, &head->list);
+        list_add_tail_rcu(&ns->siblings, &head->list);
         ns->head = head;
         mutex_unlock(&ctrl->subsys->lock);
         return 0;
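
The _rcu variant matters because readers walk head->list locklessly under
RCU: list_add_tail_rcu() publishes the new entry with a release barrier, so a
reader that sees the pointer is guaranteed to see the fully initialized node.
That core guarantee, expressed with C11 atomics (an illustration of the
ordering, not the kernel's implementation):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct node {
        int data;
        struct node *next;
    };

    static _Atomic(struct node *) head;

    static void publish(int data)
    {
        struct node *n = malloc(sizeof(*n));

        n->data = data;               /* 1. fully initialize first */
        n->next = atomic_load(&head);
        /* 2. release-store: a reader that observes n also observes
         * the writes above. A plain store lacks this ordering. */
        atomic_store_explicit(&head, n, memory_order_release);
    }

    static int read_first(void)       /* lockless "RCU-side" reader */
    {
        struct node *n =
            atomic_load_explicit(&head, memory_order_acquire);

        return n ? n->data : -1;
    }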

--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
         }
 
         for (ns = nvme_next_ns(head, old);
-             ns != old;
+             ns && ns != old;
              ns = nvme_next_ns(head, ns)) {
                 if (nvme_path_is_disabled(ns))
                         continue;
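
The added "ns &&" covers the corner where the siblings list shrinks while a
path is being chosen: nvme_next_ns() can then hand back NULL, which the old
condition would happily compare and the loop body dereference. In miniature,
with a plain NULL-terminated list standing in for the RCU-protected siblings
list (names illustrative):

    #include <stddef.h>

    struct path {
        int disabled;
        struct path *next;
    };

    static struct path *next_entry(const struct path *p)
    {
        return p ? p->next : NULL;    /* may return NULL, like the helper */
    }

    static struct path *round_robin(struct path *old)
    {
        for (struct path *p = next_entry(old);
             p && p != old;           /* NULL-check before the wrap check */
             p = next_entry(p)) {
            if (!p->disabled)
                return p;
        }
        return NULL;                  /* no path available */
    }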

--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3257,6 +3257,8 @@ static const struct pci_device_id nvme_id_table[] = {
                 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
         { PCI_DEVICE(0x15b7, 0x2001),   /* Sandisk Skyhawk */
                 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+        { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
+                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                 .driver_data = NVME_QUIRK_SINGLE_VECTOR },
         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },

--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1874,18 +1874,26 @@ void dasd_path_create_kobjects(struct dasd_device *device)
 }
 EXPORT_SYMBOL(dasd_path_create_kobjects);
 
-/*
- * As we keep kobjects for the lifetime of a device, this function must not be
- * called anywhere but in the context of offlining a device.
- */
-void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
 {
         if (device->path[chp].in_sysfs) {
                 kobject_put(&device->path[chp].kobj);
                 device->path[chp].in_sysfs = false;
         }
 }
-EXPORT_SYMBOL(dasd_path_remove_kobj);
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+        int i;
+
+        for (i = 0; i < 8; i++)
+                dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
 
 int dasd_add_sysfs_files(struct ccw_device *cdev)
 {
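
Centralizing removal keeps dasd_path_remove_kobj() idempotent through the
in_sysfs flag: however many offline paths reach it, the kobject reference is
dropped exactly once. The guard in isolation, with a bare refcount standing
in for struct kobject (illustrative only):

    #include <assert.h>
    #include <stdbool.h>

    struct path_kobj {
        int refs;
        bool in_sysfs;
    };

    static void put(struct path_kobj *k)
    {
        k->refs--;
        assert(k->refs >= 0);
    }

    static void remove_kobj(struct path_kobj *k)
    {
        if (k->in_sysfs) {            /* second call becomes a no-op */
            put(k);
            k->in_sysfs = false;
        }
    }

    int main(void)
    {
        struct path_kobj k = { .refs = 1, .in_sysfs = true };

        remove_kobj(&k);
        remove_kobj(&k);              /* safe: no double put */
        assert(k.refs == 0);
        return 0;
    }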

--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1036,7 +1036,6 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
                 device->path[i].ssid = 0;
                 device->path[i].chpid = 0;
                 dasd_path_notoper(device, i);
-                dasd_path_remove_kobj(device, i);
         }
 }
 
@@ -2173,6 +2172,7 @@ out_err2:
         device->block = NULL;
 out_err1:
         dasd_eckd_clear_conf_data(device);
+        dasd_path_remove_kobjects(device);
         kfree(device->private);
         device->private = NULL;
         return rc;
@@ -2191,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
         private->vdsneq = NULL;
         private->gneq = NULL;
         dasd_eckd_clear_conf_data(device);
+        dasd_path_remove_kobjects(device);
 }
 
 static struct dasd_ccw_req *

--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -858,7 +858,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
 void dasd_remove_sysfs_files(struct ccw_device *);
 void dasd_path_create_kobj(struct dasd_device *, int);
 void dasd_path_create_kobjects(struct dasd_device *);
-void dasd_path_remove_kobj(struct dasd_device *, int);
+void dasd_path_remove_kobjects(struct dasd_device *);
 
 struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
 struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);

--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -130,7 +130,15 @@ EXPORT_SYMBOL(truncate_bdev_range);
 
 static void set_init_blocksize(struct block_device *bdev)
 {
-        bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
+        unsigned int bsize = bdev_logical_block_size(bdev);
+        loff_t size = i_size_read(bdev->bd_inode);
+
+        while (bsize < PAGE_SIZE) {
+                if (size & bsize)
+                        break;
+                bsize <<= 1;
+        }
+        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 
 int set_blocksize(struct block_device *bdev, int size)
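
The restored loop picks the largest power-of-two block size that divides the
device size, capped at the page size, instead of always using the logical
block size; per the log above, the simpler version cost write performance on
raid. The computation is easy to verify in isolation:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    static unsigned int init_blocksize(unsigned int bsize, uint64_t size)
    {
        while (bsize < PAGE_SIZE) {
            if (size & bsize)         /* size is an odd multiple of bsize */
                break;
            bsize <<= 1;
        }
        return bsize;
    }

    int main(void)
    {
        /* 1 GiB device, 512-byte logical blocks: grows to 4096. */
        assert(init_blocksize(512, 1ULL << 30) == 4096);
        /* 1536-byte image is not a multiple of 1024: stays at 512. */
        assert(init_blocksize(512, 1536) == 512);
        return 0;
    }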