for-linus-20190202

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlxVxeIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpq35EADCoVRFF8mi7wBhNvfN/yLqC9sgnqJM7vF3
gPpm/E4MsTzCXrhgRmulrikfM8ywOT5ZBgIQp+BrhQgDZIlJ9fcyinFjU7o/gRm5
R32IMJ4o7uh+YKWQyRSQu1WCaF3hvbNGUT7duMfTKjZ6t9TxTBgy46wYhb7YmNAP
Ur8C1+4NfF/aHp59n6COM70KdYfswRxtgdEHGfmHAbmKDpvAeC+7I2LPpjTf5bH/
YULnxk5sQFvCINDJH4zprZ0lSDy+63qk+Q1xxqpFNR1tnmRIVNvKhPPB5xfRLvzB
mw3JdtwnvoY8Yv6eCs0u7mZs5L3I8zTI+4RtHH9nPD7ykIiUHpejgaRc4TGy2tox
Dpgfc/Nvdsscpuy4QcFNHbWWBUu5pa5Li+KVS0FEP9FmD5UhcvVmZaZK+V6NuGO4
9G9wraASFasPK7I0FMHlWLMIWKdj4s4n/H55QnP6yvFsnsrFaqnDwybUFiicjFkv
hmNQmq8+5p0n3ZBVGQ/SI//vPUjuaFUKU2MhhW0NVz+KkgmEnOJ+W+C77//U33l+
zhBURtdlnqfImFejEayhhtCtMATJcf2E1rHlA3nM6JyVWMRvQR6asb78QhAKZd6w
el7rqRdWMwryUYFaROVurfOBMPFdhhyDB1qrOzAZIkhFqWwM9GX6+MH/y8+00HSA
aA/rXg/daw==
=8Zz+
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20190202' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release. This contains:

  - MD pull request from Song, fixing a recovery OOM issue (Alexei)

  - Fix for a sync related stall (Jianchao)

  - Dummy callback for timeouts (Tetsuo)

  - IDE atapi sense ordering fix (me)"

* tag 'for-linus-20190202' of git://git.kernel.dk/linux-block:
  ide: ensure atapi sense request aren't preempted
  blk-mq: fix a hung issue when fsync
  block: pass no-op callback to INIT_WORK().
  md/raid5: fix 'out of memory' during raid cache recovery
commit c8864cb70f
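For context on the "dummy callback for timeouts" fix in the diff below: blk_rq_timed_out_timer() unconditionally schedules q->timeout_work, so a queue whose timeout work item was initialized with a NULL handler would make the workqueue jump through a NULL function pointer the first time a timeout fired. The following is not kernel code; it is a minimal userspace sketch (hypothetical names, plain C) of why scheduling a work item with a NULL handler crashes and why registering an empty no-op handler, as the patch does with blk_timeout_work(), is the safe alternative.

#include <stdio.h>

/* Minimal stand-in for struct work_struct: just a function pointer. */
struct work_item {
	void (*func)(struct work_item *work);
};

/* Stand-in for INIT_WORK(): record the handler to run later. */
static void init_work(struct work_item *work, void (*func)(struct work_item *))
{
	work->func = func;
}

/* Stand-in for the workqueue worker: it invokes the handler
 * unconditionally, so a NULL handler would be a crash. */
static void run_work(struct work_item *work)
{
	work->func(work);
}

/* Equivalent of the new, empty blk_timeout_work(): do nothing. */
static void timeout_noop(struct work_item *work)
{
	(void)work;
}

int main(void)
{
	struct work_item timeout_work;

	/* Old behaviour: init_work(&timeout_work, NULL) followed by
	 * run_work() would jump through a NULL pointer when a timeout
	 * got scheduled. Registering a harmless no-op avoids that. */
	init_work(&timeout_work, timeout_noop);
	run_work(&timeout_work);

	puts("timeout work ran without crashing");
	return 0;
}

With the kernel fix, queues set up in blk_alloc_queue_node() always carry a valid (if empty) handler, so a stray timeout no longer dereferences NULL.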
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
 	kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-	INIT_WORK(&q->timeout_work, NULL);
+	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
 	INIT_LIST_HEAD(&q->blkg_list);
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-	blk_mq_run_hw_queue(hctx, true);
+	blk_mq_sched_restart(hctx);
 }
 
 /**
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-	struct request *sense_rq = drive->sense_rq;
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *sense_rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwif->lock, flags);
 
 	/* deferred failure from ide_prep_sense() */
 	if (!drive->sense_rq_armed) {
 		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
 			drive->name);
+		spin_unlock_irqrestore(&hwif->lock, flags);
 		return -ENOMEM;
 	}
 
+	sense_rq = drive->sense_rq;
 	ide_req(sense_rq)->special = special;
 	drive->sense_rq_armed = false;
 
 	drive->hwif->rq = NULL;
 
 	ide_insert_request_head(drive, sense_rq);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	}
 
 	if (!blk_update_request(rq, error, nr_bytes)) {
-		if (rq == drive->sense_rq)
+		if (rq == drive->sense_rq) {
 			drive->sense_rq = NULL;
+			drive->sense_rq_active = false;
+		}
 
 		__blk_mq_end_request(rq, error);
 		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-			  const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+			  bool local_requeue)
 {
-	ide_drive_t *drive = hctx->queue->queuedata;
-	ide_hwif_t *hwif = drive->hwif;
+	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	struct request *rq = bd->rq;
 	ide_startstop_t startstop;
 
 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ide_lock_host(host, hwif))
 		return BLK_STS_DEV_RESOURCE;
 
-	blk_mq_start_request(rq);
-
 	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
@@ -510,18 +505,6 @@ repeat:
 		hwif->cur_dev = drive;
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
-		/*
-		 * we know that the queue isn't empty, but this can happen
-		 * if ->prep_rq() decides to kill a request
-		 */
-		if (!rq) {
-			rq = bd->rq;
-			if (!rq) {
-				ide_unlock_port(hwif);
-				goto out;
-			}
-		}
-
 		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
@@ -560,9 +543,12 @@ repeat:
 		}
 	} else {
 plug_device:
+		if (local_requeue)
+			list_add(&rq->queuelist, &drive->rq_list);
 		spin_unlock_irq(&hwif->lock);
 		ide_unlock_host(host);
-		ide_requeue_and_plug(drive, rq);
+		if (!local_requeue)
+			ide_requeue_and_plug(drive, rq);
 		return BLK_STS_OK;
 	}
 
@@ -573,6 +559,26 @@ out:
 	return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
+{
+	ide_drive_t *drive = hctx->queue->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+
+	spin_lock_irq(&hwif->lock);
+	if (drive->sense_rq_active) {
+		spin_unlock_irq(&hwif->lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_start_request(bd->rq);
+	return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hwif->lock, flags);
+	drive->sense_rq_active = true;
 	list_add_tail(&rq->queuelist, &drive->rq_list);
-	spin_unlock_irqrestore(&hwif->lock, flags);
-
 	kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
+	spin_lock_irq(&hwif->lock);
 	ide_insert_request_head(drive, rq);
+	spin_unlock_irq(&hwif->lock);
 
 out:
 	return;
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
 	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
+	blk_status_t ret;
 	LIST_HEAD(list);
 
+	blk_mq_quiesce_queue(drive->queue);
+
+	ret = BLK_STS_OK;
 	spin_lock_irq(&hwif->lock);
-	if (!list_empty(&drive->rq_list))
-		list_splice_init(&drive->rq_list, &list);
+	while (!list_empty(&drive->rq_list)) {
+		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+
+		spin_unlock_irq(&hwif->lock);
+		ret = ide_issue_rq(drive, rq, true);
+		spin_lock_irq(&hwif->lock);
+	}
 	spin_unlock_irq(&hwif->lock);
 
-	while (!list_empty(&list)) {
-		rq = list_first_entry(&list, struct request, queuelist);
-		list_del_init(&rq->queuelist);
-		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
-	}
+	blk_mq_unquiesce_queue(drive->queue);
+
+	if (ret != BLK_STS_OK)
+		kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 					  stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					mdname(mddev),
-					conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+						mdname(mddev),
+						ret,
+						new_size,
+						conf->min_nr_stripes,
+						conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+						conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-				       mdname(mddev));
+					mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
 	/* current sense rq and buffer */
 	bool sense_rq_armed;
+	bool sense_rq_active;
 	struct request *sense_rq;
 	struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);