for-linus-20191010

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl2f5MIQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpvscD/4v8E1s1rt6JwqM0Fa27UjRdhfGnc8ad8vs
 fD7rf3ZmLkoM1apVopMcAscUH726wU4qbxwEUDEntxxv2wJHuZdSZ64zhFJ17uis
 uJ2pF4MpK/m6DnHZu/SAU4t9aU+l6SBqX0tS1bFycecPgGRk46jrVX5tNJggt0Fy
 hqmx3ACWbkGiFDERT2AAQ69WHfmzeI9aUjx3jJY2eLnK7OjjEpyoEBs0j/AHl3ep
 kydhItU5NSFCv94X7vmZy/dvQ5hE4/1HTFfg79fOZcywQi1AN5DafKxiM2kgaSJ0
 jW58i+AFVtUPysNpVsxvjAgqGwDX/UJkOkggPd6V8/6LMfEvBKY4YNXlUEbqTN3Y
 pqn19/cgdKHaQpHKqwettcQujc71kry/yHsaudD+g2fi0efYi3d4qxIp9XA0TF03
 z6jzp8Hfo2SKbwapIFPa7Wqj86ZpbBxtROibCA17WKSNzn0UR3pJmEigo4l364ow
 nJpvZChLDHZXjovgzISmUnbR+O1yP0+ZnI9b7kgNp0UV4SI5ajf6f2T7667dcQs0
 J1GNt4QvqPza3R0z1SuoEi6tbc3GyMj7NZyIseNOXR/NtqXEWtiNvDIuZqs6Wn/T
 4GhaF0Mjqc17B3UEkdU1z09HL0JR40vUrGYE4lDxHhPWd0YngDGJJX2pZG2Y0WBp
 VQ20AzijzQ==
 =wZnt
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-20191010' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fix wbt performance regression introduced with the blk-rq-qos
   refactoring (Harshad)

 - Fix io_uring fileset removal inadvertently killing the workqueue (me)

 - Fix io_uring typo in linked command nonblock submission (Pavel)

 - Remove spurious io_uring wakeups on request free (Pavel)

 - Fix null_blk zoned command error return (Keith)

 - Don't use freezable workqueues for backing_dev, which also means we
   can revert a previous libata hack (Mika)

 - Fix nbd sysfs mutex dropped too soon at removal time (Xiubo)

* tag 'for-linus-20191010' of git://git.kernel.dk/linux-block:
  nbd: fix possible sysfs duplicate warning
  null_blk: Fix zoned command return code
  io_uring: only flush workqueues on fileset removal
  io_uring: remove wait loop spurious wakeups
  blk-wbt: fix performance regression in wbt scale_up/scale_down
  Revert "libata, freezer: avoid block device removal while system is frozen"
  bdi: Do not use freezable workqueue
  io_uring: fix reversed nonblock flag for link submission
commit 297cbcccc2
Author: Linus Torvalds
Date:   2019-10-11 08:45:32 -07:00

 9 files changed, 29 insertions(+), 55 deletions(-)

diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -160,24 +160,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
 	return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
 	/*
 	 * Hit max in previous round, stop here
 	 */
 	if (rqd->scaled_max)
-		return;
+		return false;
 
 	rqd->scale_step--;
 
 	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
 	/*
 	 * Stop scaling down when we've hit the limit. This also prevents
@@ -185,7 +188,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 	 * keep up.
 	 */
 	if (rqd->max_depth == 1)
-		return;
+		return false;
 
 	if (rqd->scale_step < 0 && hard_throttle)
 		rqd->scale_step = 0;
@@ -194,6 +197,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 	rqd->scaled_max = false;
 
 	rq_depth_calc_max_depth(rqd);
+	return true;
 }
 
 struct rq_qos_wait_data {

diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -130,8 +130,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
 		 acquire_inflight_cb_t *acquire_inflight_cb,
 		 cleanup_cb_t *cleanup_cb);
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-	rq_depth_scale_up(&rwb->rq_depth);
+	if (!rq_depth_scale_up(&rwb->rq_depth))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_wake_all(rwb);
@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+		return;
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
 	rwb_trace_step(rwb, "scale down");
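
The wbt regression comes down to scale_up()/scale_down() running their side effects (calc_wb_limits(), clearing unknown_cnt, waking waiters or tracing a step) even when no scaling step was actually taken. Below is a minimal userspace model of the fixed pattern, not the kernel code itself; the depth calculation is a stand-in:

#include <stdbool.h>
#include <stdio.h>

struct rq_depth {
	int scale_step;
	bool scaled_max;
};

/* Stand-in for rq_depth_calc_max_depth(): pretend the ceiling is
 * reached once scale_step drops below an arbitrary bound. */
static bool calc_max_depth(struct rq_depth *rqd)
{
	return rqd->scale_step < -6;
}

static bool model_scale_up(struct rq_depth *rqd)
{
	if (rqd->scaled_max)	/* hit max in the previous round */
		return false;	/* tell the caller nothing changed */
	rqd->scale_step--;
	rqd->scaled_max = calc_max_depth(rqd);
	return true;
}

int main(void)
{
	struct rq_depth rqd = { .scale_step = 0, .scaled_max = true };

	/* The pre-fix code recalculated limits and woke all waiters here
	 * even though scale_up was a no-op; post-fix, the caller bails
	 * out early when the step reports failure. */
	if (!model_scale_up(&rqd))
		puts("no scaling step taken; skip limit recalc and wakeups");
	return 0;
}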

diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
 		return;
 	}
 
-	/*
-	 * XXX - UGLY HACK
-	 *
-	 * The block layer suspend/resume path is fundamentally broken due
-	 * to freezable kthreads and workqueue and may deadlock if a block
-	 * device gets removed while resume is in progress. I don't know
-	 * what the solution is short of removing freezable kthreads and
-	 * workqueues altogether.
-	 *
-	 * The following is an ugly hack to avoid kicking off device
-	 * removal while freezer is active. This is a joke but does avoid
-	 * this particular deadlock scenario.
-	 *
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
-	 * http://marc.info/?l=linux-kernel&m=138695698516487
-	 */
-#ifdef CONFIG_FREEZER
-	while (pm_freezing)
-		msleep(10);
-#endif
-
 	DPRINTK("ENTER\n");
 	mutex_lock(&ap->scsi_scan_mutex);

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
 	if (refcount_dec_and_mutex_lock(&nbd->refs,
 					&nbd_index_mutex)) {
 		idr_remove(&nbd_index_idr, nbd->index);
-		mutex_unlock(&nbd_index_mutex);
 		nbd_dev_remove(nbd);
+		mutex_unlock(&nbd_index_mutex);
 	}
 }
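
The nbd fix is purely about ordering: nbd_dev_remove() ultimately releases the device's externally visible name, so it has to finish before nbd_index_mutex is dropped, or a concurrent creator can claim the freed index while the old sysfs name still exists and trip a duplicate warning. A compilable userspace analogue of the ordering, with a pthread mutex standing in for nbd_index_mutex and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

struct dev {
	int refs;
	int index;
};

/* Stand-in for nbd_dev_remove(): frees the externally visible name. */
static void dev_remove(struct dev *d)
{
	printf("name for index %d released\n", d->index);
}

static void dev_put(struct dev *d)
{
	pthread_mutex_lock(&index_lock);
	if (--d->refs == 0) {
		/* Remove while still holding the lock: unlocking first
		 * (the old order) lets a racing creator reuse the index
		 * before the old name is gone. */
		dev_remove(d);
		pthread_mutex_unlock(&index_lock);
		return;
	}
	pthread_mutex_unlock(&index_lock);
}

int main(void)
{
	struct dev d = { .refs = 1, .index = 0 };

	dev_put(&d);
	return 0;
}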

diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 		zone->wp = zone->start;
 		break;
 	default:
-		cmd->error = BLK_STS_NOTSUPP;
-		break;
+		return BLK_STS_NOTSUPP;
 	}
 	return BLK_STS_OK;
 }
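
The null_blk bug is the classic "record the error, return success" pattern: the old default branch set cmd->error = BLK_STS_NOTSUPP but then fell out of the switch to the unconditional return BLK_STS_OK, so the caller never saw the failure. A self-contained illustration with plain enums standing in for the kernel's blk_status_t:

#include <stdio.h>

enum status { STS_OK, STS_NOTSUPP };

static enum status handle_op_buggy(int op, enum status *recorded)
{
	switch (op) {
	case 0:
		break;
	default:
		*recorded = STS_NOTSUPP;	/* error noted... */
		break;				/* ...but control falls through */
	}
	return STS_OK;				/* caller sees success anyway */
}

static enum status handle_op_fixed(int op)
{
	switch (op) {
	case 0:
		break;
	default:
		return STS_NOTSUPP;		/* report the error directly */
	}
	return STS_OK;
}

int main(void)
{
	enum status recorded = STS_OK;

	printf("buggy: %d (recorded %d), fixed: %d\n",
	       handle_op_buggy(99, &recorded), recorded,
	       handle_op_fixed(99));
	return 0;
}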

diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -591,14 +591,6 @@ static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
 	io_cqring_ev_posted(ctx);
 }
 
-static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
-{
-	percpu_ref_put_many(&ctx->refs, refs);
-
-	if (waitqueue_active(&ctx->wait))
-		wake_up(&ctx->wait);
-}
-
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 				   struct io_submit_state *state)
 {
@@ -646,7 +638,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	req->result = 0;
 	return req;
 out:
-	io_ring_drop_ctx_refs(ctx, 1);
+	percpu_ref_put(&ctx->refs);
 	return NULL;
 }
 
@@ -654,7 +646,7 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 {
 	if (*nr) {
 		kmem_cache_free_bulk(req_cachep, *nr, reqs);
-		io_ring_drop_ctx_refs(ctx, *nr);
+		percpu_ref_put_many(&ctx->refs, *nr);
 		*nr = 0;
 	}
 }
@@ -663,7 +655,7 @@ static void __io_free_req(struct io_kiocb *req)
 {
 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
 		fput(req->file);
-	io_ring_drop_ctx_refs(req->ctx, 1);
+	percpu_ref_put(&req->ctx->refs);
 	kmem_cache_free(req_cachep, req);
 }
 
@@ -2761,7 +2753,7 @@ out:
 	if (link)
 		io_queue_link_head(ctx, link, &link->submit, shadow_req,
-				   block_for_last);
+				   !block_for_last);
 
 	if (statep)
 		io_submit_state_end(statep);
@@ -2920,8 +2912,12 @@ static void io_finish_async(struct io_ring_ctx *ctx)
 static void io_destruct_skb(struct sk_buff *skb)
 {
 	struct io_ring_ctx *ctx = skb->sk->sk_user_data;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
+		if (ctx->sqo_wq[i])
+			flush_workqueue(ctx->sqo_wq[i]);
 
-	io_finish_async(ctx);
 	unix_destruct_scm(skb);
 }
 
@@ -3630,7 +3626,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		}
 	}
 
-	io_ring_drop_ctx_refs(ctx, 1);
+	percpu_ref_put(&ctx->refs);
out_fput:
 	fdput(f);
 	return submitted ? submitted : ret;
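
On the wakeup fix: io_ring_drop_ctx_refs() paired every single reference drop with a check-and-wake on ctx->wait, so each freed request could needlessly wake tasks sleeping in io_uring_enter(). The patch replaces the helper with the bare percpu_ref_put()/percpu_ref_put_many() calls, leaving wakeups to the paths that actually post completions. A rough sketch of the shape of the change, with plain atomics standing in for the percpu ref (not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ctx_refs;

/* Old shape: every put also poked the waitqueue. */
static void drop_refs_old(int n)
{
	atomic_fetch_sub(&ctx_refs, n);
	/* if (waitqueue_active(&ctx->wait)) wake_up(&ctx->wait);
	 * ...fired on every request free, waking io_uring_enter()
	 * sleepers that had nothing new to reap. */
}

/* New shape: just drop the references; completion paths do the waking. */
static void drop_refs_new(int n)
{
	atomic_fetch_sub(&ctx_refs, n);
}

int main(void)
{
	atomic_store(&ctx_refs, 100);
	drop_refs_old(1);
	drop_refs_new(32);	/* bulk path, cf. io_free_req_many() */
	printf("refs: %d\n", atomic_load(&ctx_refs));
	return 0;
}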

diff --git a/kernel/freezer.c b/kernel/freezer.c
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -22,12 +22,6 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
-/*
- * Temporary export for the deadlock workaround in ata_scsi_hotplug().
- * Remove once the hack becomes unnecessary.
- */
-EXPORT_SYMBOL_GPL(pm_freezing);
-
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -239,8 +239,8 @@ static int __init default_bdi_init(void)
 {
 	int err;
 
-	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
-				 WQ_UNBOUND | WQ_SYSFS, 0);
+	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
+				 WQ_SYSFS, 0);
 	if (!bdi_wq)
 		return -ENOMEM;