for-5.18/block-2022-04-01
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmJHUe0QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpvpNEAC1bxwOgI8Kbi7j37pPClrB2aQRgp1WsTkA
z56rU7BTPApaKGjfObv0CvmUIBcyG6uJhTSr9QGvg0mZDCDDJz58ESIYomvfw+Ob
tfdBLykxL6ad2/JAVTslTH/UUzfyZj5/+JT5KmldOMh1q6KDRQJt022AAKI5Lkdu
XKkAvCV9ZQFwcfzVROb/ribYUkokRHjtQVv8nqyJ7CJ5OEYoI0ghQJNr7/Va9MXA
6YbHJHErbQUsJbxDqqScqkQ3H9upUnJg/CIDKyuptUPT3vDzDkRT9yPvrOhzEk9E
8VEufNO8v/0P26xw/thqPwn8poXTVd61i8HZMvmclofTqL9kqoii1+v4OPgl9uws
7liR2j2HLF/Xd5uceVP/RYvRGzdujdpdj4MgQK6AcPz2LivWY9vMekG/FW0+LxBY
AvILmpSvPAhbRW94lZU6AU/mdqYBolWrz97pke0zPVHSv9OopaYca5pzXWytszPT
o633R3Au/0tUQj4be/v7JZNnK1ESj8KZD7aon/cRH2aejIN87bCLo4BZLELVliPZ
cBdizPJu2tzhhAZyEuaz4IyftL69tCxi2NCiN4mER43mIsDVMxauz7LhDwO0527q
oBHIs7fAObOuNCtXOe9/BiMicGgCp+yil/6EdYexQmyNkVkSOejj9kyI/UAVpgQe
NZSNBuD9UQ==
=QzvG
-----END PGP SIGNATURE-----

Merge tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Either fixes or a few additions that got missed in the initial merge
  window pull. In detail:

   - List iterator fix to avoid leaking value post loop (Jakob)

   - One-off fix in minor count (Christophe)

   - Fix for a regression in how io priority setting works for an
     exiting task (Jiri)

   - Fix a regression in this merge window with blkg_free() being
     called in an inappropriate context (Ming)

   - Misc fixes (Ming, Tom)"

* tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block:
  blk-wbt: remove wbt_track stub
  block: use dedicated list iterator variable
  block: Fix the maximum minor value is blk_alloc_ext_minor()
  block: restore the old set_task_ioprio() behaviour wrt PF_EXITING
  block: avoid calling blkg_free() in atomic context
  lib/sbitmap: allocate sb->map via kvzalloc_node
commit d589ae0d44
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+					     free_work);
 	int i;
 
-	if (!blkg)
-		return;
-
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
 	kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	if (!blkg)
+		return;
+
+	/*
+	 * Both ->pd_free_fn() and request queue's release handler may
+	 * sleep, so free us by scheduling one work func
+	 */
+	INIT_WORK(&blkg->free_work, blkg_free_workfn);
+	schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
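Reviewer note on the two hunks above: __blkg_release() runs as an RCU callback, where sleeping is forbidden, while ->pd_free_fn() and the request queue's release handler may sleep. Ming's fix therefore turns blkg_free() into a thin wrapper that queues blkg_free_workfn() via schedule_work(); the struct blkcg_gq hunk further down supplies the free_work member by overlaying it with async_bio_work in a union, since the two are never in use at the same time. Below is a minimal userspace sketch of the same defer-to-worker shape — the work list and every name in it are invented for illustration, not kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	void (*fn)(struct work *);
	struct work *next;
};

static struct work *pending;

/* Atomic-safe: only links the item, never allocates or sleeps. */
static void sketch_schedule_work(struct work *w)
{
	w->next = pending;
	pending = w;
}

/* Worker context: free to sleep, so the real teardown happens here. */
static void sketch_run_pending(void)
{
	while (pending) {
		struct work *w = pending;

		pending = w->next;
		w->fn(w);
	}
}

struct object {
	struct work free_work;
	char *payload;
};

static void object_free_workfn(struct work *w)
{
	/* container_of()-style recovery of the enclosing object. */
	struct object *obj = (struct object *)((char *)w -
			offsetof(struct object, free_work));

	free(obj->payload);	/* stands in for ->pd_free_fn() etc. */
	free(obj);
	puts("torn down in worker context");
}

/* Release path that may run where sleeping is forbidden: only queues. */
static void object_release(struct object *obj)
{
	obj->free_work.fn = object_free_workfn;
	sketch_schedule_work(&obj->free_work);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	obj->payload = malloc(64);
	object_release(obj);	/* cheap, non-sleeping */
	sketch_run_pending();	/* later, in a sleepable context */
	return 0;
}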
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
 		task_lock(task);
 		if (task->flags & PF_EXITING) {
-			err = -ESRCH;
 			kmem_cache_free(iocontext_cachep, ioc);
 			goto out;
 		}
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	task->io_context->ioprio = ioprio;
 out:
 	task_unlock(task);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
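The two hunks above restore the pre-5.18 contract: when the target task is already exiting, set_task_ioprio() quietly succeeds as a no-op instead of failing with -ESRCH, since there is nothing useful to set on a task that is going away and existing callers did not expect the error. A toy model of that contract, with invented names (this is not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool exiting;
	int ioprio;
};

/* Toy version of the restored semantics: exiting target => no-op success. */
static int set_ioprio_sketch(struct task *t, int ioprio)
{
	if (t->exiting)
		return 0;	/* old behaviour: succeed silently */

	t->ioprio = ioprio;
	return 0;
}

int main(void)
{
	struct task live = { .exiting = false }, dying = { .exiting = true };

	printf("live task:  rc=%d ioprio=%d\n",
	       set_ioprio_sketch(&live, 4), live.ioprio);
	printf("dying task: rc=%d ioprio=%d\n",
	       set_ioprio_sketch(&dying, 4), dying.ioprio);
	return 0;
}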
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-		struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+		struct request_queue *q)
 {
 	struct blk_mq_qe_pair *qe;
-	struct elevator_type *t = NULL;
 
 	list_for_each_entry(qe, head, node)
-		if (qe->q == q) {
-			t = qe->type;
-			break;
-		}
+		if (qe->q == q)
+			return qe;
 
-	if (!t)
-		return;
+	return NULL;
+}
+
+static void blk_mq_elv_switch_back(struct list_head *head,
+		struct request_queue *q)
+{
+	struct blk_mq_qe_pair *qe;
+	struct elevator_type *t;
+
+	qe = blk_lookup_qe_pair(head, q);
+	if (!qe)
+		return;
+
+	t = qe->type;
 	list_del(&qe->node);
 	kfree(qe);
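This rework follows the tree-wide effort to stop using a list_for_each_entry() iterator after its loop: if the walk completes without hitting break, the iterator points at the list head disguised as an entry, so any later dereference is invalid. Factoring the search into blk_lookup_qe_pair(), which returns either the match or NULL, makes the not-found case impossible to misuse. A self-contained userspace sketch of that shape, using a plain singly linked list and invented names:

#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* Lookup helper in the blk_lookup_qe_pair() style: the return value is
 * either a real element or NULL, so callers never see a stale iterator. */
static struct node *lookup(struct node *head, int key)
{
	for (struct node *n = head; n; n = n->next)
		if (n->key == key)
			return n;

	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *hit = lookup(&a, 2);

	printf("key 2: %s\n", hit ? "found" : "missing");
	printf("key 9: %s\n", lookup(&a, 9) ? "found" : "missing");
	return 0;
}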
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
 	return -EINVAL;
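Background for the hunk above: headers often pair the real declaration under the config option with an empty static-inline stub under #else, so call sites compile without #ifdefs; once the last wbt_track() caller was removed, its stub became dead code. A toy illustration of the pattern — only the CONFIG_BLK_WBT symbol is real here, the rest is invented:

#include <stdio.h>

#ifdef CONFIG_BLK_WBT
void track_example(int flags);		/* real implementation elsewhere */
#else
/* Stub so callers compile unchanged when the feature is configured out. */
static inline void track_example(int flags)
{
	(void)flags;
}
#endif

int main(void)
{
	track_example(0);	/* no #ifdef needed at the call site */
	puts("stub pattern compiles either way");
	return 0;
}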
@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
 {
 	int idx;
 
-	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
 	if (idx == -ENOSPC)
 		return -EBUSY;
 	return idx;
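The one-liner above is an off-by-one: the max argument of ida_alloc_range() is inclusive, so passing NR_EXT_DEVT allowed NR_EXT_DEVT + 1 distinct minors, one more than intended; NR_EXT_DEVT - 1 caps the range correctly. The same trap in miniature (userspace, illustrative only):

#include <assert.h>

#define NR_SLOTS 4	/* stand-in for NR_EXT_DEVT: want ids 0..3 */

/* Inclusive-bounds check, mirroring ida_alloc_range(ida, min, max, gfp). */
static int id_allowed(int id, int min, int max)
{
	return id >= min && id <= max;
}

int main(void)
{
	/* Buggy bound: max = NR_SLOTS admits id 4, one past the end. */
	assert(id_allowed(NR_SLOTS, 0, NR_SLOTS));
	/* Fixed bound: max = NR_SLOTS - 1 rejects it. */
	assert(!id_allowed(NR_SLOTS, 0, NR_SLOTS - 1));
	return 0;
}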
@@ -95,7 +95,10 @@ struct blkcg_gq {
 
 	spinlock_t			async_bio_lock;
 	struct bio_list			async_bios;
-	struct work_struct		async_bio_work;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
 
 	atomic_t			use_delay;
 	atomic64_t			delay_nsec;
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
 static inline void sbitmap_free(struct sbitmap *sb)
 {
 	free_percpu(sb->alloc_hint);
-	kfree(sb->map);
+	kvfree(sb->map);
 	sb->map = NULL;
 }
 
@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		sb->alloc_hint = NULL;
 	}
 
-	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
 	if (!sb->map) {
 		free_percpu(sb->alloc_hint);
 		return -ENOMEM;
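This hunk pairs with the kvfree() change in the sbitmap.h hunk above: for large queue depths the map array can exceed what the page allocator will reliably hand out as physically contiguous memory, and the kv* helpers transparently fall back to vmalloc() in that case. A rough userspace sketch of the size-based selection idea — the threshold and names are invented, and plain calloc() stands in for both underlying allocators:

#include <stdio.h>
#include <stdlib.h>

#define FAKE_CONTIG_LIMIT (4 * 4096)	/* invented threshold for the sketch */

/* Toy kvzalloc: pick an allocation strategy by size, zeroed either way. */
static void *kv_zalloc_sketch(size_t n)
{
	if (n <= FAKE_CONTIG_LIMIT)
		printf("%zu bytes: contiguous (kmalloc-like) path\n", n);
	else
		printf("%zu bytes: vmalloc-like fallback\n", n);

	return calloc(1, n);	/* one free() covers both, like kvfree() */
}

int main(void)
{
	void *small = kv_zalloc_sketch(512);
	void *large = kv_zalloc_sketch(1 << 20);

	free(small);
	free(large);
	return 0;
}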