Merge branch 'for-4.13-fixes' into for-4.14
commit 058fc47ee2
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
 
         rdev_for_each(rdev, mddev) {
                 if (! test_bit(In_sync, &rdev->flags)
-                    || test_bit(Faulty, &rdev->flags))
+                    || test_bit(Faulty, &rdev->flags)
+                    || test_bit(Bitmap_sync, &rdev->flags))
                         continue;
 
                 target = offset + index * (PAGE_SIZE/512);

@@ -134,7 +134,9 @@ enum flag_bits {
         Faulty,                 /* device is known to have a fault */
         In_sync,                /* device is in_sync with rest of array */
         Bitmap_sync,            /* ..actually, not quite In_sync.  Need a
-                                 * bitmap-based recovery to get fully in sync
+                                 * bitmap-based recovery to get fully in sync.
+                                 * The bit is only meaningful before device
+                                 * has been passed to pers->hot_add_disk.
                                  */
         WriteMostly,            /* Avoid reading if at all possible */
         AutoDetected,           /* added by auto-detect */

@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
                 goto err;
         }
 
-        ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+        ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
         if (!ppl_conf->bs) {
                 ret = -ENOMEM;
                 goto err;

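In this kernel series bioset_create() takes a flags argument, and a bio_set that backs bios carrying their own bio_vecs has to be created with BIOSET_NEED_BVECS, which is what the raid5-ppl hunk above adds. A minimal sketch of that pattern, with hypothetical names (example_bs, example_init, example_alloc_bio), not code from this commit:

#include <linux/bio.h>

static struct bio_set *example_bs;

static int example_init(unsigned int pool_size)
{
        /* Without BIOSET_NEED_BVECS the set has no bvec pool, so bios
         * allocated from it with nr_iovecs > 0 cannot draw their biovecs
         * from the same reserve. */
        example_bs = bioset_create(pool_size, 0, BIOSET_NEED_BVECS);
        if (!example_bs)
                return -ENOMEM;
        return 0;
}

static struct bio *example_alloc_bio(void)
{
        /* One-segment bio drawn from the dedicated pool. */
        return bio_alloc_bioset(GFP_NOIO, 1, example_bs);
}
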
@@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf)
 {
 
         if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-                struct md_rdev *rdev;
 
                 spin_lock_irq(&conf->device_lock);
                 conf->previous_raid_disks = conf->raid_disks;
-                rdev_for_each(rdev, conf->mddev)
-                        rdev->data_offset = rdev->new_data_offset;
+                md_finish_reshape(conf->mddev);
                 smp_wmb();
                 conf->reshape_progress = MaxSector;
                 conf->mddev->reshape_position = MaxSector;

@@ -323,6 +323,7 @@ enum {
 
         __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
         __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
+        __WQ_ORDERED_EXPLICIT   = 1 << 19, /* internal: alloc_ordered_workqueue() */
         __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
 
         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */

@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)                    \
-        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |                \
+                        __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
 
 #define create_workqueue(name)                                          \
         alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))

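With this change, only workqueues created through alloc_ordered_workqueue() carry __WQ_ORDERED_EXPLICIT, i.e. ordering is something the caller asked for rather than an implementation detail. A minimal sketch of such a caller, with hypothetical names (ex_wq, ex_work, ex_work_fn):

#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq;
static struct work_struct ex_work;

static void ex_work_fn(struct work_struct *work)
{
        /* Items queued on ex_wq execute one at a time, in queueing order. */
}

static int ex_init(void)
{
        /* Sets WQ_UNBOUND | __WQ_ORDERED | __WQ_ORDERED_EXPLICIT internally. */
        ex_wq = alloc_ordered_workqueue("ex_wq", WQ_MEM_RECLAIM);
        if (!ex_wq)
                return -ENOMEM;

        INIT_WORK(&ex_work, ex_work_fn);
        queue_work(ex_wq, &ex_work);
        return 0;
}

static void ex_exit(void)
{
        destroy_workqueue(ex_wq);       /* flushes pending work first */
}

Because the flag is explicit, later calls such as workqueue_set_max_active() refuse to break the single-in-flight guarantee for this workqueue.
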
@@ -3577,6 +3577,13 @@ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 
         /* yeap, return possible CPUs in @node that @attrs wants */
         cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+
+        if (cpumask_empty(cpumask)) {
+                pr_warn_once("WARNING: workqueue cpumask: online intersect > "
+                                "possible intersect\n");
+                return false;
+        }
+
         return !cpumask_equal(cpumask, attrs->cpumask);
 
 use_dfl:

@@ -3744,9 +3751,13 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
                 return -EINVAL;
 
         /* creating multiple pwqs breaks ordering guarantee */
-        if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-                return -EINVAL;
+        if (!list_empty(&wq->pwqs)) {
+                if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+                        return -EINVAL;
+
+                wq->flags &= ~__WQ_ORDERED;
+        }
 
         ctx = apply_wqattrs_prepare(wq, attrs);
         if (!ctx)
                 return -ENOMEM;

@@ -3929,6 +3940,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
         struct workqueue_struct *wq;
         struct pool_workqueue *pwq;
 
+        /*
+         * Unbound && max_active == 1 used to imply ordered, which is no
+         * longer the case on NUMA machines due to per-node pools.  While
+         * alloc_ordered_workqueue() is the right way to create an ordered
+         * workqueue, keep the previous behavior to avoid subtle breakages
+         * on NUMA.
+         */
+        if ((flags & WQ_UNBOUND) && max_active == 1)
+                flags |= __WQ_ORDERED;
+
         /* see the comment above the definition of WQ_POWER_EFFICIENT */
         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
                 flags |= WQ_UNBOUND;

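The hunk above keeps the old behavior that an unbound workqueue with max_active == 1 is implicitly ordered, but because such a workqueue does not get __WQ_ORDERED_EXPLICIT, the ordering can later be dropped. A sketch of the distinction, with a hypothetical compat_wq:

#include <linux/workqueue.h>

static struct workqueue_struct *compat_wq;

static int compat_init(void)
{
        /* Unbound with max_active == 1: implicitly ordered (__WQ_ORDERED is
         * set by __alloc_workqueue_key), but not __WQ_ORDERED_EXPLICIT. */
        compat_wq = alloc_workqueue("compat_wq", WQ_UNBOUND, 1);
        if (!compat_wq)
                return -ENOMEM;

        /* Allowed after this change: drops the implicit ordering and raises
         * concurrency.  On an alloc_ordered_workqueue() this would trigger
         * the WARN_ON and be ignored instead. */
        workqueue_set_max_active(compat_wq, 4);
        return 0;
}
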
@@ -4119,13 +4140,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
         struct pool_workqueue *pwq;
 
         /* disallow meddling with max_active for ordered workqueues */
-        if (WARN_ON(wq->flags & __WQ_ORDERED))
+        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                 return;
 
         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
         mutex_lock(&wq->mutex);
 
+        wq->flags &= ~__WQ_ORDERED;
         wq->saved_max_active = max_active;
 
         for_each_pwq(pwq, wq)

@@ -5253,7 +5275,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
          * attributes breaks ordering guarantee.  Disallow exposing ordered
          * workqueues.
          */
-        if (WARN_ON(wq->flags & __WQ_ORDERED))
+        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                 return -EINVAL;
 
         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);

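workqueue_sysfs_register() is reached when a workqueue is created with WQ_SYSFS; after this change it only rejects explicitly ordered workqueues, since letting userspace change attributes would break a guarantee the caller relies on. A sketch with a hypothetical tunable_wq, not code from this commit:

#include <linux/workqueue.h>

static struct workqueue_struct *tunable_wq;

static int tunable_init(void)
{
        /* WQ_SYSFS exposes the workqueue under /sys/bus/workqueue/devices/.
         * An implicitly ordered workqueue (unbound, max_active == 1) is
         * accepted here; an alloc_ordered_workqueue() with WQ_SYSFS would
         * still hit the WARN_ON in workqueue_sysfs_register() and fail. */
        tunable_wq = alloc_workqueue("tunable_wq", WQ_UNBOUND | WQ_SYSFS, 1);
        if (!tunable_wq)
                return -ENOMEM;
        return 0;
}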