workqueue: Make default affinity_scope dynamically updatable
While workqueue.default_affinity_scope is writable, it only affects
workqueues which are created afterwards and isn't very useful. Instead,
let's introduce an explicit "default" scope and update the effective scope
dynamically when workqueue.default_affinity_scope is changed.

Signed-off-by: Tejun Heo <tj@kernel.org>
commit 523a301e66
parent 7dbf15c5c0
Documentation/admin-guide/kernel-parameters.txt
@@ -7014,10 +7014,10 @@
 			information, see the Affinity Scopes section in
 			Documentation/core-api/workqueue.rst.
 
-			This can be updated after boot through the matching
-			file under /sys/module/workqueue/parameters.
-			However, the changed default will only apply to
-			unbound workqueues created afterwards.
+			This can be changed after boot by writing to the
+			matching /sys/module/workqueue/parameters file. All
+			workqueues with the "default" affinity scope will be
+			updated accordingly.
 
 	workqueue.debug_force_rr_cpu
 			Workqueue used to implicitly guarantee that work
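As a usage sketch (not part of this patch), the userspace C program below changes the default scope at runtime through the parameter file described above and reads back the scope reported by an unbound WQ_SYSFS workqueue. The /sys/module path follows the documentation text; the "writeback" workqueue name and the /sys/bus/workqueue/devices path are illustrative assumptions, and the program needs root.

/* Sketch: change the system-wide default affinity scope at runtime and
 * observe a "default"-scope workqueue following it. The workqueue name
 * and sysfs paths are illustrative assumptions; run as root. */
#include <stdio.h>
#include <stdlib.h>

#define PARAM	"/sys/module/workqueue/parameters/default_affinity_scope"
#define WQ_ATTR	"/sys/bus/workqueue/devices/writeback/affinity_scope"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f || fputs(val, f) == EOF) {
		perror(path);
		exit(1);
	}
	fclose(f);
}

static void show(const char *path)
{
	char line[64] = "";
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		exit(1);
	}
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	show(PARAM);			/* e.g. "cache" (boot-time default)        */
	show(WQ_ATTR);			/* e.g. "default (cache)" after this patch */

	write_str(PARAM, "numa");	/* switch the default scope                */
	show(WQ_ATTR);			/* expected to now read "default (numa)"   */
	return 0;
}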
Documentation/core-api/workqueue.rst
@@ -358,7 +358,11 @@ on one of the CPUs which share the last level cache with the issuing CPU.
 Once started, the worker may or may not be allowed to move outside the scope
 depending on the ``affinity_strict`` setting of the scope.
 
-Workqueue currently supports the following five affinity scopes.
+Workqueue currently supports the following affinity scopes.
+
+``default``
+  Use the scope in module parameter ``workqueue.default_affinity_scope``
+  which is always set to one of the scopes below.
 
 ``cpu``
   CPUs are not grouped. A work item issued on one CPU is processed by a
@@ -392,6 +396,9 @@ directory.
 ``affinity_scope``
   Read to see the current affinity scope. Write to change.
 
+  When default is the current scope, reading this file will also show the
+  current effective scope in parentheses, for example, ``default (cache)``.
+
 ``affinity_strict``
   0 by default indicating that affinity scopes are not strict. When a work
   item starts execution, workqueue makes a best-effort attempt to ensure
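A tool that wants the effective scope regardless of whether ``default`` is in use has to handle both output forms ("cache" and "default (cache)"). A minimal hedged C helper for that, with an assumed device path, could look like this:

/* Sketch: read a workqueue's affinity_scope attribute and extract the
 * effective scope from either "cache" or "default (cache)".
 * The device path is an illustrative assumption. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/workqueue/devices/writeback/affinity_scope";
	char line[64], scope[32] = "", effective[32] = "";
	FILE *f = fopen(path, "r");

	if (!f || !fgets(line, sizeof(line), f)) {
		perror(path);
		return 1;
	}
	fclose(f);

	if (sscanf(line, "%31s (%31[^)])", scope, effective) == 2)
		printf("scope=%s effective=%s\n", scope, effective);	/* "default (cache)" */
	else
		printf("scope=%s effective=%s\n", scope, scope);	/* plain "cache" etc. */
	return 0;
}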
include/linux/workqueue.h
@@ -126,6 +126,7 @@ struct rcu_work {
 };
 
 enum wq_affn_scope {
+	WQ_AFFN_DFL,			/* use system default */
 	WQ_AFFN_CPU,			/* one pod per CPU */
 	WQ_AFFN_SMT,			/* one pod per SMT */
 	WQ_AFFN_CACHE,			/* one pod per LLC */
@@ -133,8 +134,6 @@ enum wq_affn_scope {
 	WQ_AFFN_SYSTEM,			/* one pod across the whole system */
 
 	WQ_AFFN_NR_TYPES,
-
-	WQ_AFFN_DFL = WQ_AFFN_CACHE,
 };
 
 /**
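The enum change is the heart of the patch: previously WQ_AFFN_DFL was just an alias for WQ_AFFN_CACHE, so once an attrs had its scope filled in there was no way to tell "asked for cache" apart from "asked for the default", and a later change of the module parameter could not be propagated. Making WQ_AFFN_DFL a real entry lets resolution happen at lookup time. A minimal standalone sketch of that distinction (userspace, not kernel code; the numa entry is taken from the documented scope list):

/* Minimal model: keeping "default" as a distinct value lets the effective
 * scope be resolved at lookup time, so attrs that asked for the default
 * automatically follow later changes of wq_affn_dfl. */
#include <stdio.h>

enum wq_affn_scope {
	WQ_AFFN_DFL,	/* use system default (new: a real entry, not an alias) */
	WQ_AFFN_CPU,
	WQ_AFFN_SMT,
	WQ_AFFN_CACHE,
	WQ_AFFN_NUMA,
	WQ_AFFN_SYSTEM,
	WQ_AFFN_NR_TYPES,
};

static const char *names[WQ_AFFN_NR_TYPES] = {
	"default", "cpu", "smt", "cache", "numa", "system",
};

static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;	/* boot-time default */

/* Resolve a requested scope into the effective one, as the patch does. */
static enum wq_affn_scope resolve(enum wq_affn_scope requested)
{
	return requested == WQ_AFFN_DFL ? wq_affn_dfl : requested;
}

int main(void)
{
	enum wq_affn_scope a = WQ_AFFN_DFL;	/* wq created with the default */
	enum wq_affn_scope b = WQ_AFFN_CACHE;	/* wq that asked for cache     */

	printf("a -> %s, b -> %s\n", names[resolve(a)], names[resolve(b)]);

	wq_affn_dfl = WQ_AFFN_NUMA;		/* admin changes the default   */
	printf("a -> %s, b -> %s\n", names[resolve(a)], names[resolve(b)]);
	/* With the old "WQ_AFFN_DFL = WQ_AFFN_CACHE" alias, a and b were
	 * indistinguishable and a could not follow the new default. */
	return 0;
}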
kernel/workqueue.c
@@ -339,9 +339,10 @@ struct wq_pod_type {
 };
 
 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
-static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_DFL;
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
+	[WQ_AFFN_DFL]			= "default",
 	[WQ_AFFN_CPU]			= "cpu",
 	[WQ_AFFN_SMT]			= "smt",
 	[WQ_AFFN_CACHE]			= "cache",
@@ -3734,7 +3735,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void)
 		goto fail;
 
 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
-	attrs->affn_scope = wq_affn_dfl;
+	attrs->affn_scope = WQ_AFFN_DFL;
 	return attrs;
 fail:
 	free_workqueue_attrs(attrs);
@@ -3815,7 +3816,18 @@ static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
 static const struct wq_pod_type *
 wqattrs_pod_type(const struct workqueue_attrs *attrs)
 {
-	struct wq_pod_type *pt = &wq_pod_types[attrs->affn_scope];
+	enum wq_affn_scope scope;
+	struct wq_pod_type *pt;
+
+	/* to synchronize access to wq_affn_dfl */
+	lockdep_assert_held(&wq_pool_mutex);
+
+	if (attrs->affn_scope == WQ_AFFN_DFL)
+		scope = wq_affn_dfl;
+	else
+		scope = attrs->affn_scope;
+
+	pt = &wq_pod_types[scope];
 
 	if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
 	    likely(pt->nr_pods))
@@ -5847,13 +5859,29 @@ static int parse_affn_scope(const char *val)
 
 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
 {
-	int affn;
+	struct workqueue_struct *wq;
+	int affn, cpu;
 
 	affn = parse_affn_scope(val);
 	if (affn < 0)
 		return affn;
+	if (affn == WQ_AFFN_DFL)
+		return -EINVAL;
+
+	cpus_read_lock();
+	mutex_lock(&wq_pool_mutex);
 
 	wq_affn_dfl = affn;
+
+	list_for_each_entry(wq, &workqueues, list) {
+		for_each_online_cpu(cpu) {
+			wq_update_pod(wq, cpu, cpu, true);
+		}
+	}
+
+	mutex_unlock(&wq_pool_mutex);
+	cpus_read_unlock();
+
 	return 0;
 }
 
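The pattern in wq_affn_dfl_set() is: take the CPU and pool locks, update the global default, then re-evaluate every workqueue so anything running with the "default" scope gets its pods rebuilt. The sketch below is a userspace analogy of that flow under stated stand-ins (a pthread mutex for wq_pool_mutex, an array for the workqueues list, a direct field update for wq_update_pod()); it is not kernel code.

/* Userspace model of wq_affn_dfl_set(): update a global default under a
 * lock, then refresh every "workqueue" that tracks the default scope.
 * The real code holds wq_pool_mutex with CPUs read-locked and calls
 * wq_update_pod() for each online CPU of each workqueue. */
#include <pthread.h>
#include <stdio.h>

enum scope { DFL, CPU, SMT, CACHE, NUMA, SYSTEM, NR };
static const char *names[NR] = { "default", "cpu", "smt", "cache", "numa", "system" };

struct wq { const char *name; enum scope requested; enum scope effective; };

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;	/* ~ wq_pool_mutex */
static enum scope dfl = CACHE;					/* ~ wq_affn_dfl   */

static struct wq wqs[] = {
	{ "events_unbound", DFL, CACHE },
	{ "writeback",      DFL, CACHE },
	{ "pinned_wq",      CPU, CPU   },
};

/* ~ wq_affn_dfl_set(): reject "default" itself, then refresh under the lock. */
static int set_default(enum scope val)
{
	size_t i;

	if (val == DFL)
		return -1;			/* ~ -EINVAL */

	pthread_mutex_lock(&pool_mutex);
	dfl = val;
	for (i = 0; i < sizeof(wqs) / sizeof(wqs[0]); i++)
		if (wqs[i].requested == DFL)	/* ~ wq_update_pod() per CPU */
			wqs[i].effective = dfl;
	pthread_mutex_unlock(&pool_mutex);
	return 0;
}

int main(void)
{
	size_t i;

	set_default(NUMA);
	for (i = 0; i < sizeof(wqs) / sizeof(wqs[0]); i++)
		printf("%-15s requested=%s effective=%s\n", wqs[i].name,
		       names[wqs[i].requested], names[wqs[i].effective]);
	return 0;
}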
@@ -6033,8 +6061,13 @@ static ssize_t wq_affn_scope_show(struct device *dev,
 	int written;
 
 	mutex_lock(&wq->mutex);
-	written = scnprintf(buf, PAGE_SIZE, "%s\n",
-			    wq_affn_names[wq->unbound_attrs->affn_scope]);
+	if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
+		written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
+				    wq_affn_names[WQ_AFFN_DFL],
+				    wq_affn_names[wq_affn_dfl]);
+	else
+		written = scnprintf(buf, PAGE_SIZE, "%s\n",
+				    wq_affn_names[wq->unbound_attrs->affn_scope]);
 	mutex_unlock(&wq->mutex);
 
 	return written;