mm, mempolicy: stop adjusting current->il_next in mpol_rebind_nodemask()
The task->il_next variable stores the next allocation node id for the task's MPOL_INTERLEAVE policy. mpol_rebind_nodemask() updates interleave and bind mempolicies due to changing cpuset mems. Currently it also tries to make sure that current->il_next is valid within the updated nodemask. This is bogus, because 1) we are updating potentially any task's mempolicy, not just current, and 2) we might be updating a per-vma mempolicy, not a per-task one.

The interleave_nodes() function that uses il_next can cope fine with the value not being within the currently allowed nodes, so this hasn't manifested as an actual issue.

We can remove the need for updating il_next completely by changing it to il_prev, which stores the node id of the previous interleave allocation instead of the next id. Then interleave_nodes() can calculate the next id using the current nodemask and also store it as il_prev, except when querying the next node via do_get_mempolicy().

Link: http://lkml.kernel.org/r/20170517081140.30654-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 45816682b2
parent 902b62810a
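For illustration only, below is a minimal userspace sketch of the il_prev scheme this patch introduces. It is not kernel code: next_node_in() here is a simplified stand-in for the kernel helper of the same name, nodemasks are modeled as plain bitmaps, and MAX_NUMNODES is shrunk to 8 for the demo. It also shows why do_set_mempolicy() can seed il_prev with MAX_NUMNODES-1: next_node_in() wraps around the mask, so the first interleaved allocation lands on the first allowed node, matching the old first_node() initialization.

/*
 * Userspace sketch of the il_prev-based interleave step (not kernel code).
 * next_node_in() is a simplified stand-in for the kernel helper of the
 * same name: it returns the next set node after 'prev' in 'mask', wrapping
 * around, or MAX_NUMNODES if the mask is empty.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

static unsigned next_node_in(unsigned prev, unsigned long mask)
{
	for (unsigned i = 1; i <= MAX_NUMNODES; i++) {
		unsigned nid = (prev + i) % MAX_NUMNODES;

		if (mask & (1UL << nid))
			return nid;
	}
	return MAX_NUMNODES;	/* empty nodemask */
}

int main(void)
{
	/* Allowed nodes: 1, 3 and 6. */
	unsigned long allowed = (1UL << 1) | (1UL << 3) | (1UL << 6);
	/* do_set_mempolicy() now seeds il_prev with MAX_NUMNODES-1, so the
	 * first call below wraps to the first allowed node. */
	unsigned il_prev = MAX_NUMNODES - 1;

	for (int i = 0; i < 6; i++) {
		unsigned next = next_node_in(il_prev, allowed);

		if (next < MAX_NUMNODES)
			il_prev = next;	/* remember the node just used */
		printf("allocation %d -> node %u\n", i, next);
	}
	return 0;
}

Running this prints nodes 1, 3, 6, 1, 3, 6: each step derives the next node from the previously used one under the current mask, so there is no cached "next" value that a cpuset rebind would have to fix up.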
include/linux/sched.h
@@ -904,7 +904,7 @@ struct task_struct {
 #ifdef CONFIG_NUMA
 	/* Protected by alloc_lock: */
 	struct mempolicy		*mempolicy;
-	short				il_next;
+	short				il_prev;
 	short				pref_node_fork;
 #endif
 #ifdef CONFIG_NUMA_BALANCING
mm/mempolicy.c
@@ -349,12 +349,6 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 		pol->v.nodes = tmp;
 	else
 		BUG();
-
-	if (!node_isset(current->il_next, tmp)) {
-		current->il_next = next_node_in(current->il_next, tmp);
-		if (current->il_next >= MAX_NUMNODES)
-			current->il_next = numa_node_id();
-	}
 }
 
 static void mpol_rebind_preferred(struct mempolicy *pol,
@@ -812,9 +806,8 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	}
 	old = current->mempolicy;
 	current->mempolicy = new;
-	if (new && new->mode == MPOL_INTERLEAVE &&
-	    nodes_weight(new->v.nodes))
-		current->il_next = first_node(new->v.nodes);
+	if (new && new->mode == MPOL_INTERLEAVE)
+		current->il_prev = MAX_NUMNODES-1;
 	task_unlock(current);
 	mpol_put(old);
 	ret = 0;
@@ -916,7 +909,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 			*policy = err;
 		} else if (pol == current->mempolicy &&
 				pol->mode == MPOL_INTERLEAVE) {
-			*policy = current->il_next;
+			*policy = next_node_in(current->il_prev, pol->v.nodes);
 		} else {
 			err = -EINVAL;
 			goto out;
@@ -1697,14 +1690,13 @@ static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
 /* Do dynamic interleaving for a process */
 static unsigned interleave_nodes(struct mempolicy *policy)
 {
-	unsigned nid, next;
+	unsigned next;
 	struct task_struct *me = current;
 
-	nid = me->il_next;
-	next = next_node_in(nid, policy->v.nodes);
+	next = next_node_in(me->il_prev, policy->v.nodes);
 	if (next < MAX_NUMNODES)
-		me->il_next = next;
-	return nid;
+		me->il_prev = next;
+	return next;
 }
 
 /*