sched/numa: Be more careful about joining numa groups
Due to the way the pid is truncated, and tasks are moved between CPUs by
the scheduler, it is possible for the current task_numa_fault() to group
together tasks that do not actually share memory. This patch adds a few
simple sanity checks to task_numa_fault(), joining tasks together only if
they share the same tsk->mm, or if the fault was on a page with an
elevated mapcount in a shared VMA.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-57-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit dabe1d9924
parent 0ec8aa00f2
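For illustration only, not part of the patch: a minimal user-space sketch of the pid-truncation collision the changelog describes, assuming an 8-bit pid field as used by the cpupid encoding. PID_BITS, PID_MASK and truncated_pid() are made up for this example and are not kernel identifiers.

/*
 * Two unrelated tasks whose pids differ by a multiple of 2^PID_BITS
 * produce the same truncated value, so a comparison of truncated pids
 * alone can spuriously report "same task" -- hence the extra checks
 * (shared mm, or shared page) before joining a numa group.
 */
#include <stdio.h>

#define PID_BITS 8				/* assumed width of the pid field */
#define PID_MASK ((1 << PID_BITS) - 1)

static int truncated_pid(int pid)
{
	return pid & PID_MASK;			/* only the low bits survive */
}

int main(void)
{
	int pid_a = 1234;			/* task that last faulted the page */
	int pid_b = 1234 + (1 << PID_BITS);	/* unrelated task, same low bits */

	printf("%d -> %d, %d -> %d, spurious match: %s\n",
	       pid_a, truncated_pid(pid_a),
	       pid_b, truncated_pid(pid_b),
	       truncated_pid(pid_a) == truncated_pid(pid_b) ? "yes" : "no");
	return 0;
}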
@@ -1454,6 +1454,7 @@ struct task_struct {
 #define TNF_MIGRATED	0x01
 #define TNF_NO_GROUP	0x02
+#define TNF_SHARED	0x04
 
 #ifdef CONFIG_NUMA_BALANCING
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
@@ -1381,7 +1381,7 @@ static void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-static void task_numa_group(struct task_struct *p, int cpupid)
+static void task_numa_group(struct task_struct *p, int cpupid, int flags)
 {
 	struct numa_group *grp, *my_grp;
 	struct task_struct *tsk;
@@ -1439,10 +1439,16 @@ static void task_numa_group(struct task_struct *p, int cpupid)
 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
 		goto unlock;
 
-	if (!get_numa_group(grp))
-		goto unlock;
+	/* Always join threads in the same process. */
+	if (tsk->mm == current->mm)
+		join = true;
 
-	join = true;
+	/* Simple filter to avoid false positives due to PID collisions */
+	if (flags & TNF_SHARED)
+		join = true;
+
+	if (join && !get_numa_group(grp))
+		join = false;
 
 unlock:
 	rcu_read_unlock();
@@ -1539,7 +1545,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
 	} else {
 		priv = cpupid_match_pid(p, last_cpupid);
 		if (!priv && !(flags & TNF_NO_GROUP))
-			task_numa_group(p, last_cpupid);
+			task_numa_group(p, last_cpupid, flags);
 	}
 
 	/*
@@ -3584,6 +3584,13 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_write(pte))
 		flags |= TNF_NO_GROUP;
 
+	/*
+	 * Flag if the page is shared between multiple address spaces. This
+	 * is later used when determining whether to group tasks together
+	 */
+	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
+		flags |= TNF_SHARED;
+
 	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
 	target_nid = numa_migrate_prep(page, vma, addr, page_nid);