memcg: cleanup preparation for page table walk
pagewalk.c can handle vmas by itself, so we no longer have to pass the vma via walk->private. Both mem_cgroup_count_precharge() and mem_cgroup_move_charge() currently run their own for-each-vma loops, but pagewalk.c now does that for us, so let's clean them up.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 26bcd64aa9
parent d85f4d6d3b
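For context (this is not part of the original commit), here is a minimal sketch of what a page-table walker looks like under the convention this change adopts. The names example_pmd_entry and example_walk_mm are hypothetical; the mm_walk fields used (.pmd_entry, .mm), the walk->vma field, and the single walk_page_range(0, ~0UL, ...) call mirror the usage visible in the diff below.

#include <linux/mm.h>

/*
 * Hypothetical walker using the new convention: pagewalk.c iterates
 * the vmas itself and publishes the current one in walk->vma, so the
 * callback no longer needs the vma smuggled in via walk->private.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;	/* set by pagewalk.c */

	/* ... examine the page table entries of vma in [addr, end) ... */
	return 0;	/* a non-zero return aborts the walk */
}

static void example_walk_mm(struct mm_struct *mm)
{
	struct mm_walk walk = {
		.pmd_entry	= example_pmd_entry,
		.mm		= mm,
	};

	down_read(&mm->mmap_sem);
	/* one call covers the whole address space; no per-vma loop */
	walk_page_range(0, ~0UL, &walk);
	up_read(&mm->mmap_sem);
}

This is also why the explicit is_vm_hugetlb_page() checks disappear below: in this series the core walker dispatches hugetlb vmas itself (to walk->hugetlb_entry when one is set), so individual walkers no longer have to skip them by hand.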
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4839,7 +4839,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 					unsigned long addr, unsigned long end,
 					struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 
@@ -4865,20 +4865,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 {
 	unsigned long precharge;
-	struct vm_area_struct *vma;
 
+	struct mm_walk mem_cgroup_count_precharge_walk = {
+		.pmd_entry = mem_cgroup_count_precharge_pte_range,
+		.mm = mm,
+	};
 	down_read(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct mm_walk mem_cgroup_count_precharge_walk = {
-			.pmd_entry = mem_cgroup_count_precharge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_count_precharge_walk);
-	}
+	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
 	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
@@ -5011,7 +5004,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 				struct mm_walk *walk)
 {
 	int ret = 0;
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 	enum mc_target_type target_type;
@@ -5107,7 +5100,10 @@ put:			/* get_mctgt_type() gets the page */
 
 static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
-	struct vm_area_struct *vma;
+	struct mm_walk mem_cgroup_move_charge_walk = {
+		.pmd_entry = mem_cgroup_move_charge_pte_range,
+		.mm = mm,
+	};
 
 	lru_add_drain_all();
 	/*
@@ -5130,24 +5126,11 @@ retry:
 		cond_resched();
 		goto retry;
 	}
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		int ret;
-		struct mm_walk mem_cgroup_move_charge_walk = {
-			.pmd_entry = mem_cgroup_move_charge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		ret = walk_page_range(vma->vm_start, vma->vm_end,
-						&mem_cgroup_move_charge_walk);
-		if (ret)
-			/*
-			 * means we have consumed all precharges and failed in
-			 * doing additional charge. Just abandon here.
-			 */
-			break;
-	}
+	/*
+	 * When we have consumed all precharges and failed in doing
+	 * additional charge, the page walk just aborts.
+	 */
+	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
 	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }