mm: remove src/dst mm parameter in copy_page_range()
Both of the mm pointers are no longer needed after commit 7a4830c380
("mm/fork: Pass new vma pointer into copy_page_range()").

Jason Gunthorpe also reported that the parameter ordering of copy_page_range()
is odd.  While at it, reorder the parameters to be more logical: (1) always put
the dst_* fields before the src_* fields, and (2) keep parameters of the same
type together.
[peterx@redhat.com: further reorder some parameters and line format, per Jason]
Link: https://lkml.kernel.org/r/20201002192647.7161-1-peterx@redhat.com
[peterx@redhat.com: fix warnings]
Link: https://lkml.kernel.org/r/20201006200138.GA6026@xz-x1
Reported-by: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20200930204950.6668-1-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
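
In short, an illustrative sketch assembled from the hunks below (not extra
code carried by the patch): the mm arguments were redundant because every VMA
records its owning mm in vma->vm_mm, so the two VMAs alone are enough.

	/* Old declaration: both mm's and both VMAs had to be passed. */
	int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma, struct vm_area_struct *new);

	/* New declaration: dst_* before src_*, mm's derived on demand. */
	int
	copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);

	/* Inside the copy_*_range() helpers, when an mm is actually needed: */
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;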
include/linux/mm.h
@@ -1653,8 +1653,8 @@ struct mmu_notifier_range;
 
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
-			struct vm_area_struct *vma, struct vm_area_struct *new);
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		   struct mmu_notifier_range *range,
 		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
kernel/fork.c
@@ -590,7 +590,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 
 		mm->map_count++;
 		if (!(tmp->vm_flags & VM_WIPEONFORK))
-			retval = copy_page_range(mm, oldmm, mpnt, tmp);
+			retval = copy_page_range(tmp, mpnt);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
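
Caller-side view, as a minimal sketch using the variable names from the
dup_mmap() hunk above (tmp is the child's VMA, mpnt the parent's; this is
illustration only, not additional patch content): each VMA already points at
its own mm, so the mm and oldmm arguments simply disappear.

	/* Sketch: fork path after the change; tmp->vm_mm is the child mm
	 * ('mm'), mpnt->vm_mm is the parent mm ('oldmm'). */
	if (!(tmp->vm_flags & VM_WIPEONFORK))
		retval = copy_page_range(tmp, mpnt);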
mm/memory.c (141 lines changed)
@@ -794,15 +794,14 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  * lock.
  */
 static inline int
-copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pte_t *dst_pte, pte_t *src_pte,
-		struct vm_area_struct *vma, struct vm_area_struct *new,
-		unsigned long addr, int *rss, struct page **prealloc,
-		pte_t pte, struct page *page)
+copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
+		  struct page **prealloc, pte_t pte, struct page *page)
 {
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	struct page *new_page;
 
-	if (!is_cow_mapping(vma->vm_flags))
+	if (!is_cow_mapping(src_vma->vm_flags))
 		return 1;
 
 	/*
@@ -832,16 +831,16 @@ copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * over and copy the page & arm it.
 	 */
 	*prealloc = NULL;
-	copy_user_highpage(new_page, page, addr, vma);
+	copy_user_highpage(new_page, page, addr, src_vma);
 	__SetPageUptodate(new_page);
-	page_add_new_anon_rmap(new_page, new, addr, false);
-	lru_cache_add_inactive_or_unevictable(new_page, new);
+	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
+	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
 	rss[mm_counter(new_page)]++;
 
 	/* All done, just insert the new page copy in the child */
-	pte = mk_pte(new_page, new->vm_page_prot);
-	pte = maybe_mkwrite(pte_mkdirty(pte), new);
-	set_pte_at(dst_mm, addr, dst_pte, pte);
+	pte = mk_pte(new_page, dst_vma->vm_page_prot);
+	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
+	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 	return 0;
 }
 
@@ -850,24 +849,21 @@ copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  * is required to copy this pte.
  */
 static inline int
-copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, int *rss, struct page **prealloc)
+copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
+		 struct page **prealloc)
 {
-	unsigned long vm_flags = vma->vm_flags;
+	struct mm_struct *src_mm = src_vma->vm_mm;
+	unsigned long vm_flags = src_vma->vm_flags;
 	pte_t pte = *src_pte;
 	struct page *page;
 
-	page = vm_normal_page(vma, addr, pte);
+	page = vm_normal_page(src_vma, addr, pte);
 	if (page) {
 		int retval;
 
-		retval = copy_present_page(dst_mm, src_mm,
-					   dst_pte, src_pte,
-					   vma, new,
-					   addr, rss, prealloc,
-					   pte, page);
+		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
+					   addr, rss, prealloc, pte, page);
 		if (retval <= 0)
 			return retval;
 
@@ -901,7 +897,7 @@ copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (!(vm_flags & VM_UFFD_WP))
 		pte = pte_clear_uffd_wp(pte);
 
-	set_pte_at(dst_mm, addr, dst_pte, pte);
+	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 	return 0;
 }
 
@@ -924,11 +920,13 @@ page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 	return new_page;
 }
 
-static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static int
+copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+	       unsigned long end)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
@@ -971,15 +969,15 @@ again:
 		if (unlikely(!pte_present(*src_pte))) {
 			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
 							dst_pte, src_pte,
-							vma, addr, rss);
+							src_vma, addr, rss);
 			if (entry.val)
 				break;
 			progress += 8;
 			continue;
 		}
 		/* copy_present_pte() will clear `*prealloc' if consumed */
-		ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
-				       vma, new, addr, rss, &prealloc);
+		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
+				       addr, rss, &prealloc);
 		/*
 		 * If we need a pre-allocated page for this pte, drop the
 		 * locks, allocate, and try again.
@@ -1014,7 +1012,7 @@ again:
 			entry.val = 0;
 		} else if (ret) {
 			WARN_ON_ONCE(ret != -EAGAIN);
-			prealloc = page_copy_prealloc(src_mm, vma, addr);
+			prealloc = page_copy_prealloc(src_mm, src_vma, addr);
 			if (!prealloc)
 				return -ENOMEM;
 			/* We've captured and resolved the error. Reset, try again. */
@@ -1028,11 +1026,13 @@ out:
 	return ret;
 }
 
-static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+	       unsigned long end)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pmd_t *src_pmd, *dst_pmd;
 	unsigned long next;
 
@@ -1045,9 +1045,9 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
 			|| pmd_devmap(*src_pmd)) {
 			int err;
-			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
+			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
 			err = copy_huge_pmd(dst_mm, src_mm,
-					    dst_pmd, src_pmd, addr, vma);
+					    dst_pmd, src_pmd, addr, src_vma);
 			if (err == -ENOMEM)
 				return -ENOMEM;
 			if (!err)
@@ -1056,18 +1056,20 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 		}
 		if (pmd_none_or_clear_bad(src_pmd))
 			continue;
-		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
-				   vma, new, addr, next))
+		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
+	       unsigned long end)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	pud_t *src_pud, *dst_pud;
 	unsigned long next;
 
@@ -1080,9 +1082,9 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
 			int err;
 
-			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
+			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
 			err = copy_huge_pud(dst_mm, src_mm,
-					    dst_pud, src_pud, addr, vma);
+					    dst_pud, src_pud, addr, src_vma);
 			if (err == -ENOMEM)
 				return -ENOMEM;
 			if (!err)
@@ -1091,18 +1093,19 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
 		}
 		if (pud_none_or_clear_bad(src_pud))
 			continue;
-		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
-				   vma, new, addr, next))
+		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_pud++, src_pud++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
-		struct vm_area_struct *new,
-		unsigned long addr, unsigned long end)
+static inline int
+copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
+	       unsigned long end)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	p4d_t *src_p4d, *dst_p4d;
 	unsigned long next;
 
@@ -1114,20 +1117,22 @@ static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src
 		next = p4d_addr_end(addr, end);
 		if (p4d_none_or_clear_bad(src_p4d))
 			continue;
-		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
-				   vma, new, addr, next))
+		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
+				   addr, next))
 			return -ENOMEM;
 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
 	return 0;
 }
 
-int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		struct vm_area_struct *vma, struct vm_area_struct *new)
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 {
 	pgd_t *src_pgd, *dst_pgd;
 	unsigned long next;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
+	unsigned long addr = src_vma->vm_start;
+	unsigned long end = src_vma->vm_end;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
+	struct mm_struct *src_mm = src_vma->vm_mm;
 	struct mmu_notifier_range range;
 	bool is_cow;
 	int ret;
@@ -1138,19 +1143,19 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
-	    !vma->anon_vma)
+	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
+	    !src_vma->anon_vma)
 		return 0;
 
-	if (is_vm_hugetlb_page(vma))
-		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
+	if (is_vm_hugetlb_page(src_vma))
+		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
 
-	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
+	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
 		 */
-		ret = track_pfn_copy(vma);
+		ret = track_pfn_copy(src_vma);
 		if (ret)
 			return ret;
 	}
@@ -1161,11 +1166,11 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * parent mm. And a permission downgrade will only happen if
 	 * is_cow_mapping() returns true.
 	 */
-	is_cow = is_cow_mapping(vma->vm_flags);
+	is_cow = is_cow_mapping(src_vma->vm_flags);
 
 	if (is_cow) {
 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
-					0, vma, src_mm, addr, end);
+					0, src_vma, src_mm, addr, end);
 		mmu_notifier_invalidate_range_start(&range);
 	}
 
@@ -1176,8 +1181,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
-					    vma, new, addr, next))) {
+		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
+					    addr, next))) {
 			ret = -ENOMEM;
 			break;
 		}