Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - kernel/watchdog.c feature work (took ages to get right)

 - most of MM.  A few tricky bits are held up and probably won't make 4.2.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (91 commits)
  mm: kmemleak_alloc_percpu() should follow the gfp from per_alloc()
  mm, thp: respect MPOL_PREFERRED policy with non-local node
  tmpfs: truncate prealloc blocks past i_size
  mm/memory hotplug: print the last vmemmap region at the end of hot add memory
  mm/mmap.c: optimization of do_mmap_pgoff function
  mm: kmemleak: optimise kmemleak_lock acquiring during kmemleak_scan
  mm: kmemleak: avoid deadlock on the kmemleak object insertion error path
  mm: kmemleak: do not acquire scan_mutex in kmemleak_do_cleanup()
  mm: kmemleak: fix delete_object_*() race when called on the same memory block
  mm: kmemleak: allow safe memory scanning during kmemleak disabling
  memcg: convert mem_cgroup->under_oom from atomic_t to int
  memcg: remove unused mem_cgroup->oom_wakeups
  frontswap: allow multiple backends
  x86, mirror: x86 enabling - find mirrored memory ranges
  mm/memblock: allocate boot time data structures from mirrored memory
  mm/memblock: add extra "flags" to memblock to allow selection of memory based on attribute
  mm: do not ignore mapping_gfp_mask in page cache allocation paths
  mm/cma.c: fix typos in comments
  mm/oom_kill.c: print points as unsigned int
  mm/hugetlb: handle races in alloc_huge_page and hugetlb_reserve_pages
  ...
commit aefbef10e3
@@ -61,3 +61,21 @@ As explained above, a kernel knob is provided that allows
 administrators to configure the period of the hrtimer and the perf
 event. The right value for a particular environment is a trade-off
 between fast response to lockups and detection overhead.
+
+By default, the watchdog runs on all online cores.  However, on a
+kernel configured with NO_HZ_FULL, by default the watchdog runs only
+on the housekeeping cores, not the cores specified in the "nohz_full"
+boot argument.  If we allowed the watchdog to run by default on
+the "nohz_full" cores, we would have to run timer ticks to activate
+the scheduler, which would prevent the "nohz_full" functionality
+from protecting the user code on those cores from the kernel.
+Of course, disabling it by default on the nohz_full cores means that
+when those cores do enter the kernel, by default we will not be
+able to detect if they lock up.  However, allowing the watchdog
+to continue to run on the housekeeping (non-tickless) cores means
+that we will continue to detect lockups properly on those cores.
+
+In either case, the set of cores excluded from running the watchdog
+may be adjusted via the kernel.watchdog_cpumask sysctl.  For
+nohz_full cores, this may be useful for debugging a case where the
+kernel seems to be hanging on the nohz_full cores.
@@ -923,6 +923,27 @@ and nmi_watchdog.
 
 ==============================================================
 
+watchdog_cpumask:
+
+This value can be used to control on which cpus the watchdog may run.
+The default cpumask is all possible cores, but if NO_HZ_FULL is
+enabled in the kernel config, and cores are specified with the
+nohz_full= boot argument, those cores are excluded by default.
+Offline cores can be included in this mask, and if the core is later
+brought online, the watchdog will be started based on the mask value.
+
+Typically this value would only be touched in the nohz_full case
+to re-enable cores that by default were not running the watchdog,
+if a kernel lockup was suspected on those cores.
+
+The argument value is the standard cpulist format for cpumasks,
+so for example to enable the watchdog on cores 0, 2, 3, and 4 you
+might say:
+
+  echo 0,2-4 > /proc/sys/kernel/watchdog_cpumask
+
+==============================================================
+
 watchdog_thresh:
 
 This value can be used to control the frequency of hrtimer and NMI
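The same setting can also be made programmatically. A minimal userspace sketch (an editor's illustration, not part of the patch), assuming a kernel with this series applied so that the sysctl file exists:

/*
 * Illustration only: write the cpulist "0,2-4" to the new
 * kernel.watchdog_cpumask sysctl, the programmatic equivalent of the
 * echo example above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/watchdog_cpumask", "w");

	if (!f) {
		perror("watchdog_cpumask");
		return 1;
	}
	/* Standard cpulist format: run the watchdog on cores 0 and 2-4. */
	fprintf(f, "0,2-4\n");
	fclose(f);
	return 0;
}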
@@ -467,7 +467,13 @@ mmap(MAP_LOCKED) SYSTEM CALL HANDLING
 
 In addition to the mlock()/mlockall() system calls, an application can request
 that a region of memory be mlocked by supplying the MAP_LOCKED flag to the mmap()
-call. Furthermore, any mmap() call or brk() call that expands the heap by a
+call. There is one important and subtle difference here, though. mmap() + mlock()
+will fail if the range cannot be faulted in (e.g. because mm_populate fails)
+and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmapped
+area will still have properties of the locked area - i.e. pages will not get
+swapped out - but major page faults to fault memory in might still happen.
+
+Furthermore, any mmap() call or brk() call that expands the heap by a
 task that has previously called mlockall() with the MCL_FUTURE flag will result
 in the newly mapped memory being mlocked. Before the unevictable/mlock
 changes, the kernel simply called make_pages_present() to allocate pages and
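A small illustration of the difference described above (editor's sketch, not part of the patch): both variants ask for locked memory, but only the explicit mlock() reports population failures as ENOMEM.

/*
 * Illustration only.  mmap(MAP_LOCKED) succeeds even if the pages
 * cannot all be faulted in up front; mmap() followed by mlock()
 * reports that condition with ENOMEM.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096 * 16;

	/* Variant 1: best-effort locking; does not fail for this reason. */
	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
	if (a == MAP_FAILED)
		perror("mmap(MAP_LOCKED)");

	/* Variant 2: the explicit mlock() surfaces population failures. */
	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (b != MAP_FAILED && mlock(b, len) != 0)
		printf("mlock failed: %s\n", strerror(errno));

	return 0;
}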
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H
+#define _ASM_ALPHA_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MM_ARCH_HOOKS_H
+#define _ASM_ARC_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */
@@ -53,10 +53,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline int huge_pte_none(pte_t pte)
 {
 	return pte_none(pte);
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARM_MM_ARCH_HOOKS_H
+#define _ASM_ARM_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */
@@ -41,11 +41,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
@@ -86,10 +86,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline int huge_pte_none(pte_t pte)
 {
 	return pte_none(pte);
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
+#define _ASM_ARM64_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
@@ -31,13 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-#endif
-
 int pmd_huge(pmd_t pmd)
 {
 	return !(pmd_val(pmd) & PMD_TABLE_BIT);
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H
+#define _ASM_AVR32_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H
+#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_C6X_MM_ARCH_HOOKS_H
+#define _ASM_C6X_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H
+#define _ASM_CRIS_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_FRV_MM_ARCH_HOOKS_H
+#define _ASM_FRV_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H
+#define _ASM_HEXAGON_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */
@@ -20,10 +20,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 		REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 				   pte_t *ptep, pte_t pte)
 {
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_IA64_MM_ARCH_HOOKS_H
+#define _ASM_IA64_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */
@@ -65,11 +65,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_M32R_MM_ARCH_HOOKS_H
+#define _ASM_M32R_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_M68K_MM_ARCH_HOOKS_H
+#define _ASM_M68K_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */
@@ -134,20 +134,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 		    enum dma_data_direction direction)
 {
 	int i;
-	for (i = 0; i < nelems; i++, sg++)
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
 		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 }
 
 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction direction)
 {
 	int i;
-	for (i = 0; i < nelems; i++, sg++)
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
 		dma_sync_for_device(sg_virt(sg), sg->length, direction);
 }
 
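Background on the conversion pattern used here and in the other for_each_sg() hunks: scatterlist tables may be chained, so consecutive entries are not necessarily adjacent in memory, and plain sg++ walks off the end of a table segment. A simplified, self-contained model of the iterator follows (editor's illustration only; the struct and helpers are toy stand-ins for the kernel's, with an is_last flag playing the role of sg_is_last()).

/*
 * Toy model: four entries spread over two chained table segments.
 * Pointer arithmetic would miss the second segment; following the
 * chain link, as for_each_sg() does, visits every entry.
 */
#include <stdio.h>

struct sg {
	int length;
	int is_last;		/* last real entry in the whole list */
	struct sg *chain;	/* non-NULL: jump to the next segment */
};

static struct sg *sg_next(struct sg *s)
{
	if (s->is_last)
		return NULL;
	s++;
	if (s->chain)		/* chain slot: not a real entry */
		s = s->chain;
	return s;
}

#define for_each_sg(sgl, s, n, i) \
	for (i = 0, s = (sgl); s && i < (n); i++, s = sg_next(s))

int main(void)
{
	struct sg seg2[2] = { { 300, 0, NULL }, { 400, 1, NULL } };
	struct sg seg1[3] = { { 100, 0, NULL }, { 200, 0, NULL },
			      { 0, 0, seg2 } };
	struct sg *s;
	int i, total = 0;

	for_each_sg(seg1, s, 4, i)
		total += s->length;
	printf("total length: %d\n", total);	/* prints 1000 */
	return 0;
}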
@@ -14,10 +14,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 int prepare_hugepage_range(struct file *file, unsigned long addr,
 			   unsigned long len);
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_METAG_MM_ARCH_HOOKS_H
+#define _ASM_METAG_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */
@@ -89,11 +89,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return pmd_page_shift(pmd) > PAGE_SHIFT;
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
+#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */
@@ -38,10 +38,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr,
 					  unsigned long end,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
+#define _ASM_MIPS_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
@@ -568,11 +568,11 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 }
 
 /*
- * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
+ * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
  * different prototype.
  */
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 				       unsigned long address, pmd_t *pmdp)
 {
 	pmd_t old = *pmdp;
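The __HAVE_ARCH_* convention behind this rename, sketched as a compilable toy (editor's illustration only; the types are stand-ins, not the kernel's): an architecture that supplies its own pmdp_huge_get_and_clear() defines the guard macro, and the generic fallback compiles out.

#include <stdio.h>

/* Toy stand-ins so the pattern compiles outside the kernel. */
typedef struct { unsigned long val; } pmd_t;
struct mm_struct { int dummy; };

static inline void pmd_clear(pmd_t *pmdp) { pmdp->val = 0; }

/*
 * An arch header would do:
 *   #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 * and supply its own implementation; otherwise this fallback is used.
 */
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);	/* clear the entry, return the old value */
	return pmd;
}
#endif

int main(void)
{
	struct mm_struct mm;
	pmd_t e = { 42 };
	pmd_t old = pmdp_huge_get_and_clear(&mm, 0, &e);

	printf("old=%lu now=%lu\n", old.val, e.val);	/* old=42 now=0 */
	return 0;
}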
@@ -51,11 +51,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H
+#define _ASM_MN10300_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H
+#define _ASM_NIOS2_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H
+#define _ASM_OPENRISC_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H
+#define _ASM_PARISC_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */
@@ -478,14 +478,16 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
 static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sglist++ ) {
-		unsigned long vaddr = (unsigned long)sg_virt(sglist);
-		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
-		sg_dma_len(sglist) = sglist->length;
-		flush_kernel_dcache_range(vaddr, sglist->length);
+	for_each_sg(sglist, sg, nents, i) {
+		unsigned long vaddr = (unsigned long)sg_virt(sg);
+
+		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
+		sg_dma_len(sg) = sg->length;
+		flush_kernel_dcache_range(vaddr, sg->length);
 	}
 	return nents;
 }
@@ -493,6 +495,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
 static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -501,8 +504,8 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
 
 	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
 
-	for (i = 0; i < nents; i++, sglist++ )
-		flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 	return;
 }
 
@@ -523,21 +526,23 @@ static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_h
 static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
 	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
 
-	for (i = 0; i < nents; i++, sglist++ )
-		flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
 static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
 	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
 
-	for (i = 0; i < nents; i++, sglist++ )
-		flush_kernel_vmap_range(sg_virt(sglist), sglist->length);
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
 struct hppa_dma_ops pcxl_dma_ops = {
@@ -112,11 +112,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
-
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 				   pte_t *ptep, pte_t pte)
 {
@@ -0,0 +1,28 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
+#define _ASM_POWERPC_MM_ARCH_HOOKS_H
+
+static inline void arch_remap(struct mm_struct *mm,
+			      unsigned long old_start, unsigned long old_end,
+			      unsigned long new_start, unsigned long new_end)
+{
+	/*
+	 * mremap() doesn't allow moving multiple vmas so we can limit the
+	 * check to old_start == vdso_base.
+	 */
+	if (old_start == mm->context.vdso_base)
+		mm->context.vdso_base = new_start;
+}
+#define arch_remap arch_remap
+
+#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */
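How such a hook is consumed on the generic side, in miniature (editor's illustration only; toy types, not the actual mm code): the core provides a no-op default unless the architecture defined the arch_remap marker, as the powerpc header above does.

#include <stdio.h>

struct mm_context { unsigned long vdso_base; };
struct mm_struct { struct mm_context context; };

/* powerpc-style implementation, as in the new header above */
static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
	if (old_start == mm->context.vdso_base)
		mm->context.vdso_base = new_start;
}
#define arch_remap arch_remap

/* generic fallback, compiled out because the marker is defined */
#ifndef arch_remap
static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
}
#endif

int main(void)
{
	struct mm_struct mm = { { 0x1000 } };

	/* moving the vdso mapping updates the cached base address */
	arch_remap(&mm, 0x1000, 0x2000, 0x9000, 0xa000);
	printf("vdso_base now %#lx\n", mm.context.vdso_base);	/* 0x9000 */
	return 0;
}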
@@ -8,7 +8,6 @@
 #include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputhreads.h>
 
 /*
@@ -127,5 +126,27 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+		mm->context.vdso_base = 0;
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
@@ -569,14 +569,10 @@ extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 				  unsigned long address, pmd_t *pmdp);
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 				unsigned long addr, pmd_t *pmdp);
 
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
-			      pmd_t *pmdp);
-
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pmd_t *pmdp)
@@ -592,6 +588,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 extern void pmdp_splitting_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
 
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp);
+#define pmdp_collapse_flush pmdp_collapse_flush
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				       pgtable_t pgtable);
@@ -557,11 +557,11 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	struct iommu_table *tbl;
 	struct scatterlist *sgl;
-	int ret, count = 0;
+	int ret, count;
 	size_t alloc_size = 0;
 
 	tbl = get_iommu_table_base(dev);
-	for (sgl = sglist; count < nelems; count++, sgl++)
+	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
@@ -577,7 +577,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 		return ret;
 	}
 
-	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
+	for_each_sg(sglist, sgl, ret, count)
 		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
@@ -594,10 +594,10 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
-	int count = 0;
+	int count;
 
 	tbl = get_iommu_table_base(dev);
-	for (sgl = sglist; count < nelems; count++, sgl++)
+	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
@@ -439,11 +439,6 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
 }
 #endif
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -554,18 +554,14 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 	return old;
 }
 
-pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
-		       pmd_t *pmdp)
+pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
 {
 	pmd_t pmd;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	if (pmd_trans_huge(*pmdp)) {
-		pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
-	} else {
-		/*
-		 * khugepaged calls this for normal pmd
-		 */
+	VM_BUG_ON(pmd_trans_huge(*pmdp));
+
 	pmd = *pmdp;
 	pmd_clear(pmdp);
 	/*
@@ -594,7 +590,6 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	 * the old content.
 	 */
 	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
-	}
 	return pmd;
 }
 
@@ -817,7 +812,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	return;
 }
 
-pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 			 unsigned long addr, pmd_t *pmdp)
 {
 	pmd_t old_pmd;
@@ -35,7 +35,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
 #define arch_clear_hugepage_flags(page)	do { } while (0)
 
 int arch_prepare_hugepage(struct page *page);
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_S390_MM_ARCH_HOOKS_H
+#define _ASM_S390_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
@@ -1498,8 +1498,8 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 	return pmd_young(pmd);
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					unsigned long address, pmd_t *pmdp)
 {
 	pmd_t pmd = *pmdp;
@@ -1509,8 +1509,8 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 	return pmd;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 					    unsigned long address,
 					    pmd_t *pmdp, int full)
 {
@@ -1522,11 +1522,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 	return pmd;
 }
 
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 					  unsigned long address, pmd_t *pmdp)
 {
-	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
@@ -1548,6 +1548,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 	}
 }
 
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmdp)
+{
+	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+
 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 
@@ -33,11 +33,12 @@ static struct memblock_type oldmem_type = {
 };
 
 #define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+	for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE,		\
+			     &memblock.physmem,				\
 			     &oldmem_type, p_start,			\
 			     p_end, p_nid);				\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range(&i, nid, &memblock.physmem,		\
+	     __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
 			      &oldmem_type,				\
 			      p_start, p_end, p_nid))
 
@@ -193,11 +193,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmdp;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	if (!MACHINE_HAS_HPAGE)
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H
+#define _ASM_SCORE_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */
@@ -26,9 +26,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
-}
-
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_SH_MM_ARCH_HOOKS_H
+#define _ASM_SH_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_SH_MM_ARCH_HOOKS_H */
@@ -62,11 +62,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return 0;
@@ -11,10 +11,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep);
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
 					 unsigned long len) {
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H
+#define _ASM_SPARC_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */
@@ -865,8 +865,8 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,
 					    pmd_t *pmdp)
 {
@@ -2086,6 +2086,7 @@ int ldc_map_sg(struct ldc_channel *lp,
 	struct cookie_state state;
 	struct ldc_iommu *iommu;
 	int err;
+	struct scatterlist *s;
 
 	if (map_perm & ~LDC_MAP_ALL)
 		return -EINVAL;
@@ -2112,9 +2113,10 @@ int ldc_map_sg(struct ldc_channel *lp,
 	state.pte_idx = (base - iommu->page_table);
 	state.nc = 0;
 
-	for (i = 0; i < num_sg; i++)
-		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
-			     sg[i].offset, sg[i].length);
+	for_each_sg(sg, s, num_sg, i) {
+		fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
+			     s->offset, s->length);
+	}
 
 	return state.nc;
 }
@@ -172,11 +172,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
@@ -1966,7 +1966,8 @@ static phys_addr_t __init available_memory(void)
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL)
 		available = available + (pa_end - pa_start);
 
 	return available;
@@ -1992,7 +1993,8 @@ static void __init reduce_memory(phys_addr_t limit_ram)
 	if (limit_ram >= avail_ram)
 		return;
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL) {
 		phys_addr_t region_size = pa_end - pa_start;
 		phys_addr_t clip_start = pa_start;
 
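The idea behind the new flags argument, modelled in plain C (editor's illustration only; the constants mirror the memblock attributes this series introduces, with MEMBLOCK_NONE meaning "no filtering"): regions carry attribute flags, and iteration can be restricted to regions whose flags match the request.

#include <stdio.h>

#define MEMBLOCK_NONE	0x0UL	/* no special request: match everything */
#define MEMBLOCK_MIRROR	0x2UL	/* mirrored region */

struct region { unsigned long base, size, flags; };

int main(void)
{
	struct region mem[] = {
		{ 0x00000000, 0x40000000, MEMBLOCK_MIRROR },
		{ 0x40000000, 0xc0000000, MEMBLOCK_NONE },
	};
	unsigned long want = MEMBLOCK_MIRROR, total = 0;

	for (unsigned int i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		/* MEMBLOCK_NONE requests match every region */
		if (want == MEMBLOCK_NONE || (mem[i].flags & want))
			total += mem[i].size;
	}
	printf("selected %#lx bytes\n", total);
	return 0;
}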
@@ -40,10 +40,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_TILE_MM_ARCH_HOOKS_H
+#define _ASM_TILE_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */
@@ -414,8 +414,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 }
 
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					unsigned long address,
 					pmd_t *pmdp)
 {
@@ -160,11 +160,6 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long addr, unsigned long len,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UM_MM_ARCH_HOOKS_H
+#define _ASM_UM_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_UM_MM_ARCH_HOOKS_H */
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H
+#define _ASM_UNICORE32_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */
@@ -26,9 +26,6 @@ static inline int prepare_hugepage_range(struct file *file,
 	return 0;
 }
 
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
-}
-
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_X86_MM_ARCH_HOOKS_H
+#define _ASM_X86_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_X86_MM_ARCH_HOOKS_H */
@@ -805,8 +805,8 @@ static inline int pmd_write(pmd_t pmd)
 	return pmd_flags(pmd) & _PAGE_RW;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmdp)
 {
 	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
@@ -91,7 +91,8 @@ void __init setup_bios_corruption_check(void)
 
 	corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL) {
 		start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
 				PAGE_SIZE, corruption_check_size);
 		end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
@@ -1123,7 +1123,8 @@ void __init memblock_find_dma_reserve(void)
 		nr_pages += end_pfn - start_pfn;
 	}
 
-	for_each_free_mem_range(u, NUMA_NO_NODE, &start, &end, NULL) {
+	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL) {
 		start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
 		end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
 		if (start_pfn < end_pfn)
@@ -1105,6 +1105,9 @@ void __init setup_arch(char **cmdline_p)
 	memblock_set_current_limit(ISA_END_ADDRESS);
 	memblock_x86_fill();
 
+	if (efi_enabled(EFI_BOOT))
+		efi_find_mirror();
+
 	/*
 	 * The EFI specification says that boot service code won't be called
 	 * after ExitBootServices(). This is, in fact, a lie.
@@ -433,7 +433,7 @@ void __init add_highpages_with_active_regions(int nid,
 	phys_addr_t start, end;
 	u64 i;
 
-	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
 		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
 					    start_pfn, end_pfn);
 		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
@@ -117,6 +117,27 @@ void efi_get_time(struct timespec *now)
 	now->tv_nsec = 0;
 }
 
+void __init efi_find_mirror(void)
+{
+	void *p;
+	u64 mirror_size = 0, total_size = 0;
+
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		efi_memory_desc_t *md = p;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+		total_size += size;
+		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+			memblock_mark_mirror(start, size);
+			mirror_size += size;
+		}
+	}
+	if (mirror_size)
+		pr_info("Memory: %lldM/%lldM mirrored memory\n",
+			mirror_size>>20, total_size>>20);
+}
+
 /*
  * Tell the kernel about the EFI memory map. This might include
  * more than the max 128 entries that can fit in the e820 legacy
@@ -52,14 +52,15 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	   enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++ ) {
+	for_each_sg(sglist, sg, nents, i) {
 		BUG_ON(!sg_page(sg));
 
 		sg->dma_address = sg_phys(sg);
@@ -124,20 +125,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 	consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
 }
 static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 		    enum dma_data_direction dir)
 {
 	int i;
-	for (i = 0; i < nelems; i++, sg++)
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
 		consistent_sync(sg_virt(sg), sg->length, dir);
 }
 
 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction dir)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction dir)
{
 	int i;
-	for (i = 0; i < nelems; i++, sg++)
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
 		consistent_sync(sg_virt(sg), sg->length, dir);
 }
 static inline int
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H
+#define _ASM_XTENSA_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */
@@ -165,7 +165,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 		 * infrastructure. There is no real reason why the selected
 		 * task should have access to the memory reserves.
 		 */
-		mark_tsk_oom_victim(selected);
+		mark_oom_victim(selected);
 		send_sig(SIGKILL, selected, 0);
 		rem += selected_tasksize;
 	}
@@ -353,9 +353,11 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
+	mutex_lock(&oom_lock);
 	if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
 			   GFP_KERNEL, 0, NULL, true))
 		pr_info("OOM request ignored because killer is disabled\n");
+	mutex_unlock(&oom_lock);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
@@ -381,15 +381,9 @@ static int __init xen_tmem_init(void)
 #ifdef CONFIG_FRONTSWAP
 	if (tmem_enabled && frontswap) {
 		char *s = "";
-		struct frontswap_ops *old_ops;
 
 		tmem_frontswap_poolid = -1;
-		old_ops = frontswap_register_ops(&tmem_frontswap_ops);
-		if (IS_ERR(old_ops) || old_ops) {
-			if (IS_ERR(old_ops))
-				return PTR_ERR(old_ops);
-			s = " (WARNING: frontswap_ops overridden)";
-		}
+		frontswap_register_ops(&tmem_frontswap_ops);
 		pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
 			s);
 	}
@@ -47,12 +47,11 @@ static void config_item_release(struct kref *kref);
  * config_item_init - initialize item.
  * @item: item in question.
  */
-void config_item_init(struct config_item *item)
+static void config_item_init(struct config_item *item)
 {
 	kref_init(&item->ci_kref);
 	INIT_LIST_HEAD(&item->ci_entry);
 }
-EXPORT_SYMBOL(config_item_init);
 
 /**
  * config_item_set_name - Set the name of an item
@@ -130,7 +130,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		goto out;
 
 	ret = 0;
-	hugetlb_prefault_arch_hook(vma->vm_mm);
 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
 		inode->i_size = len;
 out:
@@ -525,7 +525,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
 			}
 		}
 		err = add_to_page_cache_lru(*cached_page, mapping,
-				index, GFP_KERNEL);
+				index,
+				GFP_KERNEL & mapping_gfp_mask(mapping));
 		if (unlikely(err)) {
 			if (err == -EEXIST)
 				continue;
@@ -85,12 +85,7 @@ static inline void *ntfs_malloc_nofs_nofail(unsigned long size)
 
 static inline void ntfs_free(void *addr)
 {
-	if (!is_vmalloc_addr(addr)) {
-		kfree(addr);
-		/* free_page((unsigned long)addr); */
-		return;
-	}
-	vfree(addr);
+	kvfree(addr);
 }
 
 #endif /* _LINUX_NTFS_MALLOC_H */
@@ -2925,7 +2925,8 @@ static int __ocfs2_rotate_tree_left(handle_t *handle,
 	struct ocfs2_path *right_path = NULL;
 	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
 
-	BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));
+	if (!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])))
+		return 0;
 
 	*empty_extent_path = NULL;
 
@@ -4311,13 +4312,13 @@ out:
 	return ret;
 }
 
-static enum ocfs2_contig_type
-ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
+static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
 				  struct ocfs2_path *path,
 				  struct ocfs2_extent_list *el, int index,
-				  struct ocfs2_extent_rec *split_rec)
+				  struct ocfs2_extent_rec *split_rec,
+				  struct ocfs2_merge_ctxt *ctxt)
 {
-	int status;
+	int status = 0;
 	enum ocfs2_contig_type ret = CONTIG_NONE;
 	u32 left_cpos, right_cpos;
 	struct ocfs2_extent_rec *rec = NULL;
@@ -4336,8 +4337,11 @@ static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
 
 	if (left_cpos != 0) {
 		left_path = ocfs2_new_path_from_path(path);
-		if (!left_path)
+		if (!left_path) {
+			status = -ENOMEM;
+			mlog_errno(status);
 			goto exit;
+		}
 
 		status = ocfs2_find_path(et->et_ci, left_path,
 					 left_cpos);
@@ -4392,8 +4396,11 @@ static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
 		goto free_left_path;
 
 	right_path = ocfs2_new_path_from_path(path);
-	if (!right_path)
+	if (!right_path) {
+		status = -ENOMEM;
+		mlog_errno(status);
 		goto free_left_path;
+	}
 
 	status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
 	if (status)
@@ -4433,7 +4440,10 @@ free_right_path:
 free_left_path:
 	ocfs2_free_path(left_path);
 exit:
-	return ret;
+	if (status == 0)
+		ctxt->c_contig_type = ret;
+
+	return status;
 }
 
 static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
@@ -5039,9 +5049,14 @@ int ocfs2_split_extent(handle_t *handle,
 		goto out;
 	}
 
-	ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el,
-							    split_index,
-							    split_rec);
+	ret = ocfs2_figure_merge_contig_type(et, path, el,
+					     split_index,
+					     split_rec,
+					     &ctxt);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
 
 	/*
 	 * The core merge / split code wants to know how much room is
@@ -523,7 +523,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
 	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
 	unsigned long len = bh_result->b_size;
-	unsigned int clusters_to_alloc = 0;
+	unsigned int clusters_to_alloc = 0, contig_clusters = 0;
 
 	cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock);
 
@@ -560,8 +560,10 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
 		/* fill hole, allocate blocks can't be larger than the size
 		 * of the hole */
 		clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
-		if (clusters_to_alloc > contig_blocks)
-			clusters_to_alloc = contig_blocks;
+		contig_clusters = ocfs2_clusters_for_blocks(inode->i_sb,
+				contig_blocks);
+		if (clusters_to_alloc > contig_clusters)
+			clusters_to_alloc = contig_clusters;
 
 		/* allocate extent and insert them into the extent tree */
 		ret = ocfs2_extend_allocation(inode, cpos,
@@ -619,9 +621,6 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 	/* this io's submitter should not have unlocked this before we could */
 	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
-	if (ocfs2_iocb_is_sem_locked(iocb))
-		ocfs2_iocb_clear_sem_locked(iocb);
-
 	if (ocfs2_iocb_is_unaligned_aio(iocb)) {
 		ocfs2_iocb_clear_unaligned_aio(iocb);
 
@@ -925,13 +924,23 @@ clean_orphan:
 		int update_isize = written > 0 ? 1 : 0;
 		loff_t end = update_isize ? offset + written : 0;
 
-		tmp_ret = ocfs2_del_inode_from_orphan(osb, inode,
+		tmp_ret = ocfs2_inode_lock(inode, &di_bh, 1);
+		if (tmp_ret < 0) {
+			ret = tmp_ret;
+			mlog_errno(ret);
+			goto out;
+		}
+
+		tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
 				update_isize, end);
 		if (tmp_ret < 0) {
 			ret = tmp_ret;
 			mlog_errno(ret);
 			goto out;
 		}
 
+		ocfs2_inode_unlock(inode, 1);
+
 		tmp_ret = jbd2_journal_force_commit(journal);
 		if (tmp_ret < 0) {
 			ret = tmp_ret;
@@ -79,7 +79,6 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 enum ocfs2_iocb_lock_bits {
         OCFS2_IOCB_RW_LOCK = 0,
         OCFS2_IOCB_RW_LOCK_LEVEL,
-        OCFS2_IOCB_SEM,
         OCFS2_IOCB_UNALIGNED_IO,
         OCFS2_IOCB_NUM_LOCKS
 };

@@ -88,12 +87,6 @@ enum ocfs2_iocb_lock_bits {
         clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private)
 #define ocfs2_iocb_rw_locked_level(iocb) \
         test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private)
-#define ocfs2_iocb_set_sem_locked(iocb) \
-        set_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
-#define ocfs2_iocb_clear_sem_locked(iocb) \
-        clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
-#define ocfs2_iocb_is_sem_locked(iocb) \
-        test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)

 #define ocfs2_iocb_set_unaligned_aio(iocb) \
         set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)

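Note: the iocb lock state here is a set of bit flags packed into one word (iocb->private) and driven with set_bit/clear_bit/test_bit; dropping OCFS2_IOCB_SEM just shrinks the enum, and the NUM_LOCKS sentinel keeps counting the remaining bits. A small userspace sketch of the same flags-in-a-word idiom (plain, non-atomic, hypothetical names):

#include <stdio.h>

enum iocb_lock_bits {
        IOCB_RW_LOCK = 0,
        IOCB_RW_LOCK_LEVEL,
        IOCB_UNALIGNED_IO,
        IOCB_NUM_LOCKS          /* sentinel: number of bits in use */
};

#define set_flag(word, bit)     ((word) |=  (1UL << (bit)))
#define clear_flag(word, bit)   ((word) &= ~(1UL << (bit)))
#define test_flag(word, bit)    (!!((word) & (1UL << (bit))))

int main(void)
{
        unsigned long private = 0;      /* stands in for iocb->private */

        set_flag(private, IOCB_RW_LOCK);
        printf("rw locked: %d\n", test_flag(private, IOCB_RW_LOCK));
        clear_flag(private, IOCB_RW_LOCK);
        printf("rw locked: %d\n", test_flag(private, IOCB_RW_LOCK));
        return 0;
}
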
@@ -64,6 +64,40 @@ static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
         return count;
 }

+void __mlog_printk(const u64 *mask, const char *func, int line,
+                   const char *fmt, ...)
+{
+        struct va_format vaf;
+        va_list args;
+        const char *level;
+        const char *prefix = "";
+
+        if (!__mlog_test_u64(*mask, mlog_and_bits) ||
+            __mlog_test_u64(*mask, mlog_not_bits))
+                return;
+
+        if (*mask & ML_ERROR) {
+                level = KERN_ERR;
+                prefix = "ERROR: ";
+        } else if (*mask & ML_NOTICE) {
+                level = KERN_NOTICE;
+        } else {
+                level = KERN_INFO;
+        }
+
+        va_start(args, fmt);
+
+        vaf.fmt = fmt;
+        vaf.va = &args;
+
+        printk("%s(%s,%u,%u):%s:%d %s%pV",
+               level, current->comm, task_pid_nr(current),
+               raw_smp_processor_id(), func, line, prefix, &vaf);
+
+        va_end(args);
+}
+EXPORT_SYMBOL_GPL(__mlog_printk);
+
 struct mlog_attribute {
         struct attribute attr;
         u64 mask;

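Note: the new out-of-line __mlog_printk() forwards its va_list to printk through the kernel's struct va_format / %pV extension, so the whole message is emitted in a single printk call. Userspace has no %pV, but the same forwarding shape can be sketched with vfprintf (hypothetical names):

#include <stdarg.h>
#include <stdio.h>

/*
 * One visible variadic entry point that captures the va_list and
 * forwards it to a single formatted-output call (the kernel uses
 * printk("%pV") with struct va_format instead of vfprintf).
 */
static void my_log(const char *func, int line, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        fprintf(stderr, "(%s:%d) ", func, line);
        vfprintf(stderr, fmt, args);
        va_end(args);
}

int main(void)
{
        my_log(__func__, __LINE__, "mounted volume %s\n", "testvol");
        return 0;
}
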
@@ -162,38 +162,20 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;

 #endif

+__printf(4, 5)
+void __mlog_printk(const u64 *m, const char *func, int line,
+                   const char *fmt, ...);
+
 /*
- * smp_processor_id() "helpfully" screams when called outside preemptible
- * regions in current kernels.  sles doesn't have the variants that don't
- * scream.  just do this instead of trying to guess which we're building
- * against.. *sigh*.
+ * Testing before the __mlog_printk call lets the compiler eliminate the
+ * call completely when (m & ML_ALLOWED_BITS) is 0.
  */
-#define __mlog_cpu_guess ({                     \
-        unsigned long _cpu = get_cpu();         \
-        put_cpu();                              \
-        _cpu;                                   \
-})
-
-/* In the following two macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define __mlog_printk(level, fmt, args...)                              \
-        printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm,           \
-               task_pid_nr(current), __mlog_cpu_guess,                  \
-               __PRETTY_FUNCTION__, __LINE__ , ##args)
-
-#define mlog(mask, fmt, args...) do {                                   \
-        u64 __m = MLOG_MASK_PREFIX | (mask);                            \
-        if ((__m & ML_ALLOWED_BITS) &&                                  \
-            __mlog_test_u64(__m, mlog_and_bits) &&                      \
-            !__mlog_test_u64(__m, mlog_not_bits)) {                     \
-                if (__m & ML_ERROR)                                     \
-                        __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
-                else if (__m & ML_NOTICE)                               \
-                        __mlog_printk(KERN_NOTICE, fmt , ##args);       \
-                else __mlog_printk(KERN_INFO, fmt , ##args);            \
-        }                                                               \
+#define mlog(mask, fmt, ...)                                            \
+do {                                                                    \
+        u64 _m = MLOG_MASK_PREFIX | (mask);                             \
+        if (_m & ML_ALLOWED_BITS)                                       \
+                __mlog_printk(&_m, __func__, __LINE__, fmt,             \
+                              ##__VA_ARGS__);                           \
 } while (0)

 #define mlog_errno(st) ({                                               \

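Note: the rework moves the and/not mask tests and the level selection out of the macro body into the function; only the cheap `_m & ML_ALLOWED_BITS` test stays in the macro, so when the prefixed mask is a compile-time constant outside ML_ALLOWED_BITS the whole statement is dead code the compiler can drop. A hypothetical userspace reduction of that shape (mask values invented):

#include <stdint.h>
#include <stdio.h>

#define ML_ALLOWED_BITS  0xffff0000ULL  /* hypothetical mask split */
#define MLOG_MASK_PREFIX 0x00010000ULL

static void my_mlog_printk(const uint64_t *m, const char *fmt)
{
        /* runtime filtering would happen here, as in __mlog_printk */
        printf("[%llx] %s", (unsigned long long)*m, fmt);
}

/*
 * Only the constant-foldable test sits in the macro; everything else
 * is out of line.  If neither the prefix nor the mask intersects
 * ML_ALLOWED_BITS, the if-body folds away at compile time.
 */
#define my_mlog(mask, fmt)                              \
do {                                                    \
        uint64_t _m = MLOG_MASK_PREFIX | (mask);        \
        if (_m & ML_ALLOWED_BITS)                       \
                my_mlog_printk(&_m, fmt);               \
} while (0)

int main(void)
{
        my_mlog(0, "hello\n");  /* prefix bit keeps this call live */
        return 0;
}
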
@@ -2204,7 +2204,7 @@ out:
         kfree(o2net_hand);
         kfree(o2net_keep_req);
         kfree(o2net_keep_resp);
-
+        o2net_debugfs_exit();
         o2quo_exit();
         return -ENOMEM;
 }

@@ -1617,7 +1617,7 @@ int __ocfs2_add_entry(handle_t *handle,
         struct ocfs2_dir_entry *de, *de1;
         struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
         struct super_block *sb = dir->i_sb;
-        int retval, status;
+        int retval;
         unsigned int size = sb->s_blocksize;
         struct buffer_head *insert_bh = lookup->dl_leaf_bh;
         char *data_start = insert_bh->b_data;

@@ -1695,26 +1695,26 @@ int __ocfs2_add_entry(handle_t *handle,
                         }

                         if (insert_bh == parent_fe_bh)
-                                status = ocfs2_journal_access_di(handle,
+                                retval = ocfs2_journal_access_di(handle,
                                                                  INODE_CACHE(dir),
                                                                  insert_bh,
                                                                  OCFS2_JOURNAL_ACCESS_WRITE);
                         else {
-                                status = ocfs2_journal_access_db(handle,
+                                retval = ocfs2_journal_access_db(handle,
                                                                  INODE_CACHE(dir),
                                                                  insert_bh,
                                                                  OCFS2_JOURNAL_ACCESS_WRITE);

-                                if (ocfs2_dir_indexed(dir)) {
-                                        status = ocfs2_dx_dir_insert(dir,
+                                if (!retval && ocfs2_dir_indexed(dir))
+                                        retval = ocfs2_dx_dir_insert(dir,
                                                                 handle,
                                                                 lookup);
-                                        if (status) {
-                                                mlog_errno(status);
-                                                goto bail;
-                                        }
-                                }
+                        }
+
+                        if (retval) {
+                                mlog_errno(retval);
+                                goto bail;
                         }

                         /* By now the buffer is marked for journaling */
                         offset += le16_to_cpu(de->rec_len);

@@ -3543,13 +3543,10 @@ static void dx_leaf_sort_swap(void *a, void *b, int size)
 {
         struct ocfs2_dx_entry *entry1 = a;
         struct ocfs2_dx_entry *entry2 = b;
-        struct ocfs2_dx_entry tmp;

         BUG_ON(size != sizeof(*entry1));

-        tmp = *entry1;
-        *entry1 = *entry2;
-        *entry2 = tmp;
+        swap(*entry1, *entry2);
 }

 static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)

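Note: dx_leaf_sort_swap() above (and the refcount/namei hunks below) replace the manual three-assignment exchange with the kernel's swap() macro from include/linux/kernel.h, which uses typeof to stay type-generic. A standalone sketch of the same macro in userspace C:

#include <stdio.h>

/* Same shape as the kernel's swap() helper. */
#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct dx_entry {
        unsigned int hash;
        unsigned int blkno;
};

static void dx_swap(void *a, void *b)
{
        struct dx_entry *e1 = a, *e2 = b;

        swap(*e1, *e2);         /* replaces the manual temp dance */
}

int main(void)
{
        struct dx_entry x = { 1, 100 }, y = { 2, 200 };

        dx_swap(&x, &y);
        printf("x.hash=%u y.hash=%u\n", x.hash, y.hash);
        return 0;
}
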
@@ -1014,7 +1014,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,

 /* will exit holding res->spinlock, but may drop in function */
 void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
-void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

 /* will exit holding res->spinlock, but may drop in function */
 static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)

@@ -2250,7 +2250,7 @@ out:
 static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
                                      struct iov_iter *from)
 {
-        int direct_io, appending, rw_level, have_alloc_sem = 0;
+        int direct_io, appending, rw_level;
         int can_do_direct, has_refcount = 0;
         ssize_t written = 0;
         ssize_t ret;

@@ -2279,16 +2279,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,

         mutex_lock(&inode->i_mutex);

-        ocfs2_iocb_clear_sem_locked(iocb);
-
 relock:
-        /* to match setattr's i_mutex -> rw_lock ordering */
-        if (direct_io) {
-                have_alloc_sem = 1;
-                /* communicate with ocfs2_dio_end_io */
-                ocfs2_iocb_set_sem_locked(iocb);
-        }
-
         /*
          * Concurrent O_DIRECT writes are allowed with
          * mount_option "coherency=buffered".

@@ -2298,7 +2289,7 @@ relock:
         ret = ocfs2_rw_lock(inode, rw_level);
         if (ret < 0) {
                 mlog_errno(ret);
-                goto out_sems;
+                goto out_mutex;
         }

         /*

@@ -2347,7 +2338,6 @@ relock:
         if (direct_io && !can_do_direct) {
                 ocfs2_rw_unlock(inode, rw_level);

-                have_alloc_sem = 0;
                 rw_level = -1;

                 direct_io = 0;

@@ -2416,7 +2406,6 @@ no_sync:
          */
         if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
                 rw_level = -1;
-                have_alloc_sem = 0;
                 unaligned_dio = 0;
         }

@@ -2429,10 +2418,7 @@ out:
         if (rw_level != -1)
                 ocfs2_rw_unlock(inode, rw_level);

-out_sems:
-        if (have_alloc_sem)
-                ocfs2_iocb_clear_sem_locked(iocb);
-
+out_mutex:
         mutex_unlock(&inode->i_mutex);

         if (written)

@@ -2473,7 +2459,7 @@ bail:
 static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
                                     struct iov_iter *to)
 {
-        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
+        int ret = 0, rw_level = -1, lock_level = 0;
         struct file *filp = iocb->ki_filp;
         struct inode *inode = file_inode(filp);

@@ -2490,16 +2476,11 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
                 goto bail;
         }

-        ocfs2_iocb_clear_sem_locked(iocb);
-
         /*
          * buffered reads protect themselves in ->readpage().  O_DIRECT reads
          * need locks to protect pending reads from racing with truncate.
          */
         if (iocb->ki_flags & IOCB_DIRECT) {
-                have_alloc_sem = 1;
-                ocfs2_iocb_set_sem_locked(iocb);
-
                 ret = ocfs2_rw_lock(inode, 0);
                 if (ret < 0) {
                         mlog_errno(ret);

@@ -2535,13 +2516,9 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
         /* see ocfs2_file_write_iter */
         if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                 rw_level = -1;
-                have_alloc_sem = 0;
         }

 bail:
-        if (have_alloc_sem)
-                ocfs2_iocb_clear_sem_locked(iocb);
-
         if (rw_level != -1)
                 ocfs2_rw_unlock(inode, rw_level);

@@ -108,7 +108,7 @@ struct ocfs2_replay_map {
         unsigned char rm_replay_slots[0];
 };

-void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
+static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
 {
         if (!osb->replay_map)
                 return;

@@ -153,7 +153,7 @@ int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
         return 0;
 }

-void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
+static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
                               enum ocfs2_orphan_reco_type orphan_reco_type)
 {
         struct ocfs2_replay_map *replay_map = osb->replay_map;

@@ -173,7 +173,7 @@ void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
         replay_map->rm_state = REPLAY_DONE;
 }

-void ocfs2_free_replay_slots(struct ocfs2_super *osb)
+static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
 {
         struct ocfs2_replay_map *replay_map = osb->replay_map;

@@ -571,9 +571,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
                      (unsigned long)bh,
                      (unsigned long long)bh->b_blocknr);

-        /* We aren't guaranteed to have the superblock here - but if we
-         * don't, it'll just crash. */
-        ocfs2_error(bh->b_assoc_map->host->i_sb,
+        ocfs2_error(bh->b_bdev->bd_super,
                     "JBD2 has aborted our journal, ocfs2 cannot continue\n");
 }

@@ -775,7 +773,20 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
         trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);

         status = jbd2_journal_dirty_metadata(handle, bh);
-        BUG_ON(status);
+        if (status) {
+                mlog_errno(status);
+                if (!is_handle_aborted(handle)) {
+                        journal_t *journal = handle->h_transaction->t_journal;
+                        struct super_block *sb = bh->b_bdev->bd_super;
+
+                        mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
+                                        "Aborting transaction and journal.\n");
+                        handle->h_err = status;
+                        jbd2_journal_abort_handle(handle);
+                        jbd2_journal_abort(journal, status);
+                        ocfs2_abort(sb, "Journal already aborted.\n");
+                }
+        }
 }

 #define OCFS2_DEFAULT_COMMIT_INTERVAL   (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)

@@ -1884,7 +1895,7 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
  * hasn't happened.  The node queues a scan and increments the
  * sequence number in the LVB.
  */
-void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
+static void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
 {
         struct ocfs2_orphan_scan *os;
         int status, i;

@@ -1933,7 +1944,7 @@ out:
 }

 /* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT millsec */
-void ocfs2_orphan_scan_work(struct work_struct *work)
+static void ocfs2_orphan_scan_work(struct work_struct *work)
 {
         struct ocfs2_orphan_scan *os;
         struct ocfs2_super *osb;

@@ -2137,6 +2148,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
         struct inode *inode = NULL;
         struct inode *iter;
         struct ocfs2_inode_info *oi;
+        struct buffer_head *di_bh = NULL;
+        struct ocfs2_dinode *di = NULL;

         trace_ocfs2_recover_orphans(slot);

@@ -2157,16 +2170,22 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                 iter = oi->ip_next_orphan;
                 oi->ip_next_orphan = NULL;

+                ret = ocfs2_rw_lock(inode, 1);
+                if (ret < 0) {
+                        mlog_errno(ret);
+                        goto next;
+                }
                 /*
                  * We need to take and drop the inode lock to
                  * force read inode from disk.
                  */
-                ret = ocfs2_inode_lock(inode, NULL, 0);
+                ret = ocfs2_inode_lock(inode, &di_bh, 1);
                 if (ret) {
                         mlog_errno(ret);
-                        goto next;
+                        goto unlock_rw;
                 }
-                ocfs2_inode_unlock(inode, 0);
+
+                di = (struct ocfs2_dinode *)di_bh->b_data;

                 if (inode->i_nlink == 0) {
                         spin_lock(&oi->ip_lock);

@@ -2174,43 +2193,30 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                          * ocfs2_delete_inode. */
                         oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
                         spin_unlock(&oi->ip_lock);
-                } else if (orphan_reco_type == ORPHAN_NEED_TRUNCATE) {
-                        struct buffer_head *di_bh = NULL;
-
-                        ret = ocfs2_rw_lock(inode, 1);
-                        if (ret) {
-                                mlog_errno(ret);
-                                goto next;
-                        }
-
-                        ret = ocfs2_inode_lock(inode, &di_bh, 1);
-                        if (ret < 0) {
-                                ocfs2_rw_unlock(inode, 1);
-                                mlog_errno(ret);
-                                goto next;
-                        }
-
+                } else if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
+                                (di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
                         ret = ocfs2_truncate_file(inode, di_bh,
                                         i_size_read(inode));
-                        ocfs2_inode_unlock(inode, 1);
-                        ocfs2_rw_unlock(inode, 1);
-                        brelse(di_bh);
                         if (ret < 0) {
                                 if (ret != -ENOSPC)
                                         mlog_errno(ret);
-                                goto next;
+                                goto unlock_inode;
                         }

-                        ret = ocfs2_del_inode_from_orphan(osb, inode, 0, 0);
+                        ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
                         if (ret)
                                 mlog_errno(ret);

                         wake_up(&OCFS2_I(inode)->append_dio_wq);
                 } /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */
-
+unlock_inode:
+                ocfs2_inode_unlock(inode, 1);
+unlock_rw:
+                ocfs2_rw_unlock(inode, 1);
 next:
                 iput(inode);
+                brelse(di_bh);
+                di_bh = NULL;
                 inode = iter;
         }

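Note: the orphan-recovery loop above now takes the rw lock and the inode lock once up front and unwinds through stacked labels (unlock_inode, unlock_rw, next), the classic goto-ladder cleanup idiom: each failure jumps to the label that releases exactly what has been acquired so far. A minimal userspace sketch of the idiom (all names hypothetical):

#include <errno.h>
#include <stdio.h>

static int take_a(void)   { puts("take a"); return 0; }
static int take_b(void)   { puts("take b"); return 0; }
static void drop_a(void)  { puts("drop a"); }
static void drop_b(void)  { puts("drop b"); }
static int do_work(void)  { puts("work");   return -ENOSPC; }

/*
 * goto-ladder cleanup: the unwind order always mirrors the
 * acquire order, and no resource is released twice.
 */
static int recover_one(void)
{
        int ret;

        ret = take_a();
        if (ret)
                goto out;
        ret = take_b();
        if (ret)
                goto unlock_a;

        ret = do_work();

        drop_b();
unlock_a:
        drop_a();
out:
        return ret;
}

int main(void)
{
        printf("ret = %d\n", recover_one());
        return 0;
}
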
@@ -1116,8 +1116,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
         int inode1_is_ancestor, inode2_is_ancestor;
         struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
         struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
-        struct buffer_head **tmpbh;
-        struct inode *tmpinode;

         trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
                                 (unsigned long long)oi2->ip_blkno);

@@ -1148,13 +1146,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
             (oi1->ip_blkno < oi2->ip_blkno &&
             inode2_is_ancestor == 0)) {
                 /* switch id1 and id2 around */
-                tmpbh = bh2;
-                bh2 = bh1;
-                bh1 = tmpbh;
-
-                tmpinode = inode2;
-                inode2 = inode1;
-                inode1 = tmpinode;
+                swap(bh2, bh1);
+                swap(inode2, inode1);
         }
         /* lock id2 */
         status = ocfs2_inode_lock_nested(inode2, bh2, 1,

@@ -2670,30 +2663,22 @@ bail:
 }

 int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
-                struct inode *inode, int update_isize,
-                loff_t end)
+                struct inode *inode, struct buffer_head *di_bh,
+                int update_isize, loff_t end)
 {
         struct inode *orphan_dir_inode = NULL;
         struct buffer_head *orphan_dir_bh = NULL;
-        struct buffer_head *di_bh = NULL;
-        struct ocfs2_dinode *di = NULL;
+        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
         handle_t *handle = NULL;
         int status = 0;

-        status = ocfs2_inode_lock(inode, &di_bh, 1);
-        if (status < 0) {
-                mlog_errno(status);
-                goto bail;
-        }
-        di = (struct ocfs2_dinode *) di_bh->b_data;
-
         orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                         ORPHAN_DIR_SYSTEM_INODE,
                         le16_to_cpu(di->i_dio_orphaned_slot));
         if (!orphan_dir_inode) {
                 status = -ENOENT;
                 mlog_errno(status);
-                goto bail_unlock_inode;
+                goto bail;
         }

         mutex_lock(&orphan_dir_inode->i_mutex);

@@ -2702,7 +2687,7 @@ int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
                 mutex_unlock(&orphan_dir_inode->i_mutex);
                 iput(orphan_dir_inode);
                 mlog_errno(status);
-                goto bail_unlock_inode;
+                goto bail;
         }

         handle = ocfs2_start_trans(osb,

@@ -2749,10 +2734,6 @@ bail_unlock_orphan:
         brelse(orphan_dir_bh);
         iput(orphan_dir_inode);

-bail_unlock_inode:
-        ocfs2_inode_unlock(inode, 1);
-        brelse(di_bh);
-
 bail:
         return status;
 }

@@ -42,8 +42,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
 int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
                 struct inode *inode);
 int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb,
-                struct inode *inode, int update_isize,
-                loff_t end);
+                struct inode *inode, struct buffer_head *di_bh,
+                int update_isize, loff_t end);
 int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
                                    struct inode *new_inode,
                                    struct dentry *new_dentry);

@@ -717,6 +717,16 @@ static inline u64 ocfs2_clusters_to_blocks(struct super_block *sb,
         return (u64)clusters << c_to_b_bits;
 }

+static inline u32 ocfs2_clusters_for_blocks(struct super_block *sb,
+                u64 blocks)
+{
+        int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
+                        sb->s_blocksize_bits;
+
+        blocks += (1 << b_to_c_bits) - 1;
+        return (u32)(blocks >> b_to_c_bits);
+}
+
 static inline u32 ocfs2_blocks_to_clusters(struct super_block *sb,
                                            u64 blocks)
 {

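Note: the new ocfs2_clusters_for_blocks() rounds a block count up to whole clusters by adding (blocks-per-cluster - 1) before shifting, the standard round-up-then-divide-by-power-of-two trick; the aops.c hunk earlier uses it so clusters are compared against clusters rather than raw blocks. A standalone sketch of the arithmetic, assuming (hypothetically) 4 KiB blocks and 32 KiB clusters, i.e. b_to_c_bits == 3:

#include <stdint.h>
#include <stdio.h>

/* Any partial cluster counts as a whole one. */
static uint32_t clusters_for_blocks(uint64_t blocks, int b_to_c_bits)
{
        blocks += (1ULL << b_to_c_bits) - 1;    /* round up */
        return (uint32_t)(blocks >> b_to_c_bits);
}

int main(void)
{
        printf("%u\n", clusters_for_blocks(1, 3));      /* 1 */
        printf("%u\n", clusters_for_blocks(8, 3));      /* 1 */
        printf("%u\n", clusters_for_blocks(9, 3));      /* 2 */
        return 0;
}
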
@@ -1406,11 +1406,9 @@ static int cmp_refcount_rec_by_cpos(const void *a, const void *b)

 static void swap_refcount_rec(void *a, void *b, int size)
 {
-        struct ocfs2_refcount_rec *l = a, *r = b, tmp;
+        struct ocfs2_refcount_rec *l = a, *r = b;

-        tmp = *l;
-        *l = *r;
-        *r = tmp;
+        swap(*l, *r);
 }

 /*

@@ -7271,7 +7271,7 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
                                name, value, size, flags);
 }

-int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
                      void *fs_info)
 {
         const struct xattr *xattr;

@@ -126,6 +126,14 @@ static inline const char *get_task_state(struct task_struct *tsk)
 {
         unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT;

+        /*
+         * Parked tasks do not run; they sit in __kthread_parkme().
+         * Without this check, we would report them as running, which is
+         * clearly wrong, so we report them as sleeping instead.
+         */
+        if (tsk->state == TASK_PARKED)
+                state = TASK_INTERRUPTIBLE;
+
         BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1);

         return task_state_array[fls(state)];

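Note: get_task_state() maps a state bitmask to a table index with fls(), so the reported state is the highest set bit; forcing TASK_PARKED to TASK_INTERRUPTIBLE makes parked kthreads show as sleeping rather than running. A userspace sketch of that fls-indexed table lookup (table entries hypothetical):

#include <stdio.h>

static const char *const state_array[] = {
        "R (running)",          /* state == 0, no bit set */
        "S (sleeping)",         /* bit 0 */
        "D (disk sleep)",       /* bit 1 */
        "T (stopped)",          /* bit 2 */
};

/* Portable fls(): 1-based index of the highest set bit, 0 if none. */
static int my_fls(unsigned int x)
{
        int i = 0;

        while (x) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned int state = 1u << 1;   /* the "disk sleep" bit */

        printf("%s\n", state_array[my_fls(state)]);
        return 0;
}
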
@@ -360,7 +360,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
                                 break;

                         error = add_to_page_cache_lru(page, mapping, index,
-                                        GFP_KERNEL);
+                                        GFP_KERNEL & mapping_gfp_mask(mapping));
                         if (unlikely(error)) {
                                 page_cache_release(page);
                                 if (error == -EEXIST)

@@ -96,9 +96,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 }
 #endif

-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                        unsigned long address,
                                        pmd_t *pmdp)
 {

@@ -109,13 +109,13 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif

-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
                                             unsigned long address, pmd_t *pmdp,
                                             int full)
 {
-        return pmdp_get_and_clear(mm, address, pmdp);
+        return pmdp_huge_get_and_clear(mm, address, pmdp);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif

@@ -152,8 +152,8 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
                               pte_t *ptep);
 #endif

-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                               unsigned long address,
                               pmd_t *pmdp);
 #endif

@@ -189,6 +189,22 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 #endif

+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                 unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                        unsigned long address,
+                                        pmd_t *pmdp)
+{
+        BUILD_BUG();
+        return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                        pgtable_t pgtable);

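Note: the pmdp_collapse_flush block above uses the define-the-symbol-to-itself idiom: the generic header guards on `#ifndef pmdp_collapse_flush` and then does `#define pmdp_collapse_flush pmdp_collapse_flush`, so an architecture that supplies its own version (function plus matching #define) makes the generic fallback disappear. A hypothetical userspace illustration of the same pattern:

#include <stdio.h>

/*
 * An "arch" header that overrides would contain:
 *   #define frobnicate frobnicate
 *   static int frobnicate(void) { return 1; }
 * included before this block.
 */

#ifndef frobnicate
static int frobnicate(void)     /* generic fallback */
{
        return 0;
}
#define frobnicate frobnicate   /* mark the symbol as provided */
#endif

int main(void)
{
        printf("frobnicate() = %d\n", frobnicate());
        return 0;
}
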
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
 /* Only NUMA needs hash distribution. 64bit NUMA architectures have
  * sufficient vmalloc space.
  */
-#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
-#define HASHDIST_DEFAULT 1
-#else
-#define HASHDIST_DEFAULT 0
-#endif
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
 extern int hashdist;            /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif

 #endif /* _LINUX_BOOTMEM_H */

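Note: IS_ENABLED(CONFIG_64BIT) evaluates to 1 when the option is defined to 1 and to 0 otherwise, which is why the four-line #if/#else pair collapses to a single #define. The kernel implements it with an argument-shifting placeholder trick; a simplified userspace sketch of that mechanism (not the kernel's exact spelling):

#include <stdio.h>

/*
 * When CFG expands to 1, the placeholder pastes to "0," and shifts
 * the argument list so the probe's second argument becomes 1;
 * otherwise the junk token stays glued to the 1 and the probe
 * selects the trailing 0.
 */
#define __ARG_PLACEHOLDER_1                     0,
#define __take_second_arg(__ignored, val, ...)  val
#define ____is_defined(arg1_or_junk)  __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)            ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)               ___is_defined(x)
#define IS_ENABLED(option)            __is_defined(option)

#define CONFIG_FOO 1
/* CONFIG_BAR deliberately not defined */

int main(void)
{
        printf("FOO: %d\n", IS_ENABLED(CONFIG_FOO));    /* 1 */
        printf("BAR: %d\n", IS_ENABLED(CONFIG_BAR));    /* 0 */
        return 0;
}
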
Some files were not shown because too many files have changed in this diff.