Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - fsnotify updates

 - ocfs2 updates

 - all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (127 commits)
  console: don't prefer first registered if DT specifies stdout-path
  cred: simpler, 1D supplementary groups
  CREDITS: update Pavel's information, add GPG key, remove snail mail address
  mailmap: add Johan Hovold
  .gitattributes: set git diff driver for C source code files
  uprobes: remove function declarations from arch/{mips,s390}
  spelling.txt: "modeled" is spelt correctly
  nmi_backtrace: generate one-line reports for idle cpus
  arch/tile: adopt the new nmi_backtrace framework
  nmi_backtrace: do a local dump_stack() instead of a self-NMI
  nmi_backtrace: add more trigger_*_cpu_backtrace() methods
  min/max: remove sparse warnings when they're nested
  Documentation/filesystems/proc.txt: add more description for maps/smaps
  mm, proc: fix region lost in /proc/self/smaps
  proc: fix timerslack_ns CAP_SYS_NICE check when adjusting self
  proc: add LSM hook checks to /proc/<tid>/timerslack_ns
  proc: relax /proc/<tid>/timerslack_ns capability requirements
  meminfo: break apart a very long seq_printf with #ifdefs
  seq/proc: modify seq_put_decimal_[u]ll to take a const char *, not char
  proc: faster /proc/*/status
  ...
commit b66484cd74
@@ -0,0 +1,2 @@
+*.c diff=cpp
+*.h diff=cpp
.mailmap | 2
@@ -75,6 +75,8 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
+Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
CREDITS | 8
@@ -2296,11 +2296,11 @@ D: Initial implementation of VC's, pty's and select()

 N: Pavel Machek
 E: pavel@ucw.cz
-D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd
+P: 4096R/92DFCE96 4FA7 9EEF FCD4 C44F C585 B8C7 C060 2241 92DF CE96
+D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd,
 D: sun4/330 port, capabilities for elf, speedup for rm on ext2, USB,
-D: work on suspend-to-ram/disk, killing duplicates from ioctl32
-S: Volkova 1131
-S: 198 00 Praha 9
+D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
+D: Altera SoCFPGA and Nokia N900 support.
 S: Czech Republic

 N: Paul Mackerras
@@ -515,6 +515,18 @@ be vanished or the reverse -- new added.
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
+
+Note: reading /proc/PID/maps or /proc/PID/smaps is inherently racy (consistent
+output can be achieved only in the single read call).
+This typically manifests when doing partial reads of these files while the
+memory map is being modified.  Despite the races, we do provide the following
+guarantees:
+
+1) The mapped addresses never go backwards, which implies no two
+   regions will ever overlap.
+2) If there is something at a given vaddr during the entirety of the
+   life of the smaps/maps walk, there will be some output for it.
+
 
 The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG
 bits on both physical and virtual pages associated with a process, and the
 soft-dirty bit on pte (see Documentation/vm/soft-dirty.txt for details).
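The single-read rule documented above is easy to demonstrate from user space; a minimal sketch follows (buffer size and error handling are illustrative, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static char buf[1 << 20];	/* large enough for one consistent read */
	int fd = open("/proc/self/maps", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* One read call: whatever chunk the kernel returns here is
	 * internally consistent, even if the map changes afterwards. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}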
@@ -15,7 +15,6 @@ config ALPHA
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select AUDIT_ARCH
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_SMP_IDLE_THREAD
@@ -22,6 +22,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
@@ -89,6 +89,7 @@ SECTIONS
	_text = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
@@ -2,7 +2,6 @@ config ARM
 	bool
 	default y
 	select ARCH_CLOCKSOURCE_DATA
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -36,8 +36,9 @@ extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 #endif

 #ifdef CONFIG_SMP
-extern void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif

 static inline int nr_legacy_irqs(void)
@@ -748,19 +748,10 @@ core_initcall(register_cpufreq_notifier);

 static void raise_nmi(cpumask_t *mask)
 {
-	/*
-	 * Generate the backtrace directly if we are running in a calling
-	 * context that is not preemptible by the backtrace IPI. Note
-	 * that nmi_cpu_backtrace() automatically removes the current cpu
-	 * from mask.
-	 */
-	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
-		nmi_cpu_backtrace(NULL);
-
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }

-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
 }
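Every architecture converted in this series funnels into the same generic helper; its contract, inferred from the call sites in this diff (a sketch, not the verbatim lib/nmi_backtrace.c code):

/*
 * Snapshot @mask, optionally remove the calling CPU when @exclude_self
 * is set, then call @raise once so the architecture can deliver its
 * backtrace NMI/IPI to the remaining CPUs and wait for their output.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));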
@@ -98,6 +98,7 @@ SECTIONS
	IRQENTRY_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.gnu.warning)
@@ -111,6 +111,7 @@ SECTIONS
	SOFTIRQENTRY_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	HYPERVISOR_TEXT
	KPROBES_TEXT
@@ -8,9 +8,9 @@ config ARM64
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -122,6 +122,7 @@ SECTIONS
	ENTRY_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	HYPERVISOR_TEXT
@@ -52,6 +52,7 @@ SECTIONS
	KPROBES_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
@@ -33,6 +33,7 @@ SECTIONS
 #ifndef CONFIG_SCHEDULE_L1
	SCHED_TEXT
 #endif
+	CPUIDLE_TEXT
	LOCK_TEXT
	IRQENTRY_TEXT
	SOFTIRQENTRY_TEXT
@@ -70,6 +70,7 @@ SECTIONS
	_stext = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	IRQENTRY_TEXT
	SOFTIRQENTRY_TEXT
@@ -43,6 +43,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.text.__*)
@@ -63,6 +63,7 @@ SECTIONS
	*(.text..tlbmiss)
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
 #ifdef CONFIG_DEBUG_INFO
	INIT_TEXT
@@ -29,6 +29,7 @@ SECTIONS
	_stext = . ;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
 #if defined(CONFIG_ROMKERNEL)
	*(.int_redirect)
@@ -50,6 +50,7 @@ SECTIONS
	_text = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
@@ -269,6 +269,22 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)

 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long c, old, dec;
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
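The helper added above decrements only while the counter stays non-negative, which suits reference-count-style callers; a hypothetical usage sketch (struct obj and free_object are illustrative names, not from the patch):

/* Drop a reference, never going below zero. */
static void put_object(struct obj *o)
{
	if (atomic64_dec_if_positive(&o->refcnt) == 0)
		free_object(o);	/* this call took the count from 1 to 0 */
}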
@@ -46,6 +46,7 @@ SECTIONS {
	__end_ivt_text = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.gnu.linkonce.t*)
@@ -31,6 +31,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
@@ -45,6 +45,7 @@ SECTIONS {
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	. = ALIGN(16);
@@ -16,6 +16,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
@@ -16,6 +16,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
@@ -21,6 +21,7 @@ SECTIONS
	.text : {
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -33,6 +33,7 @@ SECTIONS {
	EXIT_TEXT
	EXIT_CALL
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -30,7 +30,6 @@ config MIPS
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
 	select RTC_LIB if !MACH_LOONGSON64
 	select GENERIC_ATOMIC64 if !64BIT
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DMA_API_DEBUG
 	select GENERIC_IRQ_PROBE
@@ -51,7 +51,8 @@ extern int cp0_fdc_irq;

 extern int get_c0_fdc_int(void);

-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace

 #endif /* _ASM_IRQ_H */
@@ -673,8 +673,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 struct file;
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-		unsigned long size, pgprot_t *vma_prot);
 #endif

 /*
@@ -42,16 +42,4 @@ struct arch_uprobe_task {
	unsigned long saved_trap_nr;
 };

-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
-	struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self,
-	unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup,
-	struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(
-	unsigned long trampoline_vaddr, struct pt_regs *regs);
-
 #endif /* __ASM_UPROBES_H */
@@ -569,9 +569,16 @@ static void arch_dump_stack(void *info)
	dump_stack();
 }

-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	smp_call_function(arch_dump_stack, NULL, 1);
+	long this_cpu = get_cpu();
+
+	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
+		dump_stack();
+
+	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
+
+	put_cpu();
 }

 int mips_get_process_fp_mode(struct task_struct *task)
@@ -55,6 +55,7 @@ SECTIONS
	.text : {
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -30,6 +30,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
@@ -37,6 +37,7 @@ SECTIONS
	.text : {
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	IRQENTRY_TEXT
	SOFTIRQENTRY_TEXT
@@ -47,6 +47,7 @@ SECTIONS
	_stext = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -16,7 +16,6 @@ config PARISC
 	select BUILDTIME_EXTABLE_SORT
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PCI_IOMAP
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -69,6 +69,7 @@ SECTIONS
	.text ALIGN(PAGE_SIZE) : {
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -108,7 +108,6 @@ config PPC
 	select HAVE_DEBUG_KMEMLEAK
 	select ARCH_HAS_SG_CHAIN
 	select GENERIC_ATOMIC64 if PPC32
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
@@ -41,6 +41,9 @@ u64 memory_hotplug_max(void);
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
+#ifdef CONFIG_FA_DUMP
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif

 #endif /* __KERNEL__ */
 #endif /* _ASM_MMZONE_H_ */
@@ -333,6 +333,11 @@ int __init fadump_reserve_mem(void)
	return 1;
 }

+unsigned long __init arch_reserved_kernel_pages(void)
+{
+	return memblock_reserved_size() / PAGE_SIZE;
+}
+
 /* Look for fadump= cmdline option. */
 static int __init early_fadump_param(char *p)
 {
@@ -99,6 +99,7 @@ SECTIONS
	/* careful! __ftr_alt_* sections need to be close to .text */
	*(.text .fixup __ftr_alt_* .ref.text)
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -67,10 +67,10 @@ config DEBUG_RODATA

 config S390
 	def_bool y
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -29,14 +29,4 @@ struct arch_uprobe {
 struct arch_uprobe_task {
 };

-int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
-			     unsigned long addr);
-int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
-				 void *data);
-void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
-unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
-						struct pt_regs *regs);
 #endif /* _ASM_UPROBES_H */
@@ -189,7 +189,7 @@ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info
	kgid_t kgid;

	for (i = 0; i < group_info->ngroups; i++) {
-		kgid = GROUP_AT(group_info, i);
+		kgid = group_info->gid[i];
		group = (u16)from_kgid_munged(user_ns, kgid);
		if (put_user(group, grouplist+i))
			return -EFAULT;
@@ -213,7 +213,7 @@ static int groups16_from_user(struct group_info *group_info, u16 __user *groupli
		if (!gid_valid(kgid))
			return -EINVAL;

-		GROUP_AT(group_info, i) = kgid;
+		group_info->gid[i] = kgid;
	}

	return 0;
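Both hunks follow from the flat group_info layout introduced by "cred: simpler, 1D supplementary groups": GROUP_AT()'s two-level block indexing becomes a plain array. A sketch of iterating the new layout (field names as in the diff; the printout is illustrative):

struct group_info *gi = current_cred()->group_info;
int i;

for (i = 0; i < gi->ngroups; i++)
	pr_info("supplementary gid %u\n",
		from_kgid_munged(&init_user_ns, gi->gid[i]));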
@@ -35,6 +35,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -40,6 +40,7 @@ SECTIONS
	_text = .;	/* Text and read-only data */
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.text.*)
@@ -36,6 +36,7 @@ SECTIONS
	TEXT_TEXT
	EXTRA_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -22,7 +22,6 @@ config SPARC
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_EXIT_THREAD
 	select SYSCTL_EXCEPTION_TRACE
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select RTC_CLASS
 	select RTC_DRV_M48T59
 	select RTC_SYSTOHC
@@ -86,8 +86,9 @@ static inline unsigned long get_softint(void)
	return retval;
 }

-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace

 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
	}
 }

-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
@@ -255,15 +255,15 @@ void arch_trigger_all_cpu_backtrace(bool include_self)

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

-	if (include_self)
+	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

-		if (!include_self && cpu == this_cpu)
+		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;
@@ -300,7 +300,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)

 static void sysrq_handle_globreg(int key)
 {
-	arch_trigger_all_cpu_backtrace(true);
+	trigger_all_cpu_backtrace();
 }

 static struct sysrq_key_op sparc_globalreg_op = {
@@ -49,6 +49,7 @@ SECTIONS
	HEAD_TEXT
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -3,7 +3,6 @@

 config TILE
 	def_bool y
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_WANT_FRAME_POINTERS
@@ -79,8 +79,9 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type);
 void setup_irq_regs(void);

 #ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif

 #endif /* _ASM_TILE_IRQ_H */
@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap)
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
-STD_ENTRY(_cpu_idle)
+STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
	movei r1, 1
	IRQ_ENABLE_LOAD(r2, r3)
	mtspr INTERRUPT_CRITICAL_SECTION, r1
@@ -16,7 +16,6 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
-#include <linux/interrupt.h>

 #include <asm/processor.h>
 #include <asm/pmc.h>
@@ -29,9 +28,7 @@ int handle_perf_interrupt(struct pt_regs *regs, int fault)
	if (!perf_irq)
		panic("Unexpected PERF_COUNT interrupt %d\n", fault);

-	nmi_enter();
	retval = perf_irq(regs, fault);
-	nmi_exit();
	return retval;
 }
@@ -22,7 +22,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/compat.h>
-#include <linux/hardirq.h>
+#include <linux/nmi.h>
 #include <linux/syscalls.h>
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
@@ -594,66 +594,18 @@ void show_regs(struct pt_regs *regs)
	tile_show_stack(&kbt);
 }

-/* To ensure stack dump on tiles occurs one by one. */
-static DEFINE_SPINLOCK(backtrace_lock);
-/* To ensure no backtrace occurs before all of the stack dump are done. */
-static atomic_t backtrace_cpus;
-/* The cpu mask to avoid reentrance. */
-static struct cpumask backtrace_mask;
-
-void do_nmi_dump_stack(struct pt_regs *regs)
-{
-	int is_idle = is_idle_task(current) && !in_interrupt();
-	int cpu;
-
-	nmi_enter();
-	cpu = smp_processor_id();
-	if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
-		goto done;
-
-	spin_lock(&backtrace_lock);
-	if (is_idle)
-		pr_info("CPU: %d idle\n", cpu);
-	else
-		show_regs(regs);
-	spin_unlock(&backtrace_lock);
-	atomic_dec(&backtrace_cpus);
-done:
-	nmi_exit();
-}
-
 #ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self)
+void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
 {
	struct cpumask mask;
	HV_Coord tile;
	unsigned int timeout;
	int cpu;
-	int ongoing;
	HV_NMI_Info info[NR_CPUS];

-	ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
-	if (ongoing != 0) {
-		pr_err("Trying to do all-cpu backtrace.\n");
-		pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
-		       ongoing);
-		if (self) {
-			pr_err("Reporting the stack on this cpu only.\n");
-			dump_stack();
-		}
-		return;
-	}
-
-	cpumask_copy(&mask, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &mask);
-	cpumask_copy(&backtrace_mask, &mask);
-
-	/* Backtrace for myself first. */
-	if (self)
-		dump_stack();
-
	/* Tentatively dump stack on remote tiles via NMI. */
	timeout = 100;
+	cpumask_copy(&mask, in_mask);
	while (!cpumask_empty(&mask) && timeout) {
		for_each_cpu(cpu, &mask) {
			tile.x = cpu_x(cpu);
@@ -664,12 +616,17 @@ void arch_trigger_all_cpu_backtrace(bool self)
		}

		mdelay(10);
+		touch_softlockup_watchdog();
		timeout--;
	}

-	/* Warn about cpus stuck in ICS and decrement their counts here. */
+	/* Warn about cpus stuck in ICS. */
	if (!cpumask_empty(&mask)) {
		for_each_cpu(cpu, &mask) {
+
+			/* Clear the bit as if nmi_cpu_backtrace() ran. */
+			cpumask_clear_cpu(cpu, in_mask);
+
			switch (info[cpu].result) {
			case HV_NMI_RESULT_FAIL_ICS:
				pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
@@ -680,16 +637,20 @@ void arch_trigger_all_cpu_backtrace(bool self)
				cpu);
				break;
			case HV_ENOSYS:
-				pr_warn("Hypervisor too old to allow remote stack dumps.\n");
-				goto skip_for_each;
+				WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
+				break;
			default: /* should not happen */
				pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
					cpu, info[cpu].result, info[cpu].pc);
				break;
			}
		}
-skip_for_each:
-		atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
	}
 }
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+	nmi_trigger_cpumask_backtrace(mask, exclude_self,
+				      nmi_raise_cpu_backtrace);
+}
 #endif /* __tilegx_ */
@@ -20,6 +20,8 @@
 #include <linux/reboot.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/nmi.h>
 #include <asm/stack.h>
 #include <asm/traps.h>
 #include <asm/setup.h>
@@ -392,14 +394,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,

 void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
 {
+	nmi_enter();
	switch (reason) {
 #ifdef arch_trigger_cpumask_backtrace
	case TILE_NMI_DUMP_STACK:
-		do_nmi_dump_stack(regs);
+		nmi_cpu_backtrace(regs);
		break;
 #endif
	default:
		panic("Unexpected do_nmi type %ld", reason);
		return;
	}
+	nmi_exit();
 }

 /* Deprecated function currently only used here. */
@@ -42,6 +42,7 @@ SECTIONS
	.text : AT (ADDR(.text) - LOAD_OFFSET) {
	HEAD_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	IRQENTRY_TEXT
@@ -68,6 +68,7 @@ SECTIONS
	_stext = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.stub .text.* .gnu.linkonce.t.*)
@@ -28,6 +28,7 @@ SECTIONS
	_stext = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	*(.fixup)
	/* .gnu.warning sections are handled specially by elf32.em. */
@@ -37,6 +37,7 @@ SECTIONS
	.text : {	/* Real text segment */
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT

	*(.fixup)
@@ -23,11 +23,11 @@ config X86
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE if X86_64
 	select ARCH_HAS_KCOV if X86_64
 	select ARCH_HAS_PMEM_API if X86_64
 	select ARCH_HAS_MMIO_FLUSH
@@ -50,8 +50,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);

 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif

 #endif /* _ASM_X86_IRQ_H */
@@ -4,6 +4,10 @@
 #include <asm/processor-flags.h>

 #ifndef __ASSEMBLY__
+
+/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
 /*
  * Interrupt control:
  */
@@ -44,12 +48,12 @@ static inline void native_irq_enable(void)
	asm volatile("sti": : :"memory");
 }

-static inline void native_safe_halt(void)
+static inline __cpuidle void native_safe_halt(void)
 {
	asm volatile("sti; hlt": : :"memory");
 }

-static inline void native_halt(void)
+static inline __cpuidle void native_halt(void)
 {
	asm volatile("hlt": : :"memory");
 }
@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void)
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
-static inline void arch_safe_halt(void)
+static inline __cpuidle void arch_safe_halt(void)
 {
	native_safe_halt();
 }
@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void)
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
-static inline void halt(void)
+static inline __cpuidle void halt(void)
 {
	native_halt();
 }
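The __cpuidle marker defined above places a function in .cpuidle.text, the section the new CPUIDLE_TEXT linker macro collects; the NMI backtrace code can then classify any PC inside that range as idle and emit a one-line report. A hypothetical use (my_idle is an illustrative name):

/* Runs from the idle loop; reported as "idle" by the one-line NMI
 * backtrace because its text lives in .cpuidle.text. */
static __cpuidle void my_idle(void)
{
	safe_halt();
}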
@@ -439,8 +439,6 @@ extern pgprot_t pgprot_writethrough(pgprot_t prot);
 struct file;
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-		unsigned long size, pgprot_t *vma_prot);

 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

-void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;
@@ -26,32 +26,32 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
 }
 #endif

-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
 static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
	apic->send_IPI_mask(mask, NMI_VECTOR);
 }

-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
+	nmi_trigger_cpumask_backtrace(mask, exclude_self,
+				      nmi_raise_cpu_backtrace);
 }

-static int
-arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
+static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
	if (nmi_cpu_backtrace(regs))
		return NMI_HANDLED;

	return NMI_DONE;
 }
-NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
+NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler);

-static int __init register_trigger_all_cpu_backtrace(void)
+static int __init register_nmi_cpu_backtrace_handler(void)
 {
-	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+	register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler,
			     0, "arch_bt");
	return 0;
 }
-early_initcall(register_trigger_all_cpu_backtrace);
+early_initcall(register_nmi_cpu_backtrace_handler);
 #endif
@@ -302,7 +302,7 @@ void arch_cpu_idle(void)
 /*
  * We use this if we don't have any better idle routine..
  */
-void default_idle(void)
+void __cpuidle default_idle(void)
 {
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
@@ -417,7 +417,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
-static void mwait_idle(void)
+static __cpuidle void mwait_idle(void)
 {
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
@@ -97,6 +97,7 @@ SECTIONS
	_stext = .;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	ENTRY_TEXT
@@ -89,6 +89,9 @@ SECTIONS
	VMLINUX_SYMBOL(__sched_text_start) = .;
	*(.sched.literal .sched.text)
	VMLINUX_SYMBOL(__sched_text_end) = .;
+	VMLINUX_SYMBOL(__cpuidle_text_start) = .;
+	*(.cpuidle.literal .cpuidle.text)
+	VMLINUX_SYMBOL(__cpuidle_text_end) = .;
	VMLINUX_SYMBOL(__lock_text_start) = .;
	*(.spinlock.literal .spinlock.text)
	VMLINUX_SYMBOL(__lock_text_end) = .;
@@ -31,6 +31,7 @@
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
+#include <linux/cpu.h>
 #include <acpi/processor.h>

 /*
@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
-static void acpi_safe_halt(void)
+static void __cpuidle acpi_safe_halt(void)
 {
	if (!tif_need_resched()) {
		safe_halt();
@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
-static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
@@ -361,8 +361,11 @@ store_mem_state(struct device *dev,
 err:
	unlock_device_hotplug();

-	if (ret)
+	if (ret < 0)
		return ret;
+	if (ret)
+		return -EINVAL;
+
	return count;
 }

@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
 #include <linux/tick.h>
+#include <linux/cpu.h>

 #include "cpuidle.h"

@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 }

 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv, int index)
 {
	local_irq_enable();
	if (!current_set_polling_and_test()) {
@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
 *
 * Must be called under local_irq_disable().
 */
-static int intel_idle(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index)
 {
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
@@ -2077,6 +2077,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
		name = of_get_property(of_aliases, "stdout", NULL);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+		if (of_stdout)
+			console_set_by_of();
	}

	if (!of_aliases)
@@ -2220,7 +2220,7 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
	task_lock(current);
	if (pud->pud_ngroups > current_ngroups)
		pud->pud_ngroups = current_ngroups;
-	memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
+	memcpy(pud->pud_groups, current_cred()->group_info->gid,
	       pud->pud_ngroups * sizeof(__u32));
	task_unlock(current);

@@ -200,6 +200,9 @@ config HUGETLBFS
 config HUGETLB_PAGE
 	def_bool HUGETLBFS

+config ARCH_HAS_GIGANTIC_PAGE
+	bool
+
 source "fs/configfs/Kconfig"
 source "fs/efivarfs/Kconfig"

fs/dax.c | 2
@@ -1036,7 +1036,7 @@ int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
-		struct page *zero_page = get_huge_zero_page();
+		struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
@@ -234,6 +234,7 @@ const struct file_operations ext2_file_operations = {
	.open		= dquot_file_open,
	.release	= ext2_release_file,
	.fsync		= ext2_fsync,
+	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
 };
@@ -697,6 +697,7 @@ const struct file_operations ext4_file_operations = {
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
+	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
@@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,

	for (i = 0; i < pagevec_count(&pvec); ++i) {
		struct page *page = pvec.pages[i];
-		bool rsv_on_error;
		u32 hash;

		/*
@@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
		 * cache (remove_huge_page) BEFORE removing the
		 * region/reserve map (hugetlb_unreserve_pages).  In
		 * rare out of memory conditions, removal of the
-		 * region/reserve map could fail.  Before free'ing
-		 * the page, note PagePrivate which is used in case
-		 * of error.
+		 * region/reserve map could fail.  Correspondingly,
+		 * the subpool and global reserve usage count can need
+		 * to be adjusted.
		 */
-		rsv_on_error = !PagePrivate(page);
+		VM_BUG_ON(PagePrivate(page));
		remove_huge_page(page);
		freed++;
		if (!truncate_op) {
			if (unlikely(hugetlb_unreserve_pages(inode,
						next, next + 1, 1)))
-				hugetlb_fix_reserve_counts(inode,
-							rsv_on_error);
+				hugetlb_fix_reserve_counts(inode);
		}

		unlock_page(page);
@@ -679,11 +679,11 @@ unsigned int nfs_page_length(struct page *page)
	loff_t i_size = i_size_read(page_file_mapping(page)->host);

	if (i_size > 0) {
-		pgoff_t page_index = page_file_index(page);
+		pgoff_t index = page_index(page);
		pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
-		if (page_index < end_index)
+		if (index < end_index)
			return PAGE_SIZE;
-		if (page_index == end_index)
+		if (index == end_index)
			return ((i_size - 1) & ~PAGE_MASK) + 1;
	}
	return 0;
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	if (page) {
-		req->wb_index = page_file_index(page);
+		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
|
@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
|
|||
int error;
|
||||
|
||||
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
|
||||
page, PAGE_SIZE, page_file_index(page));
|
||||
page, PAGE_SIZE, page_index(page));
|
||||
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
|
||||
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
|
||||
|
||||
|
|
|
@@ -151,7 +151,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
-	if (i_size > 0 && page_file_index(page) < end_index)
+	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
@@ -603,7 +603,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
 {
	int ret;

-	nfs_pageio_cond_complete(pgio, page_file_index(page));
+	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
				launder);
	if (ret == -EAGAIN) {
@@ -55,10 +55,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
			goto oom;

		for (i = 0; i < rqgi->ngroups; i++) {
-			if (gid_eq(GLOBAL_ROOT_GID, GROUP_AT(rqgi, i)))
-				GROUP_AT(gi, i) = exp->ex_anon_gid;
+			if (gid_eq(GLOBAL_ROOT_GID, rqgi->gid[i]))
+				gi->gid[i] = exp->ex_anon_gid;
			else
-				GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
+				gi->gid[i] = rqgi->gid[i];
		}
	} else {
		gi = get_group_info(rqgi);
@@ -1903,7 +1903,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
	if (g1->ngroups != g2->ngroups)
		return false;
	for (i=0; i<g1->ngroups; i++)
-		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
+		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
 }
@@ -49,12 +49,12 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
 */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

@@ -64,7 +64,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_first_event(group);
 }
@@ -147,7 +147,7 @@ static struct fanotify_perm_event_info *dequeue_event(
 {
	struct fanotify_perm_event_info *event, *return_e = NULL;

-	spin_lock(&group->fanotify_data.access_lock);
+	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
@@ -157,7 +157,7 @@ static struct fanotify_perm_event_info *dequeue_event(
		return_e = event;
		break;
	}
-	spin_unlock(&group->fanotify_data.access_lock);
+	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_re=%p\n", __func__, return_e);

@@ -244,10 +244,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);

	return ret;
 }
@@ -268,9 +268,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
@@ -309,10 +309,10 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
			wake_up(&group->fanotify_data.access_waitq);
			break;
		}
-		spin_lock(&group->fanotify_data.access_lock);
+		spin_lock(&group->notification_lock);
		list_add_tail(&kevent->list,
			      &group->fanotify_data.access_list);
-		spin_unlock(&group->fanotify_data.access_lock);
+		spin_unlock(&group->notification_lock);
 #endif
		}
		buf += ret;
@@ -371,7 +371,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
-	spin_lock(&group->fanotify_data.access_lock);
+	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -380,22 +380,22 @@ static int fanotify_release(struct inode *ignored, struct file *file)
		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
-	spin_unlock(&group->fanotify_data.access_lock);

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
-	mutex_lock(&group->notification_mutex);
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
-		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
+			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
-		else
+			spin_lock(&group->notification_lock);
+		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);

	/* Response for all permission events it set, wakeup waiters */
	wake_up(&group->fanotify_data.access_waitq);
@@ -421,10 +421,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar

	switch (cmd) {
	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}
@@ -765,7 +765,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	spin_lock_init(&group->fanotify_data.access_lock);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
 #endif
@@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
 */
 void fsnotify_group_stop_queueing(struct fsnotify_group *group)
 {
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
	group->shutdown = true;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }

 /*
@@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
	atomic_set(&group->refcnt, 1);
	atomic_set(&group->num_marks, 0);

-	mutex_init(&group->notification_mutex);
+	spin_lock_init(&group->notification_lock);
	INIT_LIST_HEAD(&group->notification_list);
	init_waitqueue_head(&group->notification_waitq);
	group->max_events = UINT_MAX;
@@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);

	return ret;
 }
@@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
 */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
@@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
	if (event_size > count)
		return ERR_PTR(-EINVAL);

-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

@@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

@@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,

	switch (cmd) {
	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
 /* return true if the notify queue is empty, false otherwise */
 bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list) ? true : false;
 }

@@ -73,8 +73,17 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event->mask == FS_Q_OVERFLOW)
		return;
-	/* If the event is still queued, we have a problem... */
-	WARN_ON(!list_empty(&event->list));
+	/*
+	 * If the event is still queued, we have a problem... Do an unreliable
+	 * lockless check first to avoid locking in the common case. The
+	 * locking may be necessary for permission events which got removed
+	 * from the list by a different CPU than the one freeing the event.
+	 */
+	if (!list_empty(&event->list)) {
+		spin_lock(&group->notification_lock);
+		WARN_ON(!list_empty(&event->list));
+		spin_unlock(&group->notification_lock);
+	}
	group->ops->free_event(event);
 }

@@ -95,10 +104,10 @@ int fsnotify_add_event(struct fsnotify_group *group,

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);

	if (group->shutdown) {
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
		return 2;
	}

@@ -106,7 +115,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
@@ -116,7 +125,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
			return ret;
		}
	}
@@ -124,7 +133,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 queue:
	group->q_len++;
	list_add_tail(&event->list, list);
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
@@ -139,7 +148,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
 {
	struct fsnotify_event *event;

-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p\n", __func__, group);

@@ -161,7 +170,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
 */
 struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	assert_spin_locked(&group->notification_lock);

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
@@ -175,12 +184,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
 {
	struct fsnotify_event *event;

-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
+		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
+		spin_lock(&group->notification_lock);
	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }

 /*
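One pattern recurs throughout the conversion above: notification_lock is a spinlock, and destroying an event may sleep, so the lock is dropped around the destroy and retaken before the list is touched again. A consolidated sketch mirroring fsnotify_flush_notify() above:

static void flush_sketch(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);	/* destroy may sleep */
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}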
@@ -2104,7 +2104,7 @@ int o2net_start_listening(struct o2nm_node *node)
	BUG_ON(o2net_listen_sock != NULL);

	mlog(ML_KTHREAD, "starting o2net thread...\n");
-	o2net_wq = create_singlethread_workqueue("o2net");
+	o2net_wq = alloc_ordered_workqueue("o2net", WQ_MEM_RECLAIM);
	if (o2net_wq == NULL) {
		mlog(ML_ERROR, "unable to launch o2net thread\n");
		return -ENOMEM; /* ? */
@@ -1904,7 +1904,7 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
	}

	snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
-	dlm->dlm_worker = create_singlethread_workqueue(wq_name);
+	dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
	if (!dlm->dlm_worker) {
		status = -ENOMEM;
		mlog_errno(status);
@@ -646,7 +646,7 @@ static int __init init_dlmfs_fs(void)
	}
	cleanup_inode = 1;

-	user_dlm_worker = create_singlethread_workqueue("user_dlm");
+	user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
	if (!user_dlm_worker) {
		status = -ENOMEM;
		goto bail;
@@ -123,8 +123,6 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
 #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
 #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)

-extern struct kmem_cache *ocfs2_inode_cache;
-
 extern const struct address_space_operations ocfs2_aops;
 extern const struct ocfs2_caching_operations ocfs2_inode_caching_ops;

@@ -2329,7 +2329,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
	}
	cleancache_init_shared_fs(sb);

-	osb->ocfs2_wq = create_singlethread_workqueue("ocfs2_wq");
+	osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
	if (!osb->ocfs2_wq) {
		status = -ENOMEM;
		mlog_errno(status);
187	fs/proc/array.c
@@ -186,51 +186,45 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 	task_unlock(p);
 	rcu_read_unlock();
 
-	seq_printf(m,
-		"State:\t%s\n"
-		"Tgid:\t%d\n"
-		"Ngid:\t%d\n"
-		"Pid:\t%d\n"
-		"PPid:\t%d\n"
-		"TracerPid:\t%d\n"
-		"Uid:\t%d\t%d\t%d\t%d\n"
-		"Gid:\t%d\t%d\t%d\t%d\n"
-		"FDSize:\t%d\nGroups:\t",
-		get_task_state(p),
-		tgid, ngid, pid_nr_ns(pid, ns), ppid, tpid,
-		from_kuid_munged(user_ns, cred->uid),
-		from_kuid_munged(user_ns, cred->euid),
-		from_kuid_munged(user_ns, cred->suid),
-		from_kuid_munged(user_ns, cred->fsuid),
-		from_kgid_munged(user_ns, cred->gid),
-		from_kgid_munged(user_ns, cred->egid),
-		from_kgid_munged(user_ns, cred->sgid),
-		from_kgid_munged(user_ns, cred->fsgid),
-		max_fds);
+	seq_printf(m, "State:\t%s", get_task_state(p));
+
+	seq_put_decimal_ull(m, "\nTgid:\t", tgid);
+	seq_put_decimal_ull(m, "\nNgid:\t", ngid);
+	seq_put_decimal_ull(m, "\nPid:\t", pid_nr_ns(pid, ns));
+	seq_put_decimal_ull(m, "\nPPid:\t", ppid);
+	seq_put_decimal_ull(m, "\nTracerPid:\t", tpid);
+	seq_put_decimal_ull(m, "\nUid:\t", from_kuid_munged(user_ns, cred->uid));
+	seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->euid));
+	seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->suid));
+	seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->fsuid));
+	seq_put_decimal_ull(m, "\nGid:\t", from_kgid_munged(user_ns, cred->gid));
+	seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->egid));
+	seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->sgid));
+	seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->fsgid));
+	seq_put_decimal_ull(m, "\nFDSize:\t", max_fds);
+
+	seq_puts(m, "\nGroups:\t");
 	group_info = cred->group_info;
 	for (g = 0; g < group_info->ngroups; g++)
-		seq_printf(m, "%d ",
-			   from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
+		seq_put_decimal_ull(m, g ? " " : "",
+				from_kgid_munged(user_ns, group_info->gid[g]));
 	put_cred(cred);
+	/* Trailing space shouldn't have been added in the first place. */
+	seq_putc(m, ' ');
 
 #ifdef CONFIG_PID_NS
 	seq_puts(m, "\nNStgid:");
 	for (g = ns->level; g <= pid->level; g++)
-		seq_printf(m, "\t%d",
-			task_tgid_nr_ns(p, pid->numbers[g].ns));
+		seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns));
 	seq_puts(m, "\nNSpid:");
 	for (g = ns->level; g <= pid->level; g++)
-		seq_printf(m, "\t%d",
-			task_pid_nr_ns(p, pid->numbers[g].ns));
+		seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns));
 	seq_puts(m, "\nNSpgid:");
 	for (g = ns->level; g <= pid->level; g++)
-		seq_printf(m, "\t%d",
-			task_pgrp_nr_ns(p, pid->numbers[g].ns));
+		seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns));
 	seq_puts(m, "\nNSsid:");
 	for (g = ns->level; g <= pid->level; g++)
-		seq_printf(m, "\t%d",
-			task_session_nr_ns(p, pid->numbers[g].ns));
+		seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns));
 #endif
 	seq_putc(m, '\n');
 }
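The fs/proc/array.c rewrite rests on the seq_put_decimal_[u]ll change called out in the merge log: the delimiter parameter becomes a string instead of a single character. The prototypes, as this diff uses them:

	/* old */ void seq_put_decimal_ull(struct seq_file *m, char delimiter,
					   unsigned long long num);
	/* new */ void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
					   unsigned long long num);

A string delimiter lets the field label travel with the number, so a single call replaces a seq_printf() and its per-character format parsing:

	seq_put_decimal_ull(m, "\nTgid:\t", tgid);	/* emits "\nTgid:\t<value>" */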
@@ -299,11 +293,12 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
 		unlock_task_sighand(p, &flags);
 	}
 
-	seq_printf(m, "Threads:\t%d\n", num_threads);
-	seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
+	seq_put_decimal_ull(m, "Threads:\t", num_threads);
+	seq_put_decimal_ull(m, "\nSigQ:\t", qsize);
+	seq_put_decimal_ull(m, "/", qlim);
 
 	/* render them all */
-	render_sigset_t(m, "SigPnd:\t", &pending);
+	render_sigset_t(m, "\nSigPnd:\t", &pending);
 	render_sigset_t(m, "ShdPnd:\t", &shpending);
 	render_sigset_t(m, "SigBlk:\t", &blocked);
 	render_sigset_t(m, "SigIgn:\t", &ignored);
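Note how the output stays byte-for-byte identical: the newline seq_printf() used to append after Threads and SigQ now rides as the leading "\n" of the following delimiter, which is also why the first render_sigset_t() prefix gains a "\n".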
@@ -348,17 +343,17 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
 static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
 {
 #ifdef CONFIG_SECCOMP
-	seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
+	seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
+	seq_putc(m, '\n');
 #endif
 }
 
 static inline void task_context_switch_counts(struct seq_file *m,
 						struct task_struct *p)
 {
-	seq_printf(m,	"voluntary_ctxt_switches:\t%lu\n"
-			"nonvoluntary_ctxt_switches:\t%lu\n",
-			p->nvcsw,
-			p->nivcsw);
+	seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw);
+	seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw);
+	seq_putc(m, '\n');
 }
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
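seq_put_decimal_ull() emits no trailing newline of its own, hence the explicit seq_putc(m, '\n') now terminating both task_seccomp() and task_context_switch_counts().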
@@ -490,41 +485,41 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	start_time = nsec_to_clock_t(task->real_start_time);
 
 	seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
-	seq_put_decimal_ll(m, ' ', ppid);
-	seq_put_decimal_ll(m, ' ', pgid);
-	seq_put_decimal_ll(m, ' ', sid);
-	seq_put_decimal_ll(m, ' ', tty_nr);
-	seq_put_decimal_ll(m, ' ', tty_pgrp);
-	seq_put_decimal_ull(m, ' ', task->flags);
-	seq_put_decimal_ull(m, ' ', min_flt);
-	seq_put_decimal_ull(m, ' ', cmin_flt);
-	seq_put_decimal_ull(m, ' ', maj_flt);
-	seq_put_decimal_ull(m, ' ', cmaj_flt);
-	seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime));
-	seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime));
-	seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime));
-	seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime));
-	seq_put_decimal_ll(m, ' ', priority);
-	seq_put_decimal_ll(m, ' ', nice);
-	seq_put_decimal_ll(m, ' ', num_threads);
-	seq_put_decimal_ull(m, ' ', 0);
-	seq_put_decimal_ull(m, ' ', start_time);
-	seq_put_decimal_ull(m, ' ', vsize);
-	seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
-	seq_put_decimal_ull(m, ' ', rsslim);
-	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
-	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
-	seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
-	seq_put_decimal_ull(m, ' ', esp);
-	seq_put_decimal_ull(m, ' ', eip);
+	seq_put_decimal_ll(m, " ", ppid);
+	seq_put_decimal_ll(m, " ", pgid);
+	seq_put_decimal_ll(m, " ", sid);
+	seq_put_decimal_ll(m, " ", tty_nr);
+	seq_put_decimal_ll(m, " ", tty_pgrp);
+	seq_put_decimal_ull(m, " ", task->flags);
+	seq_put_decimal_ull(m, " ", min_flt);
+	seq_put_decimal_ull(m, " ", cmin_flt);
+	seq_put_decimal_ull(m, " ", maj_flt);
+	seq_put_decimal_ull(m, " ", cmaj_flt);
+	seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
+	seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
+	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
+	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+	seq_put_decimal_ll(m, " ", priority);
+	seq_put_decimal_ll(m, " ", nice);
+	seq_put_decimal_ll(m, " ", num_threads);
+	seq_put_decimal_ull(m, " ", 0);
+	seq_put_decimal_ull(m, " ", start_time);
+	seq_put_decimal_ull(m, " ", vsize);
+	seq_put_decimal_ull(m, " ", mm ? get_mm_rss(mm) : 0);
+	seq_put_decimal_ull(m, " ", rsslim);
+	seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->start_code : 1) : 0);
+	seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->end_code : 1) : 0);
+	seq_put_decimal_ull(m, " ", (permitted && mm) ? mm->start_stack : 0);
+	seq_put_decimal_ull(m, " ", esp);
+	seq_put_decimal_ull(m, " ", eip);
 	/* The signal information here is obsolete.
 	 * It must be decimal for Linux 2.0 compatibility.
 	 * Use /proc/#/status for real-time signals.
 	 */
-	seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL);
-	seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
-	seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
-	seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
+	seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL);
+	seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL);
+	seq_put_decimal_ull(m, " ", sigign.sig[0] & 0x7fffffffUL);
+	seq_put_decimal_ull(m, " ", sigcatch.sig[0] & 0x7fffffffUL);
 
 	/*
 	 * We used to output the absolute kernel address, but that's an
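The /proc/PID/stat portion is purely mechanical: every one-character delimiter ' ' becomes the one-character string " " to satisfy the new prototype, leaving the emitted bytes unchanged.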
@@ -538,31 +533,31 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	else
 		seq_puts(m, " 0");
 
-	seq_put_decimal_ull(m, ' ', 0);
-	seq_put_decimal_ull(m, ' ', 0);
-	seq_put_decimal_ll(m, ' ', task->exit_signal);
-	seq_put_decimal_ll(m, ' ', task_cpu(task));
-	seq_put_decimal_ull(m, ' ', task->rt_priority);
-	seq_put_decimal_ull(m, ' ', task->policy);
-	seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
-	seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
-	seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
+	seq_put_decimal_ull(m, " ", 0);
+	seq_put_decimal_ull(m, " ", 0);
+	seq_put_decimal_ll(m, " ", task->exit_signal);
+	seq_put_decimal_ll(m, " ", task_cpu(task));
+	seq_put_decimal_ull(m, " ", task->rt_priority);
+	seq_put_decimal_ull(m, " ", task->policy);
+	seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task));
+	seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime));
+	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime));
 
 	if (mm && permitted) {
-		seq_put_decimal_ull(m, ' ', mm->start_data);
-		seq_put_decimal_ull(m, ' ', mm->end_data);
-		seq_put_decimal_ull(m, ' ', mm->start_brk);
-		seq_put_decimal_ull(m, ' ', mm->arg_start);
-		seq_put_decimal_ull(m, ' ', mm->arg_end);
-		seq_put_decimal_ull(m, ' ', mm->env_start);
-		seq_put_decimal_ull(m, ' ', mm->env_end);
+		seq_put_decimal_ull(m, " ", mm->start_data);
+		seq_put_decimal_ull(m, " ", mm->end_data);
+		seq_put_decimal_ull(m, " ", mm->start_brk);
+		seq_put_decimal_ull(m, " ", mm->arg_start);
+		seq_put_decimal_ull(m, " ", mm->arg_end);
+		seq_put_decimal_ull(m, " ", mm->env_start);
+		seq_put_decimal_ull(m, " ", mm->env_end);
 	} else
-		seq_printf(m, " 0 0 0 0 0 0 0");
+		seq_puts(m, " 0 0 0 0 0 0 0");
 
 	if (permitted)
-		seq_put_decimal_ll(m, ' ', task->exit_code);
+		seq_put_decimal_ll(m, " ", task->exit_code);
 	else
-		seq_put_decimal_ll(m, ' ', 0);
+		seq_puts(m, " 0");
 
 	seq_putc(m, '\n');
 	if (mm)
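Two small cleanups ride along here: the constant format string moves from seq_printf() to the cheaper seq_puts(), and the unprivileged exit_code branch prints a literal " 0" directly rather than formatting a zero.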
@@ -598,13 +593,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 	 *	seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
 	 *			size, resident, shared, text, data);
 	 */
-	seq_put_decimal_ull(m, 0, size);
-	seq_put_decimal_ull(m, ' ', resident);
-	seq_put_decimal_ull(m, ' ', shared);
-	seq_put_decimal_ull(m, ' ', text);
-	seq_put_decimal_ull(m, ' ', 0);
-	seq_put_decimal_ull(m, ' ', data);
-	seq_put_decimal_ull(m, ' ', 0);
+	seq_put_decimal_ull(m, "", size);
+	seq_put_decimal_ull(m, " ", resident);
+	seq_put_decimal_ull(m, " ", shared);
+	seq_put_decimal_ull(m, " ", text);
+	seq_put_decimal_ull(m, " ", 0);
+	seq_put_decimal_ull(m, " ", data);
+	seq_put_decimal_ull(m, " ", 0);
 	seq_putc(m, '\n');
 
 	return 0;
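For /proc/PID/statm the first field's delimiter becomes the empty string "": the old call passed the char 0 to mean "no delimiter", so the line still begins with the size value itself.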
Some files were not shown because too many files have changed in this diff.