/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <trace/sched.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads; 		/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

DEFINE_TRACE(sched_process_fork);
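/*
 * Return the number of processes in the system, computed by summing the
 * per-CPU process_counts over all online CPUs.
 */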
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
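/*
 * Final teardown of a task_struct once the last reference is gone:
 * release the per-task accounting state and the kernel stack
 * (thread_info), then free the task_struct itself.
 */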
void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
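/*
 * Called when the task's usage count drops to zero: drop the credentials
 * and delay-accounting state, then free the task unless a profiling
 * handler has taken over responsibility for it.
 */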
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	put_cred(tsk->real_cred);
	put_cred(tsk->cred);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
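/*
 * One-time setup at boot: create the task_struct slab cache (unless the
 * architecture provides its own allocator), run the arch-specific cache
 * init, and derive the default max_threads limit and the corresponding
 * RLIMIT_NPROC/RLIMIT_SIGPENDING defaults from the amount of memory.
 */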
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}
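/*
 * Allocate a task_struct and a kernel stack for the child and copy the
 * parent's contents into them.  The new task starts with a usage count
 * of two: one reference for the child itself and one for whoever will
 * eventually call release_task() on it.
 */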
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
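/*
 * Duplicate the parent's address space layout for fork(): copy each VMA
 * (honouring VM_DONTCOPY, memory accounting and the file/anon linkage),
 * rebuild the VMA rbtree in the child and copy the corresponding page
 * table entries via copy_page_range().
 */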
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>
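/*
 * Initialize a freshly allocated (or copied) mm_struct: set up the
 * reference counts, locks, counters and the coredump filter flags, and
 * allocate the page directory.  Returns the mm on success, or frees it
 * and returns NULL on failure.
 */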
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_owner(mm, p);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm, current);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
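/*
 * Set up the child's mm for clone()/fork(): share the parent's mm when
 * CLONE_VM is set (just bumping mm_users), otherwise duplicate it with
 * dup_mm().  Kernel threads, which have no mm, are left with tsk->mm
 * set to NULL.
 */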
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
|
|
|
|
{
|
|
|
|
struct mm_struct * mm, *oldmm;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
tsk->min_flt = tsk->maj_flt = 0;
|
|
|
|
tsk->nvcsw = tsk->nivcsw = 0;
|
|
|
|
|
|
|
|
tsk->mm = NULL;
|
|
|
|
tsk->active_mm = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Are we cloning a kernel thread?
|
|
|
|
*
|
|
|
|
* We need to steal a active VM for that..
|
|
|
|
*/
|
|
|
|
oldmm = current->mm;
|
|
|
|
if (!oldmm)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (clone_flags & CLONE_VM) {
|
|
|
|
atomic_inc(&oldmm->mm_users);
|
|
|
|
mm = oldmm;
|
|
|
|
goto good_mm;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = -ENOMEM;
|
2006-02-08 04:59:01 +08:00
|
|
|
mm = dup_mm(tsk);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!mm)
|
|
|
|
goto fail_nomem;
|
|
|
|
|
|
|
|
good_mm:
|
2006-12-07 12:31:57 +08:00
|
|
|
/* Initializing for Swap token stuff */
|
|
|
|
mm->token_priority = 0;
|
|
|
|
mm->last_interval = 0;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
tsk->mm = mm;
|
|
|
|
tsk->active_mm = mm;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
fail_nomem:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
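For illustration, a hypothetical userspace sketch (not part of fork.c) of the two branches copy_mm() can take: with CLONE_VM the child runs on the parent's mm_struct, so its store is visible to the parent, while without the flag dup_mm() hands the child a copy-on-write duplicate and the store stays private. Assumes Linux/glibc and a downward-growing stack.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int counter;

static int child_fn(void *arg)
{
	(void)arg;
	counter = 42;			/* lands in the shared or the copied mm */
	return 0;
}

static void run(int flags, const char *label)
{
	char *stack = malloc(64 * 1024);

	counter = 0;
	clone(child_fn, stack + 64 * 1024, flags | SIGCHLD, NULL);
	wait(NULL);
	printf("%s: counter = %d\n", label, counter);
	free(stack);
}

int main(void)
{
	run(CLONE_VM, "CLONE_VM (shared mm)");		/* prints 42 */
	run(0, "no CLONE_VM (dup_mm copy)");		/* prints 0  */
	return 0;
}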
2007-10-19 14:41:10 +08:00
|
|
|
static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
|
|
|
|
/* We don't need to lock fs - think why ;-) */
|
|
|
|
if (fs) {
|
|
|
|
atomic_set(&fs->count, 1);
|
|
|
|
rwlock_init(&fs->lock);
|
|
|
|
fs->umask = old->umask;
|
|
|
|
read_lock(&old->lock);
|
2008-02-15 11:34:38 +08:00
|
|
|
fs->root = old->root;
|
|
|
|
path_get(&old->root);
|
|
|
|
fs->pwd = old->pwd;
|
|
|
|
path_get(&old->pwd);
|
2005-04-17 06:20:36 +08:00
|
|
|
read_unlock(&old->lock);
|
|
|
|
}
|
|
|
|
return fs;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct fs_struct *copy_fs_struct(struct fs_struct *old)
|
|
|
|
{
|
|
|
|
return __copy_fs_struct(old);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(copy_fs_struct);
|
|
|
|
|
2007-10-19 14:41:10 +08:00
|
|
|
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (clone_flags & CLONE_FS) {
|
|
|
|
atomic_inc(&current->fs->count);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
tsk->fs = __copy_fs_struct(current->fs);
|
|
|
|
if (!tsk->fs)
|
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
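A hypothetical userspace sketch (not part of fork.c) of what the CLONE_FS branch in copy_fs() means in practice: when the fs_struct is shared, a chdir() in the child moves the parent's working directory too; when __copy_fs_struct() runs instead, the parent stays where it was. Assumes Linux/glibc and a downward-growing stack.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	(void)arg;
	return chdir("/tmp");		/* hits the (possibly shared) fs_struct */
}

static void run(int extra_flags, const char *label)
{
	char buf[256], *stack = malloc(64 * 1024);

	if (chdir("/") != 0)
		return;
	clone(child_fn, stack + 64 * 1024, extra_flags | SIGCHLD, NULL);
	wait(NULL);
	printf("%s: parent cwd is %s\n", label,
	       getcwd(buf, sizeof(buf)) ? buf : "?");
	free(stack);
}

int main(void)
{
	run(CLONE_FS, "CLONE_FS (shared fs_struct)");	/* prints /tmp */
	run(0, "no CLONE_FS (__copy_fs_struct copy)");	/* prints /    */
	return 0;
}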
2006-02-08 04:59:02 +08:00
|
|
|
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
|
|
|
|
{
|
|
|
|
struct files_struct *oldf, *newf;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A background process may not have any files ...
|
|
|
|
*/
|
|
|
|
oldf = current->files;
|
|
|
|
if (!oldf)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (clone_flags & CLONE_FILES) {
|
|
|
|
atomic_inc(&oldf->count);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
newf = dup_fd(oldf, &error);
|
|
|
|
if (!newf)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
tsk->files = newf;
|
|
|
|
error = 0;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
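A hypothetical userspace sketch (not part of fork.c) of the two paths through copy_files(): with CLONE_FILES the descriptor table is shared, so an fd opened by the child is usable in the parent; without it, dup_fd() gives the child a private copy and the parent never learns about descriptors the child opens afterwards. CLONE_VM is set in both runs only so the fd number itself is visible to the parent. Assumes Linux/glibc and a downward-growing stack.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int child_fd;

static int child_fn(void *arg)
{
	(void)arg;
	child_fd = open("/dev/null", O_RDONLY);	/* goes into the child's table */
	return 0;
}

static void run(int extra_flags, const char *label)
{
	char *stack = malloc(64 * 1024);

	clone(child_fn, stack + 64 * 1024,
	      CLONE_VM | extra_flags | SIGCHLD, NULL);
	wait(NULL);

	/* The number is only meaningful here if the files_struct was shared. */
	printf("%s: fd %d is %s in the parent\n", label, child_fd,
	       fcntl(child_fd, F_GETFD) == -1 ? "invalid" : "valid");
	free(stack);
}

int main(void)
{
	run(CLONE_FILES, "CLONE_FILES (shared table)");	/* valid   */
	run(0, "no CLONE_FILES (dup_fd copy)");		/* invalid */
	return 0;
}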
2008-01-24 15:54:47 +08:00
|
|
|
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
|
2008-01-24 15:52:45 +08:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
struct io_context *ioc = current->io_context;
|
|
|
|
|
|
|
|
if (!ioc)
|
|
|
|
return 0;
|
2008-01-24 15:54:47 +08:00
|
|
|
/*
|
|
|
|
* Share io context with parent, if CLONE_IO is set
|
|
|
|
*/
|
|
|
|
if (clone_flags & CLONE_IO) {
|
|
|
|
tsk->io_context = ioc_task_link(ioc);
|
|
|
|
if (unlikely(!tsk->io_context))
|
|
|
|
return -ENOMEM;
|
|
|
|
} else if (ioprio_valid(ioc->ioprio)) {
|
2008-01-24 15:52:45 +08:00
|
|
|
tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
|
|
|
|
if (unlikely(!tsk->io_context))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
tsk->io_context->ioprio = ioc->ioprio;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-10-19 14:41:10 +08:00
|
|
|
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sighand_struct *sig;
|
|
|
|
|
2009-01-07 06:40:46 +08:00
|
|
|
if (clone_flags & CLONE_SIGHAND) {
|
2005-04-17 06:20:36 +08:00
|
|
|
atomic_inc(&current->sighand->count);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
|
2006-01-08 17:01:37 +08:00
|
|
|
rcu_assign_pointer(tsk->sighand, sig);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!sig)
|
|
|
|
return -ENOMEM;
|
|
|
|
atomic_set(&sig->count, 1);
|
|
|
|
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-03-29 08:11:27 +08:00
|
|
|
void __cleanup_sighand(struct sighand_struct *sighand)
|
2006-03-29 08:11:17 +08:00
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&sighand->count))
|
|
|
|
kmem_cache_free(sighand_cachep, sighand);
|
|
|
|
}
|
|
|
|
|
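What sharing a sighand_struct buys can be shown with plain pthreads, since NPTL creates threads with CLONE_SIGHAND. The sketch below is hypothetical and not part of fork.c: a handler installed by one thread becomes the process-wide disposition, so the main thread catches the signal without ever calling sigaction() itself. Build with -pthread.

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void on_usr1(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "SIGUSR1 handled\n", 16);
}

static void *installer(void *arg)
{
	struct sigaction sa = { .sa_handler = on_usr1 };

	(void)arg;
	sigaction(SIGUSR1, &sa, NULL);	/* updates the shared sighand_struct */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, installer, NULL);
	pthread_join(t, NULL);

	raise(SIGUSR1);			/* handled here, in the main thread */
	return 0;
}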
timers: fix itimer/many thread hang
Overview
This patch reworks the handling of POSIX CPU timers, including the
ITIMER_PROF, ITIMER_VIRT timers and rlimit handling. It was put together
with the help of Roland McGrath, the owner and original writer of this code.
The problem we ran into, and the reason for this rework, has to do with using
a profiling timer in a process with a large number of threads. It appears
that the performance of the old implementation of run_posix_cpu_timers() was
at least O(n*3) (where "n" is the number of threads in a process) or worse.
Everything is fine with an increasing number of threads until the time taken
for that routine to run becomes the same as or greater than the tick time, at
which point things degrade rather quickly.
This patch fixes bug 9906, "Weird hang with NPTL and SIGPROF."
Code Changes
This rework corrects the implementation of run_posix_cpu_timers() to make it
run in constant time for a particular machine. (Performance may vary between
one machine and another depending upon whether the kernel is built as single-
or multiprocessor and, in the latter case, depending upon the number of
running processors.) To do this, at each tick we now update fields in
signal_struct as well as task_struct. The run_posix_cpu_timers() function
uses those fields to make its decisions.
We define a new structure, "task_cputime," to contain user, system and
scheduler times and use these in appropriate places:
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
This is included in the structure "thread_group_cputime," which is a new
substructure of signal_struct and which varies for uniprocessor versus
multiprocessor kernels. For uniprocessor kernels, it uses "task_cputime" as
a simple substructure, while for multiprocessor kernels it is a pointer:
struct thread_group_cputime {
struct task_cputime totals;
};
struct thread_group_cputime {
struct task_cputime *totals;
};
We also add a new task_cputime substructure directly to signal_struct, to
cache the earliest expiration of process-wide timers, and task_cputime also
replaces the it_*_expires fields of task_struct (used for earliest expiration
of thread timers). The "thread_group_cputime" structure contains process-wide
timers that are updated via account_user_time() and friends. In the non-SMP
case the structure is a simple aggregator; unfortunately in the SMP case that
simplicity was not achievable due to cache-line contention between CPUs (in
one measured case performance was actually _worse_ on a 16-cpu system than
the same test on a 4-cpu system, due to this contention). For SMP, the
thread_group_cputime counters are maintained as a per-cpu structure allocated
using alloc_percpu(). The timer functions update only the timer field in
the structure corresponding to the running CPU, obtained using per_cpu_ptr().
We define a set of inline functions in sched.h that we use to maintain the
thread_group_cputime structure and hide the differences between UP and SMP
implementations from the rest of the kernel. The thread_group_cputime_init()
function initializes the thread_group_cputime structure for the given task.
The thread_group_cputime_alloc() is a no-op for UP; for SMP it calls the
out-of-line function thread_group_cputime_alloc_smp() to allocate and fill
in the per-cpu structures and fields. The thread_group_cputime_free()
function, also a no-op for UP, in SMP frees the per-cpu structures. The
thread_group_cputime_clone_thread() function (also a UP no-op) for SMP calls
thread_group_cputime_alloc() if the per-cpu structures haven't yet been
allocated. The thread_group_cputime() function fills the task_cputime
structure it is passed with the contents of the thread_group_cputime fields;
in UP it's that simple but in SMP it must also safely check that tsk->signal
is non-NULL (if it is it just uses the appropriate fields of task_struct) and,
if so, sums the per-cpu values for each online CPU. Finally, the three
functions account_group_user_time(), account_group_system_time() and
account_group_exec_runtime() are used by timer functions to update the
respective fields of the thread_group_cputime structure.
Non-SMP operation is trivial and will not be mentioned further.
The per-cpu structure is always allocated when a task creates its first new
thread, via a call to thread_group_cputime_clone_thread() from copy_signal().
It is freed at process exit via a call to thread_group_cputime_free() from
cleanup_signal().
All functions that formerly summed utime/stime/sum_sched_runtime values
from all threads in the thread group now use thread_group_cputime() to
snapshot the values in the thread_group_cputime structure or the values in
the task structure itself if the per-cpu structure hasn't been allocated.
Finally, the code in kernel/posix-cpu-timers.c has changed quite a bit.
The run_posix_cpu_timers() function has been split into a fast path and a
slow path; the former safely checks whether there are any expired thread
timers and, if not, just returns, while the slow path does the heavy lifting.
With the dedicated thread group fields, timers are no longer "rebalanced" and
the process_timer_rebalance() function and related code has gone away. All
summing loops are gone and all code that used them now uses the
thread_group_cputime() inline. When process-wide timers are set, the new
task_cputime structure in signal_struct is used to cache the earliest
expiration; this is checked in the fast path.
Performance
The fix appears not to add significant overhead to existing operations. It
generally performs the same as the current code except in two cases, one in
which it performs slightly worse (Case 5 below) and one in which it performs
very significantly better (Case 2 below). Overall it's a wash except in those
two cases.
I've since done somewhat more involved testing on a dual-core Opteron system.
Case 1: With no itimer running, for a test with 100,000 threads, the fixed
kernel took 1428.5 seconds, 513 seconds more than the unfixed system,
all of which was spent in the system. There were twice as many
voluntary context switches with the fix as without it.
Case 2: With an itimer running at .01 second ticks and 4000 threads (the most
an unmodified kernel can handle), the fixed kernel ran the test in
eight percent of the time (5.8 seconds as opposed to 70 seconds) and
had better tick accuracy (.012 seconds per tick as opposed to .023
seconds per tick).
Case 3: A 4000-thread test with an initial timer tick of .01 second and an
interval of 10,000 seconds (i.e. a timer that ticks only once) had
very nearly the same performance in both cases: 6.3 seconds elapsed
for the fixed kernel versus 5.5 seconds for the unfixed kernel.
With fewer threads (eight in these tests), the Case 1 test ran in essentially
the same time on both the modified and unmodified kernels (5.2 seconds versus
5.8 seconds). The Case 2 test ran in about the same time as well, 5.9 seconds
versus 5.4 seconds but again with much better tick accuracy, .013 seconds per
tick versus .025 seconds per tick for the unmodified kernel.
Since the fix affected the rlimit code, I also tested soft and hard CPU limits.
Case 4: With a hard CPU limit of 20 seconds and eight threads (and an itimer
running), the modified kernel was very slightly favored in that while
it killed the process in 19.997 seconds of CPU time (5.002 seconds of
wall time), only .003 seconds of that was system time, the rest was
user time. The unmodified kernel killed the process in 20.001 seconds
of CPU (5.014 seconds of wall time) of which .016 seconds was system
time. Really, though, the results were too close to call. The results
were essentially the same with no itimer running.
Case 5: With a soft limit of 20 seconds and a hard limit of 2000 seconds
(where the hard limit would never be reached) and an itimer running,
the modified kernel exhibited worse tick accuracy than the unmodified
kernel: .050 seconds/tick versus .028 seconds/tick. Otherwise,
performance was almost indistinguishable. With no itimer running this
test exhibited virtually identical behavior and times in both cases.
In times past I did some limited performance testing; those results are below.
On a four-cpu Opteron system without this fix, a sixteen-thread test executed
in 3569.991 seconds, of which user was 3568.435s and system was 1.556s. On
the same system with the fix, user and elapsed time were about the same, but
system time dropped to 0.007 seconds. Performance with eight, four and one
thread was comparable. Interestingly, the timer ticks with the fix seemed
more accurate: The sixteen-thread test with the fix received 149543 ticks
for 0.024 seconds per tick, while the same test without the fix received 58720
for 0.061 seconds per tick. Both cases were configured for an interval of
0.01 seconds. Again, the other tests were comparable. Each thread in this
test computed the primes up to 25,000,000.
I also did a test with a large number of threads, 100,000 threads, which is
impossible without the fix. In this case each thread computed the primes only
up to 10,000 (to make the runtime manageable). System time dominated, at
1546.968 seconds out of a total 2176.906 seconds (giving a user time of
629.938s). It received 147651 ticks for 0.015 seconds per tick, still quite
accurate. There is obviously no comparable test without the fix.
Signed-off-by: Frank Mayhar <fmayhar@google.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-09-13 00:54:39 +08:00
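A minimal, freestanding sketch of the accounting scheme the message above describes, not the kernel code itself: each CPU accumulates into its own task_cputime slot and a snapshot sums the slots, so neither the tick path nor the snapshot has to walk every thread in the group. NR_CPUS, the static array, and the function names here are stand-ins for the real per-cpu machinery.

#include <stdio.h>

#define NR_CPUS 4			/* stand-in for the real configuration */

typedef unsigned long long cputime_t;	/* simplified; the real type is arch-specific */

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Stand-in for the alloc_percpu() array used on SMP. */
static struct task_cputime per_cpu_totals[NR_CPUS];

static void account_group_user_time(int cpu, cputime_t delta)
{
	per_cpu_totals[cpu].utime += delta;	/* touches only this CPU's slot */
}

static void thread_group_cputime(struct task_cputime *times)
{
	int cpu;

	times->utime = 0;
	times->stime = 0;
	times->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {	/* O(nr_cpus), not O(threads) */
		times->utime += per_cpu_totals[cpu].utime;
		times->stime += per_cpu_totals[cpu].stime;
		times->sum_exec_runtime += per_cpu_totals[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct task_cputime snap;

	account_group_user_time(0, 5);		/* tick charged on CPU 0 */
	account_group_user_time(1, 7);		/* tick charged on CPU 1 */
	thread_group_cputime(&snap);
	printf("group utime = %llu\n", snap.utime);	/* prints 12 */
	return 0;
}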
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize POSIX timer handling for a thread group.
|
|
|
|
*/
|
|
|
|
static void posix_cpu_timers_init_group(struct signal_struct *sig)
|
|
|
|
{
|
|
|
|
/* Thread group counters. */
|
|
|
|
thread_group_cputime_init(sig);
|
|
|
|
|
|
|
|
/* Expiration times and increments. */
|
|
|
|
sig->it_virt_expires = cputime_zero;
|
|
|
|
sig->it_virt_incr = cputime_zero;
|
|
|
|
sig->it_prof_expires = cputime_zero;
|
|
|
|
sig->it_prof_incr = cputime_zero;
|
|
|
|
|
|
|
|
/* Cached expiration times. */
|
|
|
|
sig->cputime_expires.prof_exp = cputime_zero;
|
|
|
|
sig->cputime_expires.virt_exp = cputime_zero;
|
|
|
|
sig->cputime_expires.sched_exp = 0;
|
|
|
|
|
|
|
|
/* The timer lists. */
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[0]);
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[1]);
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[2]);
|
|
|
|
}
|
|
|
|
|
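The it_prof_expires/it_prof_incr fields initialized above back the classic profiling itimer. A hypothetical userspace sketch of the consumer side, assuming standard POSIX APIs: the process arms ITIMER_PROF with setitimer(), burns CPU, and receives SIGPROF each time the group-wide CPU-time interval expires.

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

static void on_prof(int sig)
{
	(void)sig;
	ticks++;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_prof };
	struct itimerval it = {
		.it_interval = { .tv_sec = 0, .tv_usec = 10000 },	/* 10 ms of CPU time */
		.it_value    = { .tv_sec = 0, .tv_usec = 10000 },
	};

	sigaction(SIGPROF, &sa, NULL);
	setitimer(ITIMER_PROF, &it, NULL);

	while (ticks < 100)		/* burn CPU so the profiling clock advances */
		;

	printf("received %d SIGPROF ticks\n", (int)ticks);
	return 0;
}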
2007-10-19 14:41:10 +08:00
|
|
|
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct signal_struct *sig;
|
|
|
|
|
|
|
|
if (clone_flags & CLONE_THREAD) {
|
2008-11-25 00:06:57 +08:00
|
|
|
atomic_inc(&current->signal->count);
|
|
|
|
atomic_inc(&current->signal->live);
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
|
2008-11-25 00:06:57 +08:00
|
|
|
|
|
|
|
if (sig)
|
|
|
|
posix_cpu_timers_init_group(sig);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
tsk->signal = sig;
|
|
|
|
if (!sig)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
atomic_set(&sig->count, 1);
|
|
|
|
atomic_set(&sig->live, 1);
|
|
|
|
init_waitqueue_head(&sig->wait_chldexit);
|
|
|
|
sig->flags = 0;
|
|
|
|
sig->group_exit_code = 0;
|
|
|
|
sig->group_exit_task = NULL;
|
|
|
|
sig->group_stop_count = 0;
|
2008-04-30 15:52:52 +08:00
|
|
|
sig->curr_target = tsk;
|
2005-04-17 06:20:36 +08:00
|
|
|
init_sigpending(&sig->shared_pending);
|
|
|
|
INIT_LIST_HEAD(&sig->posix_timers);
|
|
|
|
|
2007-02-16 17:27:49 +08:00
|
|
|
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
2006-01-10 12:52:34 +08:00
|
|
|
sig->it_real_incr.tv64 = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
sig->real_timer.function = it_real_fn;
|
|
|
|
|
|
|
|
sig->leader = 0; /* session leadership doesn't inherit */
|
2007-02-12 16:53:00 +08:00
|
|
|
sig->tty_old_pgrp = NULL;
|
2008-10-13 17:37:26 +08:00
|
|
|
sig->tty = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-09-13 00:54:39 +08:00
|
|
|
sig->cutime = sig->cstime = cputime_zero;
|
2007-10-15 23:00:19 +08:00
|
|
|
sig->gtime = cputime_zero;
|
|
|
|
sig->cgtime = cputime_zero;
|
2005-04-17 06:20:36 +08:00
|
|
|
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
|
|
|
|
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
|
2007-05-11 13:22:37 +08:00
|
|
|
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
|
2008-07-27 23:29:15 +08:00
|
|
|
task_io_accounting_init(&sig->ioac);
|
2006-07-14 15:24:44 +08:00
|
|
|
taskstats_tgid_init(sig);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
task_lock(current->group_leader);
|
|
|
|
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
|
|
|
|
task_unlock(current->group_leader);
|
|
|
|
|
2006-06-25 20:49:24 +08:00
|
|
|
acct_init_pacct(&sig->pacct);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
Audit: add TTY input auditing
Add TTY input auditing, used to audit system administrator's actions. This is
required by various security standards such as DCID 6/3 and PCI to provide
non-repudiation of administrator's actions and to allow a review of past
actions if the administrator seems to overstep their duties or if the system
becomes misconfigured for unknown reasons. These requirements do not make it
necessary to audit TTY output as well.
Compared to a user-space keylogger, this approach records TTY input using the
audit subsystem, correlated with other audit events, and it is completely
transparent to the user-space application (e.g. the console ioctls still
work).
TTY input auditing works on a higher level than auditing all system calls
within the session, which would produce an overwhelming amount of mostly
useless audit events.
Add an "audit_tty" attribute, inherited across fork(). Data read from TTYs
by process with the attribute is sent to the audit subsystem by the kernel.
The audit netlink interface is extended to allow modifying the audit_tty
attribute, and to allow sending explanatory audit events from user-space (for
example, a shell might send an event containing the final command, after the
interactive command-line editing and history expansion is performed, which
might be difficult to decipher from the TTY input alone).
Because the "audit_tty" attribute is inherited across fork(), it would be set
e.g. for sshd restarted within an audited session. To prevent this, the
audit_tty attribute is cleared when a process with no open TTY file
descriptors (e.g. after daemon startup) opens a TTY.
See https://www.redhat.com/archives/linux-audit/2007-June/msg00000.html for a
more detailed rationale document for an older version of this patch.
[akpm@linux-foundation.org: build fix]
Signed-off-by: Miloslav Trmac <mitr@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Paul Fulghum <paulkf@microgate.com>
Cc: Casey Schaufler <casey@schaufler-ca.com>
Cc: Steve Grubb <sgrubb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-16 14:40:56 +08:00
|
|
|
tty_audit_fork(sig);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-03-29 08:11:16 +08:00
|
|
|
void __cleanup_signal(struct signal_struct *sig)
|
|
|
|
{
|
2008-09-13 00:54:39 +08:00
|
|
|
thread_group_cputime_free(sig);
|
2008-10-13 17:37:26 +08:00
|
|
|
tty_kref_put(sig->tty);
|
2006-03-29 08:11:16 +08:00
|
|
|
kmem_cache_free(signal_cachep, sig);
|
|
|
|
}
|
|
|
|
|
2007-10-19 14:41:10 +08:00
|
|
|
static void cleanup_signal(struct task_struct *tsk)
|
2006-03-29 08:11:16 +08:00
|
|
|
{
|
|
|
|
struct signal_struct *sig = tsk->signal;
|
|
|
|
|
|
|
|
atomic_dec(&sig->live);
|
|
|
|
|
|
|
|
if (atomic_dec_and_test(&sig->count))
|
|
|
|
__cleanup_signal(sig);
|
|
|
|
}
|
|
|
|
|
2007-10-19 14:41:10 +08:00
|
|
|
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
unsigned long new_flags = p->flags;
|
|
|
|
|
2007-07-17 19:03:35 +08:00
|
|
|
new_flags &= ~PF_SUPERPRIV;
|
2005-04-17 06:20:36 +08:00
|
|
|
new_flags |= PF_FORKNOEXEC;
|
2008-07-26 10:45:47 +08:00
|
|
|
new_flags |= PF_STARTING;
|
2005-04-17 06:20:36 +08:00
|
|
|
p->flags = new_flags;
|
2007-10-18 18:04:45 +08:00
|
|
|
clear_freeze_flag(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2009-01-14 21:14:10 +08:00
|
|
|
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
current->clear_child_tid = tidptr;
|
|
|
|
|
2007-10-19 14:40:14 +08:00
|
|
|
return task_pid_vnr(current);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
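How userspace reaches this syscall, as a hypothetical sketch (not part of fork.c): the threading library calls set_tid_address() at startup so the kernel knows which word to clear and futex-wake when the thread exits (see the tid-clearing code in mm_release() earlier); a raw call simply returns the caller's thread id.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t tid_word;			/* kernel clears this word on exit */

int main(void)
{
	long tid = syscall(SYS_set_tid_address, &tid_word);

	printf("set_tid_address() returned tid %ld\n", tid);
	return 0;
}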
2007-10-19 14:41:10 +08:00
|
|
|
static void rt_mutex_init_task(struct task_struct *p)
|
2006-06-27 17:54:53 +08:00
|
|
|
{
|
|
|
|
spin_lock_init(&p->pi_lock);
|
2007-03-17 05:38:34 +08:00
|
|
|
#ifdef CONFIG_RT_MUTEXES
|
2006-06-27 17:54:53 +08:00
|
|
|
plist_head_init(&p->pi_waiters, &p->pi_lock);
|
|
|
|
p->pi_blocked_on = NULL;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
cgroups: add an owner to the mm_struct
Remove the mem_cgroup member from mm_struct and instead adds an owner.
This approach was suggested by Paul Menage. The advantage of this approach
is that, once the mm->owner is known, using the subsystem id, the cgroup
can be determined. It also allows several control groups that are
virtually grouped by mm_struct to exist independently of the memory
controller, i.e., without adding a mem_cgroup for each controller to
mm_struct.
A new config option CONFIG_MM_OWNER is added and the memory resource
controller selects this config option.
This patch also adds cgroup callbacks to notify subsystems when mm->owner
changes. The mm_cgroup_changed callback is called with the task_lock() of
the new task held and is called just prior to changing the mm->owner.
I am indebted to Paul Menage for the several reviews of this patchset and
helping me make it lighter and simpler.
This patch was tested on a powerpc box, it was compiled with both the
MM_OWNER config turned on and off.
After the thread group leader exits, it's moved to init_css_set by
cgroup_exit(), thus all future charges from running threads would be
redirected to the init_css_set's subsystem.
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Sudhir Kumar <skumar@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: David Rientjes <rientjes@google.com>,
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Reviewed-by: Paul Menage <menage@google.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-29 16:00:16 +08:00
|
|
|
#ifdef CONFIG_MM_OWNER
|
|
|
|
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
|
|
|
|
{
|
|
|
|
mm->owner = p;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_MM_OWNER */
|
|
|
|
|
2008-09-13 00:54:39 +08:00
|
|
|
/*
|
|
|
|
* Initialize POSIX timer handling for a single task.
|
|
|
|
*/
|
|
|
|
static void posix_cpu_timers_init(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
tsk->cputime_expires.prof_exp = cputime_zero;
|
|
|
|
tsk->cputime_expires.virt_exp = cputime_zero;
|
|
|
|
tsk->cputime_expires.sched_exp = 0;
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This creates a new process as a copy of the old one,
|
|
|
|
* but does not actually start it yet.
|
|
|
|
*
|
|
|
|
* It copies the registers, and all the appropriate
|
|
|
|
* parts of the process environment (as per the clone
|
|
|
|
* flags). The actual kick-off is left to the caller.
|
|
|
|
*/
|
2006-07-03 15:25:41 +08:00
|
|
|
static struct task_struct *copy_process(unsigned long clone_flags,
|
|
|
|
unsigned long stack_start,
|
|
|
|
struct pt_regs *regs,
|
|
|
|
unsigned long stack_size,
|
|
|
|
int __user *child_tidptr,
|
2008-07-26 10:45:47 +08:00
|
|
|
struct pid *pid,
|
|
|
|
int trace)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int retval;
|
2007-10-19 14:41:09 +08:00
|
|
|
struct task_struct *p;
|
2007-10-19 14:39:33 +08:00
|
|
|
int cgroup_callbacks_done = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Thread groups must share signals as well, and detached threads
|
|
|
|
* can only be started up within the thread group.
|
|
|
|
*/
|
|
|
|
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shared signal handlers imply shared VM. By way of the above,
|
|
|
|
* thread groups also imply shared VM. Blocking this case allows
|
|
|
|
* for various simplifications in other code.
|
|
|
|
*/
|
|
|
|
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
retval = security_task_create(clone_flags);
|
|
|
|
if (retval)
|
|
|
|
goto fork_out;
|
|
|
|
|
|
|
|
retval = -ENOMEM;
|
|
|
|
p = dup_task_struct(current);
|
|
|
|
if (!p)
|
|
|
|
goto fork_out;
|
|
|
|
|
2006-10-17 15:10:33 +08:00
|
|
|
rt_mutex_init_task(p);
|
|
|
|
|
2008-07-14 18:09:28 +08:00
|
|
|
#ifdef CONFIG_PROVE_LOCKING
|
2006-07-03 15:24:42 +08:00
|
|
|
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
|
|
|
|
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
retval = -EAGAIN;
|
2008-11-14 07:39:26 +08:00
|
|
|
if (atomic_read(&p->real_cred->user->processes) >=
|
2005-04-17 06:20:36 +08:00
|
|
|
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
|
|
|
|
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
|
2008-10-16 05:38:45 +08:00
|
|
|
p->real_cred->user != INIT_USER)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto bad_fork_free;
|
|
|
|
}
|
|
|
|
|
2008-11-14 07:39:17 +08:00
|
|
|
retval = copy_creds(p, clone_flags);
|
|
|
|
if (retval < 0)
|
|
|
|
goto bad_fork_free;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If multiple threads are within copy_process(), then this check
|
|
|
|
* triggers too late. This doesn't hurt, the check is only there
|
|
|
|
* to stop root fork bombs.
|
|
|
|
*/
|
2009-02-06 16:17:19 +08:00
|
|
|
retval = -EAGAIN;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (nr_threads >= max_threads)
|
|
|
|
goto bad_fork_cleanup_count;
|
|
|
|
|
2005-11-14 08:06:55 +08:00
|
|
|
if (!try_module_get(task_thread_info(p)->exec_domain->module))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto bad_fork_cleanup_count;
|
|
|
|
|
|
|
|
if (p->binfmt && !try_module_get(p->binfmt->module))
|
|
|
|
goto bad_fork_cleanup_put_domain;
|
|
|
|
|
|
|
|
p->did_exec = 0;
|
2006-07-14 15:24:36 +08:00
|
|
|
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
|
2005-04-17 06:20:36 +08:00
|
|
|
copy_flags(clone_flags, p);
|
|
|
|
INIT_LIST_HEAD(&p->children);
|
|
|
|
INIT_LIST_HEAD(&p->sibling);
|
2008-01-26 04:08:24 +08:00
|
|
|
#ifdef CONFIG_PREEMPT_RCU
|
|
|
|
p->rcu_read_lock_nesting = 0;
|
|
|
|
p->rcu_flipctr_idx = 0;
|
|
|
|
#endif /* #ifdef CONFIG_PREEMPT_RCU */
|
2005-04-17 06:20:36 +08:00
|
|
|
p->vfork_done = NULL;
|
|
|
|
spin_lock_init(&p->alloc_lock);
|
|
|
|
|
|
|
|
clear_tsk_thread_flag(p, TIF_SIGPENDING);
|
|
|
|
init_sigpending(&p->pending);
|
|
|
|
|
|
|
|
p->utime = cputime_zero;
|
|
|
|
p->stime = cputime_zero;
|
2007-10-15 23:00:19 +08:00
|
|
|
p->gtime = cputime_zero;
|
2007-10-18 18:06:34 +08:00
|
|
|
p->utimescaled = cputime_zero;
|
|
|
|
p->stimescaled = cputime_zero;
|
2007-10-30 04:18:11 +08:00
|
|
|
p->prev_utime = cputime_zero;
|
2007-10-30 07:26:32 +08:00
|
|
|
p->prev_stime = cputime_zero;
|
2007-07-10 00:52:00 +08:00
|
|
|
|
2008-09-02 06:52:40 +08:00
|
|
|
p->default_timer_slack_ns = current->timer_slack_ns;
|
|
|
|
|
2008-01-26 04:08:02 +08:00
|
|
|
#ifdef CONFIG_DETECT_SOFTLOCKUP
|
|
|
|
p->last_switch_count = 0;
|
|
|
|
p->last_switch_timestamp = 0;
|
|
|
|
#endif
|
|
|
|
|
2008-07-27 23:29:15 +08:00
|
|
|
task_io_accounting_init(&p->ioac);
|
2005-04-17 06:20:36 +08:00
|
|
|
acct_clear_integrals(p);
|
|
|
|
|
timers: fix itimer/many thread hang
Overview
This patch reworks the handling of POSIX CPU timers, including the
ITIMER_PROF, ITIMER_VIRT timers and rlimit handling. It was put together
with the help of Roland McGrath, the owner and original writer of this code.
The problem we ran into, and the reason for this rework, has to do with using
a profiling timer in a process with a large number of threads. It appears
that the performance of the old implementation of run_posix_cpu_timers() was
at least O(n*3) (where "n" is the number of threads in a process) or worse.
Everything is fine with an increasing number of threads until the time taken
for that routine to run becomes the same as or greater than the tick time, at
which point things degrade rather quickly.
This patch fixes bug 9906, "Weird hang with NPTL and SIGPROF."
Code Changes
This rework corrects the implementation of run_posix_cpu_timers() to make it
run in constant time for a particular machine. (Performance may vary between
one machine and another depending upon whether the kernel is built as single-
or multiprocessor and, in the latter case, depending upon the number of
running processors.) To do this, at each tick we now update fields in
signal_struct as well as task_struct. The run_posix_cpu_timers() function
uses those fields to make its decisions.
We define a new structure, "task_cputime," to contain user, system and
scheduler times and use these in appropriate places:
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
This is included in the structure "thread_group_cputime," which is a new
substructure of signal_struct and which varies for uniprocessor versus
multiprocessor kernels. For uniprocessor kernels, it uses "task_cputime" as
a simple substructure, while for multiprocessor kernels it is a pointer:
struct thread_group_cputime {
struct task_cputime totals;
};
struct thread_group_cputime {
struct task_cputime *totals;
};
We also add a new task_cputime substructure directly to signal_struct, to
cache the earliest expiration of process-wide timers, and task_cputime also
replaces the it_*_expires fields of task_struct (used for earliest expiration
of thread timers). The "thread_group_cputime" structure contains process-wide
timers that are updated via account_user_time() and friends. In the non-SMP
case the structure is a simple aggregator; unfortunately in the SMP case that
simplicity was not achievable due to cache-line contention between CPUs (in
one measured case performance was actually _worse_ on a 16-cpu system than
the same test on a 4-cpu system, due to this contention). For SMP, the
thread_group_cputime counters are maintained as a per-cpu structure allocated
using alloc_percpu(). The timer functions update only the timer field in
the structure corresponding to the running CPU, obtained using per_cpu_ptr().
We define a set of inline functions in sched.h that we use to maintain the
thread_group_cputime structure and hide the differences between UP and SMP
implementations from the rest of the kernel. The thread_group_cputime_init()
function initializes the thread_group_cputime structure for the given task.
The thread_group_cputime_alloc() is a no-op for UP; for SMP it calls the
out-of-line function thread_group_cputime_alloc_smp() to allocate and fill
in the per-cpu structures and fields. The thread_group_cputime_free()
function, also a no-op for UP, in SMP frees the per-cpu structures. The
thread_group_cputime_clone_thread() function (also a UP no-op) for SMP calls
thread_group_cputime_alloc() if the per-cpu structures haven't yet been
allocated. The thread_group_cputime() function fills the task_cputime
structure it is passed with the contents of the thread_group_cputime fields;
in UP it's that simple but in SMP it must also safely check that tsk->signal
is non-NULL (if it is it just uses the appropriate fields of task_struct) and,
if so, sums the per-cpu values for each online CPU. Finally, the three
functions account_group_user_time(), account_group_system_time() and
account_group_exec_runtime() are used by timer functions to update the
respective fields of the thread_group_cputime structure.
Non-SMP operation is trivial and will not be mentioned further.
The per-cpu structure is always allocated when a task creates its first new
thread, via a call to thread_group_cputime_clone_thread() from copy_signal().
It is freed at process exit via a call to thread_group_cputime_free() from
cleanup_signal().
All functions that formerly summed utime/stime/sum_sched_runtime values
from all threads in the thread group now use thread_group_cputime() to
snapshot the values in the thread_group_cputime structure or the values in
the task structure itself if the per-cpu structure hasn't been allocated.
Finally, the code in kernel/posix-cpu-timers.c has changed quite a bit.
The run_posix_cpu_timers() function has been split into a fast path and a
slow path; the former safely checks whether there are any expired thread
timers and, if not, just returns, while the slow path does the heavy lifting.
With the dedicated thread group fields, timers are no longer "rebalanced" and
the process_timer_rebalance() function and related code has gone away. All
summing loops are gone and all code that used them now uses the
thread_group_cputime() inline. When process-wide timers are set, the new
task_cputime structure in signal_struct is used to cache the earliest
expiration; this is checked in the fast path.
Performance
The fix appears not to add significant overhead to existing operations. It
generally performs the same as the current code except in two cases, one in
which it performs slightly worse (Case 5 below) and one in which it performs
very significantly better (Case 2 below). Overall it's a wash except in those
two cases.
I've since done somewhat more involved testing on a dual-core Opteron system.
Case 1: With no itimer running, for a test with 100,000 threads, the fixed
kernel took 1428.5 seconds, 513 seconds more than the unfixed system,
all of which was spent in the system. There were twice as many
voluntary context switches with the fix as without it.
Case 2: With an itimer running at .01 second ticks and 4000 threads (the most
an unmodified kernel can handle), the fixed kernel ran the test in
eight percent of the time (5.8 seconds as opposed to 70 seconds) and
had better tick accuracy (.012 seconds per tick as opposed to .023
seconds per tick).
Case 3: A 4000-thread test with an initial timer tick of .01 second and an
interval of 10,000 seconds (i.e. a timer that ticks only once) had
very nearly the same performance in both cases: 6.3 seconds elapsed
for the fixed kernel versus 5.5 seconds for the unfixed kernel.
With fewer threads (eight in these tests), the Case 1 test ran in essentially
the same time on both the modified and unmodified kernels (5.2 seconds versus
5.8 seconds). The Case 2 test ran in about the same time as well, 5.9 seconds
versus 5.4 seconds but again with much better tick accuracy, .013 seconds per
tick versus .025 seconds per tick for the unmodified kernel.
Since the fix affected the rlimit code, I also tested soft and hard CPU limits.
Case 4: With a hard CPU limit of 20 seconds and eight threads (and an itimer
running), the modified kernel was very slightly favored in that while
it killed the process in 19.997 seconds of CPU time (5.002 seconds of
wall time), only .003 seconds of that was system time, the rest was
user time. The unmodified kernel killed the process in 20.001 seconds
of CPU (5.014 seconds of wall time) of which .016 seconds was system
time. Really, though, the results were too close to call. The results
were essentially the same with no itimer running.
Case 5: With a soft limit of 20 seconds and a hard limit of 2000 seconds
(where the hard limit would never be reached) and an itimer running,
the modified kernel exhibited worse tick accuracy than the unmodified
kernel: .050 seconds/tick versus .028 seconds/tick. Otherwise,
performance was almost indistinguishable. With no itimer running this
test exhibited virtually identical behavior and times in both cases.
In times past I did some limited performance testing; those results are below.
On a four-cpu Opteron system without this fix, a sixteen-thread test executed
in 3569.991 seconds, of which user was 3568.435s and system was 1.556s. On
the same system with the fix, user and elapsed time were about the same, but
system time dropped to 0.007 seconds. Performance with eight, four and one
thread were comparable. Interestingly, the timer ticks with the fix seemed
more accurate: The sixteen-thread test with the fix received 149543 ticks
for 0.024 seconds per tick, while the same test without the fix received 58720
for 0.061 seconds per tick. Both cases were configured for an interval of
0.01 seconds. Again, the other tests were comparable. Each thread in this
test computed the primes up to 25,000,000.
I also did a test with a large number of threads, 100,000 threads, which is
impossible without the fix. In this case each thread computed the primes only
up to 10,000 (to make the runtime manageable). System time dominated, at
1546.968 seconds out of a total 2176.906 seconds (giving a user time of
629.938s). It received 147651 ticks for 0.015 seconds per tick, still quite
accurate. There is obviously no comparable test without the fix.
Signed-off-by: Frank Mayhar <fmayhar@google.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
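The UP/SMP split described above is easiest to picture with a small user-space model: each CPU owns a task_cputime slot that the accounting hooks update locally, and a reader sums the slots on demand, which is roughly what the SMP thread_group_cputime() does. The NR_CPUS value and the plain array standing in for alloc_percpu() are assumptions of the model, not the kernel's types:

/* User-space model of the SMP aggregation described above: per-CPU slots
 * are updated independently and a reader sums them.  cputime_t is modelled
 * as unsigned long long; NR_CPUS is an arbitrary assumption. */
#include <stdio.h>

#define NR_CPUS 4

struct task_cputime {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
};

/* stand-in for the alloc_percpu()'d totals */
static struct task_cputime per_cpu_totals[NR_CPUS];

/* roughly what account_group_user_time() does for the running CPU */
static void account_group_user_time(int cpu, unsigned long long delta)
{
	per_cpu_totals[cpu].utime += delta;
}

/* roughly what thread_group_cputime() does: sum every CPU's slot */
static void thread_group_cputime(struct task_cputime *times)
{
	int cpu;

	times->utime = times->stime = times->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		times->utime += per_cpu_totals[cpu].utime;
		times->stime += per_cpu_totals[cpu].stime;
		times->sum_exec_runtime += per_cpu_totals[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct task_cputime snap;

	account_group_user_time(0, 10);
	account_group_user_time(2, 5);
	thread_group_cputime(&snap);
	printf("utime total: %llu\n", snap.utime);	/* prints 15 */
	return 0;
}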
2008-09-13 00:54:39 +08:00
|
|
|
posix_cpu_timers_init(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
p->lock_depth = -1; /* -1 = no lock */
|
|
|
|
do_posix_clock_monotonic_gettime(&p->start_time);
|
2007-07-16 14:39:42 +08:00
|
|
|
p->real_start_time = p->start_time;
|
|
|
|
monotonic_to_bootbased(&p->real_start_time);
|
2005-04-17 06:20:36 +08:00
|
|
|
p->io_context = NULL;
|
|
|
|
p->audit_context = NULL;
|
2007-10-19 14:39:33 +08:00
|
|
|
cgroup_fork(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-04-28 17:13:09 +08:00
|
|
|
p->mempolicy = mpol_dup(p->mempolicy);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (IS_ERR(p->mempolicy)) {
|
|
|
|
retval = PTR_ERR(p->mempolicy);
|
|
|
|
p->mempolicy = NULL;
|
2007-10-19 14:39:33 +08:00
|
|
|
goto bad_fork_cleanup_cgroup;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2006-03-24 19:16:08 +08:00
|
|
|
mpol_fix_fork_child_flag(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
2006-07-03 15:24:42 +08:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
|
|
p->irq_events = 0;
|
2006-08-27 19:26:34 +08:00
|
|
|
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
|
|
|
|
p->hardirqs_enabled = 1;
|
|
|
|
#else
|
2006-07-03 15:24:42 +08:00
|
|
|
p->hardirqs_enabled = 0;
|
2006-08-27 19:26:34 +08:00
|
|
|
#endif
|
2006-07-03 15:24:42 +08:00
|
|
|
p->hardirq_enable_ip = 0;
|
|
|
|
p->hardirq_enable_event = 0;
|
|
|
|
p->hardirq_disable_ip = _THIS_IP_;
|
|
|
|
p->hardirq_disable_event = 0;
|
|
|
|
p->softirqs_enabled = 1;
|
|
|
|
p->softirq_enable_ip = _THIS_IP_;
|
|
|
|
p->softirq_enable_event = 0;
|
|
|
|
p->softirq_disable_ip = 0;
|
|
|
|
p->softirq_disable_event = 0;
|
|
|
|
p->hardirq_context = 0;
|
|
|
|
p->softirq_context = 0;
|
|
|
|
#endif
|
[PATCH] lockdep: core
Do 'make oldconfig' and accept all the defaults for new config options -
reboot into the kernel and if everything goes well it should boot up fine and
you should have /proc/lockdep and /proc/lockdep_stats files.
Typically if the lock validator finds some problem it will print out
voluminous debug output that begins with "BUG: ..." and which syslog output
can be used by kernel developers to figure out the precise locking scenario.
What does the lock validator do? It "observes" and maps all locking rules as
they occur dynamically (as triggered by the kernel's natural use of spinlocks,
rwlocks, mutexes and rwsems). Whenever the lock validator subsystem detects a
new locking scenario, it validates this new rule against the existing set of
rules. If this new rule is consistent with the existing set of rules then the
new rule is added transparently and the kernel continues as normal. If the
new rule could create a deadlock scenario then this condition is printed out.
When determining validity of locking, all possible "deadlock scenarios" are
considered: assuming arbitrary number of CPUs, arbitrary irq context and task
context constellations, running arbitrary combinations of all the existing
locking scenarios. In a typical system this means millions of separate
scenarios. This is why we call it a "locking correctness" validator - for all
rules that are observed the lock validator proves it with mathematical
certainty that a deadlock could not occur (assuming that the lock validator
implementation itself is correct and its internal data structures are not
corrupted by some other kernel subsystem). [see more details and conditionals
of this statement in include/linux/lockdep.h and
Documentation/lockdep-design.txt]
Furthermore, this "all possible scenarios" property of the validator also
enables the finding of complex, highly unlikely multi-CPU multi-context races
via single-context rules, increasing the likelihood of finding bugs
drastically. In practical terms: the lock validator already found a bug in
the upstream kernel that could only occur on systems with 3 or more CPUs, and
which needed 3 very unlikely code sequences to occur at once on the 3 CPUs.
That bug was found and reported on a single-CPU system (!). So in essence a
race will be found "piecemeal-wise", triggering all the necessary components
for the race, without having to reproduce the race scenario itself! In its
short existence the lock validator found and reported many bugs before they
actually caused a real deadlock.
To further increase the efficiency of the validator, the mapping is not per
"lock instance", but per "lock-class". For example, all struct inode objects
in the kernel have inode->inotify_mutex. If there are 10,000 inodes cached,
then there are 10,000 lock objects. But ->inotify_mutex is a single "lock
type", and all locking activities that occur against ->inotify_mutex are
"unified" into this single lock-class. The advantage of the lock-class
approach is that all historical ->inotify_mutex uses are mapped into a single
(and as narrow as possible) set of locking rules - regardless of how many
different tasks or inode structures it took to build this set of rules. The
set of rules persist during the lifetime of the kernel.
To see the rough magnitude of checking that the lock validator does, here's a
portion of /proc/lockdep_stats, fresh after bootup:
lock-classes: 694 [max: 2048]
direct dependencies: 1598 [max: 8192]
indirect dependencies: 17896
all direct dependencies: 16206
dependency chains: 1910 [max: 8192]
in-hardirq chains: 17
in-softirq chains: 105
in-process chains: 1065
stack-trace entries: 38761 [max: 131072]
combined max dependencies: 2033928
hardirq-safe locks: 24
hardirq-unsafe locks: 176
softirq-safe locks: 53
softirq-unsafe locks: 137
irq-safe locks: 59
irq-unsafe locks: 176
The lock validator has observed 1598 actual single-thread locking patterns,
and has validated all possible 2033928 distinct locking scenarios.
More details about the design of the lock validator can be found in
Documentation/lockdep-design.txt, which can also found at:
http://redhat.com/~mingo/lockdep-patches/lockdep-design.txt
[bunk@stusta.de: cleanups]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
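The "locking rules" the validator accumulates are ordering rules between lock classes, and the canonical report is a dependency cycle: one path takes class A then B, another takes B then A. The pthread sketch below shows that pattern in user space; it runs the two paths sequentially, so it cannot hang, which mirrors the point that the cycle is detectable without the deadlock ever being reproduced:

/* Two paths acquire the same two lock classes in opposite order.  Run
 * sequentially the program is harmless, but the A->B and B->A dependencies
 * together form exactly the kind of cycle lockdep reports. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void path_one(void)
{
	pthread_mutex_lock(&lock_a);	/* records the dependency A -> B */
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void path_two(void)
{
	pthread_mutex_lock(&lock_b);	/* records B -> A: a cycle with path_one */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}

int main(void)
{
	path_one();
	path_two();
	puts("no hang, but the ordering cycle A->B->A exists");
	return 0;
}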
2006-07-03 15:24:50 +08:00
|
|
|
#ifdef CONFIG_LOCKDEP
|
|
|
|
p->lockdep_depth = 0; /* no locks held yet */
|
|
|
|
p->curr_chain_key = 0;
|
|
|
|
p->lockdep_recursion = 0;
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-10 07:59:20 +08:00
|
|
|
#ifdef CONFIG_DEBUG_MUTEXES
|
|
|
|
p->blocked_on = NULL; /* not blocked yet */
|
|
|
|
#endif
|
2008-12-19 22:10:24 +08:00
|
|
|
if (unlikely(ptrace_reparented(current)))
|
|
|
|
ptrace_fork(p, clone_flags);
|
2006-01-10 07:59:20 +08:00
|
|
|
|
sched: fix copy_namespace() <-> sched_fork() dependency in do_fork
Sukadev Bhattiprolu reported a kernel crash with control groups.
There are couple of problems discovered by Suka's test:
- The test requires the cgroup filesystem to be mounted with
atleast the cpu and ns options (i.e both namespace and cpu
controllers are active in the same hierarchy).
# mkdir /dev/cpuctl
# mount -t cgroup -ocpu,ns none cpuctl
(or simply)
# mount -t cgroup none cpuctl -> Will activate all controllers
in same hierarchy.
- The test invokes clone() with CLONE_NEWNS set. This causes a new child
to be created, also a new group (do_fork->copy_namespaces->ns_cgroup_clone->
cgroup_clone) and the child is attached to the new group (cgroup_clone->
attach_task->sched_move_task). At this point in time, the child's scheduler
related fields are uninitialized (including its on_rq field, which it has
inherited from parent). As a result sched_move_task thinks its on
runqueue, when it isn't.
As a solution to this problem, I moved sched_fork() call, which
initializes scheduler related fields on a new task, before
copy_namespaces(). I am not sure though whether moving up will
cause other side-effects. Do you see any issue?
- The second problem exposed by this test is that task_new_fair()
assumes that parent and child will be part of the same group (which
needn't be as this test shows). As a result, cfs_rq->curr can be NULL
for the child.
The solution is to test for curr pointer being NULL in
task_new_fair().
With the patch below, I could run ns_exec() fine w/o a crash.
Reported-by: Sukadev Bhattiprolu <sukadev@us.ibm.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2007-11-10 05:39:39 +08:00
|
|
|
/* Perform scheduler related setup. Assign this task to a CPU. */
|
|
|
|
sched_fork(p, clone_flags);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if ((retval = audit_alloc(p)))
|
2008-11-14 07:39:17 +08:00
|
|
|
goto bad_fork_cleanup_policy;
|
2005-04-17 06:20:36 +08:00
|
|
|
/* copy all the process information */
|
|
|
|
if ((retval = copy_semundo(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_audit;
|
|
|
|
if ((retval = copy_files(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_semundo;
|
|
|
|
if ((retval = copy_fs(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_files;
|
|
|
|
if ((retval = copy_sighand(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_fs;
|
|
|
|
if ((retval = copy_signal(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_sighand;
|
|
|
|
if ((retval = copy_mm(clone_flags, p)))
|
|
|
|
goto bad_fork_cleanup_signal;
|
2006-10-02 17:18:06 +08:00
|
|
|
if ((retval = copy_namespaces(clone_flags, p)))
|
CRED: Inaugurate COW credentials
Inaugurate copy-on-write credentials management. This uses RCU to manage the
credentials pointer in the task_struct with respect to accesses by other tasks.
A process may only modify its own credentials, and so does not need locking to
access or modify its own credentials.
A mutex (cred_replace_mutex) is added to the task_struct to control the effect
of PTRACE_ATTACHED on credential calculations, particularly with respect to
execve().
With this patch, the contents of an active credentials struct may not be
changed directly; rather a new set of credentials must be prepared, modified
and committed using something like the following sequence of events:
struct cred *new = prepare_creds();
int ret = blah(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
There are some exceptions to this rule: the keyrings pointed to by the active
credentials may be instantiated - keyrings violate the COW rule as managing
COW keyrings is tricky, given that it is possible for a task to directly alter
the keys in a keyring in use by another task.
To help enforce this, various pointers to sets of credentials, such as those in
the task_struct, are declared const. The purpose of this is compile-time
discouragement of altering credentials through those pointers. Once a set of
credentials has been made public through one of these pointers, it may not be
modified, except under special circumstances:
(1) Its reference count may be incremented and decremented.
(2) The keyrings to which it points may be modified, but not replaced.
The only safe way to modify anything else is to create a replacement and commit
using the functions described in Documentation/credentials.txt (which will be
added by a later patch).
This patch and the preceding patches have been tested with the LTP SELinux
testsuite.
This patch makes several logical sets of alteration:
(1) execve().
This now prepares and commits credentials in various places in the
security code rather than altering the current creds directly.
(2) Temporary credential overrides.
do_coredump() and sys_faccessat() now prepare their own credentials and
temporarily override the ones currently on the acting thread, whilst
preventing interference from other threads by holding cred_replace_mutex
on the thread being dumped.
This will be replaced in a future patch by something that hands down the
credentials directly to the functions being called, rather than altering
the task's objective credentials.
(3) LSM interface.
A number of functions have been changed, added or removed:
(*) security_capset_check(), ->capset_check()
(*) security_capset_set(), ->capset_set()
Removed in favour of security_capset().
(*) security_capset(), ->capset()
New. This is passed a pointer to the new creds, a pointer to the old
creds and the proposed capability sets. It should fill in the new
creds or return an error. All pointers, barring the pointer to the
new creds, are now const.
(*) security_bprm_apply_creds(), ->bprm_apply_creds()
Changed; now returns a value, which will cause the process to be
killed if it's an error.
(*) security_task_alloc(), ->task_alloc_security()
Removed in favour of security_prepare_creds().
(*) security_cred_free(), ->cred_free()
New. Free security data attached to cred->security.
(*) security_prepare_creds(), ->cred_prepare()
New. Duplicate any security data attached to cred->security.
(*) security_commit_creds(), ->cred_commit()
New. Apply any security effects for the upcoming installation of new
security by commit_creds().
(*) security_task_post_setuid(), ->task_post_setuid()
Removed in favour of security_task_fix_setuid().
(*) security_task_fix_setuid(), ->task_fix_setuid()
Fix up the proposed new credentials for setuid(). This is used by
cap_set_fix_setuid() to implicitly adjust capabilities in line with
setuid() changes. Changes are made to the new credentials, rather
than the task itself as in security_task_post_setuid().
(*) security_task_reparent_to_init(), ->task_reparent_to_init()
Removed. Instead the task being reparented to init is referred
directly to init's credentials.
NOTE! This results in the loss of some state: SELinux's osid no
longer records the sid of the thread that forked it.
(*) security_key_alloc(), ->key_alloc()
(*) security_key_permission(), ->key_permission()
Changed. These now take cred pointers rather than task pointers to
refer to the security context.
(4) sys_capset().
This has been simplified and uses less locking. The LSM functions it
calls have been merged.
(5) reparent_to_kthreadd().
This gives the current thread the same credentials as init by simply using
commit_thread() to point that way.
(6) __sigqueue_alloc() and switch_uid()
__sigqueue_alloc() can't stop the target task from changing its creds
beneath it, so this function gets a reference to the currently applicable
user_struct which it then passes into the sigqueue struct it returns if
successful.
switch_uid() is now called from commit_creds(), and possibly should be
folded into that. commit_creds() should take care of protecting
__sigqueue_alloc().
(7) [sg]et[ug]id() and co and [sg]et_current_groups.
The set functions now all use prepare_creds(), commit_creds() and
abort_creds() to build and check a new set of credentials before applying
it.
security_task_set[ug]id() is called inside the prepared section. This
guarantees that nothing else will affect the creds until we've finished.
The calling of set_dumpable() has been moved into commit_creds().
Much of the functionality of set_user() has been moved into
commit_creds().
The get functions all simply access the data directly.
(8) security_task_prctl() and cap_task_prctl().
security_task_prctl() has been modified to return -ENOSYS if it doesn't
want to handle a function, or otherwise return the return value directly
rather than through an argument.
Additionally, cap_task_prctl() now prepares a new set of credentials, even
if it doesn't end up using it.
(9) Keyrings.
A number of changes have been made to the keyrings code:
(a) switch_uid_keyring(), copy_keys(), exit_keys() and suid_keys() have
all been dropped and built in to the credentials functions directly.
They may want separating out again later.
(b) key_alloc() and search_process_keyrings() now take a cred pointer
rather than a task pointer to specify the security context.
(c) copy_creds() gives a new thread within the same thread group a new
thread keyring if its parent had one, otherwise it discards the thread
keyring.
(d) The authorisation key now points directly to the credentials to extend
the search into, rather than pointing to the task that carries them.
(e) Installing thread, process or session keyrings causes a new set of
credentials to be created, even though it's not strictly necessary for
process or session keyrings (they're shared).
(10) Usermode helper.
The usermode helper code now carries a cred struct pointer in its
subprocess_info struct instead of a new session keyring pointer. This set
of credentials is derived from init_cred and installed on the new process
after it has been cloned.
call_usermodehelper_setup() allocates the new credentials and
call_usermodehelper_freeinfo() discards them if they haven't been used. A
special cred function (prepare_usermodeinfo_creds()) is provided
specifically for call_usermodehelper_setup() to call.
call_usermodehelper_setkeys() adjusts the credentials to sport the
supplied keyring as the new session keyring.
(11) SELinux.
SELinux has a number of changes, in addition to those to support the LSM
interface changes mentioned above:
(a) selinux_setprocattr() no longer does its check for whether the
current ptracer can access processes with the new SID inside the lock
that covers getting the ptracer's SID. Whilst this lock ensures that
the check is done with the ptracer pinned, the result is only valid
until the lock is released, so there's no point doing it inside the
lock.
(12) is_single_threaded().
This function has been extracted from selinux_setprocattr() and put into
a file of its own in the lib/ directory as join_session_keyring() now
wants to use it too.
The code in SELinux just checked to see whether a task shared mm_structs
with other tasks (CLONE_VM), but that isn't good enough. We really want
to know if they're part of the same thread group (CLONE_THREAD).
(13) nfsd.
The NFS server daemon now has to use the COW credentials to set the
credentials it is going to use. It really needs to pass the credentials
down to the functions it calls, but it can't do that until other patches
in this series have been applied.
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: James Morris <jmorris@namei.org>
Signed-off-by: James Morris <jmorris@namei.org>
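The prepare/modify/commit sequence quoted above is the whole discipline; the user-space model below imitates it with a reference-counted structure so the copy-on-write rule is visible end to end. The names mirror the kernel functions, but the bodies are only a sketch (single-threaded, malloc-based, no RCU):

/* User-space analogue of the COW rule described above: the published cred
 * is never modified in place; changes go through a prepared copy that is
 * committed by swapping the pointer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cred {
	int refcount;
	unsigned int uid;
};

static struct cred *current_cred;	/* the published, read-only set */

static struct cred *prepare_creds(void)
{
	struct cred *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	memcpy(new, current_cred, sizeof(*new));	/* start from the current set */
	new->refcount = 1;
	return new;
}

static void abort_creds(struct cred *new)
{
	free(new);			/* never published, so just discard */
}

static int commit_creds(struct cred *new)
{
	struct cred *old = current_cred;

	current_cred = new;		/* publish; the kernel uses RCU here */
	if (--old->refcount == 0)
		free(old);
	return 0;
}

static int set_uid(unsigned int uid)
{
	struct cred *new = prepare_creds();

	if (!new)
		return -1;
	new->uid = uid;			/* modify the private copy only */
	return commit_creds(new);
}

int main(void)
{
	current_cred = calloc(1, sizeof(*current_cred));
	if (!current_cred)
		return 1;
	current_cred->refcount = 1;
	set_uid(1000);
	printf("uid is now %u\n", current_cred->uid);
	(void)abort_creds;		/* unused in this tiny demo */
	return 0;
}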
2008-11-14 07:39:23 +08:00
|
|
|
goto bad_fork_cleanup_mm;
|
2008-01-24 15:54:47 +08:00
|
|
|
if ((retval = copy_io(clone_flags, p)))
|
2008-01-24 15:52:45 +08:00
|
|
|
goto bad_fork_cleanup_namespaces;
|
2005-04-17 06:20:36 +08:00
|
|
|
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
|
|
|
|
if (retval)
|
2008-01-24 15:52:45 +08:00
|
|
|
goto bad_fork_cleanup_io;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-19 14:40:07 +08:00
|
|
|
if (pid != &init_struct_pid) {
|
|
|
|
retval = -ENOMEM;
|
2009-01-08 10:08:49 +08:00
|
|
|
pid = alloc_pid(p->nsproxy->pid_ns);
|
2007-10-19 14:40:07 +08:00
|
|
|
if (!pid)
|
2008-01-24 15:52:45 +08:00
|
|
|
goto bad_fork_cleanup_io;
|
2007-10-19 14:40:11 +08:00
|
|
|
|
|
|
|
if (clone_flags & CLONE_NEWPID) {
|
2009-01-08 10:08:49 +08:00
|
|
|
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
|
2007-10-19 14:40:11 +08:00
|
|
|
if (retval < 0)
|
|
|
|
goto bad_fork_free_pid;
|
|
|
|
}
|
2007-10-19 14:40:07 +08:00
|
|
|
}
|
|
|
|
|
2008-12-04 00:04:51 +08:00
|
|
|
ftrace_graph_init_task(p);
|
|
|
|
|
2007-10-19 14:40:07 +08:00
|
|
|
p->pid = pid_nr(pid);
|
|
|
|
p->tgid = p->pid;
|
|
|
|
if (clone_flags & CLONE_THREAD)
|
|
|
|
p->tgid = current->tgid;
|
|
|
|
|
2008-07-25 16:47:06 +08:00
|
|
|
if (current->nsproxy != p->nsproxy) {
|
|
|
|
retval = ns_cgroup_clone(p, pid);
|
|
|
|
if (retval)
|
2008-12-04 00:04:51 +08:00
|
|
|
goto bad_fork_free_graph;
|
2008-07-25 16:47:06 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
|
|
|
|
/*
|
|
|
|
* Clear TID on mm_release()?
|
|
|
|
*/
|
|
|
|
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
|
2007-10-17 14:27:30 +08:00
|
|
|
#ifdef CONFIG_FUTEX
|
2006-03-27 17:16:27 +08:00
|
|
|
p->robust_list = NULL;
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
p->compat_robust_list = NULL;
|
|
|
|
#endif
|
2006-06-27 17:54:58 +08:00
|
|
|
INIT_LIST_HEAD(&p->pi_state_list);
|
|
|
|
p->pi_state_cache = NULL;
|
2007-10-17 14:27:30 +08:00
|
|
|
#endif
|
[PATCH] Fix sigaltstack corruption among cloned threads
This patch fixes alternate signal stack corruption among cloned threads
with CLONE_SIGHAND (and CLONE_VM) for linux-2.6.16-rc6.
The value of alternate signal stack is currently inherited after a call of
clone(... CLONE_SIGHAND | CLONE_VM). But if sigaltstack is set by a
parent thread, and then if multiple cloned child threads (+ parent threads)
call a signal handler at the same time, some threads may conflict -
because they share the same alternate signal stack region.
Eventually they get SIGSEGV. It's an undesirable race condition. Note that
child threads created from NPTL pthread_create() also hit this conflict
when the parent thread uses sigaltstack, without my patch.
To fix this problem, this patch clears the child threads' sigaltstack
information like exec(). This behavior follows the SUSv3 specification.
In SUSv3, pthread_create() says "The alternate stack shall not be inherited
(when new threads are initialized)". It means that sigaltstack should be
cleared when sigaltstack memory space is shared by cloned threads with
CLONE_SIGHAND.
Note that I chose "if (clone_flags & CLONE_SIGHAND)" line because:
- If there were no clone_flags test at all, fork() would not inherit sigaltstack.
- CLONE_VM is another choice, but vfork() does not inherit sigaltstack.
- CLONE_SIGHAND implies CLONE_VM, and it looks suitable.
- CLONE_THREAD is another candidate, and includes CLONE_SIGHAND + CLONE_VM,
but this flag has a bit different semantics.
I decided to use CLONE_SIGHAND.
[ Changed to test for CLONE_VM && !CLONE_VFORK after discussion --Linus ]
Signed-off-by: GOTO Masanori <gotom@sanori.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Linus Torvalds <torvalds@osdl.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
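The behaviour being adopted here can be observed from user space: after the change, a new NPTL thread starts with SS_DISABLE set and has to install its own alternate stack instead of silently sharing its creator's. A minimal demonstration (SIGSTKSZ-sized heap buffers, error checking omitted for brevity):

/* Demonstrates the rule enforced above: a new thread does not inherit the
 * creator's alternate signal stack and must install its own. */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void *thread_fn(void *arg)
{
	stack_t ss, old;

	(void)arg;
	sigaltstack(NULL, &old);	/* query only: what did we inherit? */
	printf("new thread starts with%s an alternate signal stack\n",
	       (old.ss_flags & SS_DISABLE) ? "out" : "");

	ss.ss_sp = malloc(SIGSTKSZ);	/* so it installs a private one */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);
	return NULL;
}

int main(void)
{
	stack_t ss;
	pthread_t t;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);		/* main thread's private stack */

	pthread_create(&t, NULL, thread_fn, NULL);
	pthread_join(t, NULL);
	return 0;
}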
2006-03-14 13:20:44 +08:00
|
|
|
/*
|
|
|
|
* sigaltstack should be cleared when sharing the same VM
|
|
|
|
*/
|
|
|
|
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
|
|
|
|
p->sas_ss_sp = p->sas_ss_size = 0;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Syscall tracing should be turned off in the child regardless
|
|
|
|
* of CLONE_PTRACE.
|
|
|
|
*/
|
|
|
|
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
|
[PATCH] UML Support - Ptrace: adds the host SYSEMU support, for UML and general usage
Jeff Dike <jdike@addtoit.com>,
Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>,
Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Adds a new ptrace(2) mode, called PTRACE_SYSEMU, resembling PTRACE_SYSCALL
except that the kernel does not execute the requested syscall; this is useful
to improve performance for virtual environments, like UML, which want to run
the syscall on their own.
In fact, using PTRACE_SYSCALL means stopping child execution twice, on entry
and on exit, and each time you also have two context switches; with SYSEMU you
avoid the 2nd stop and so save two context switches per syscall.
Also, some architectures don't have support in the host for changing the
syscall number via ptrace(), which is currently needed to skip syscall
execution (UML turns any syscall into getpid() to avoid it being executed on
the host). Fixing that is hard, while SYSEMU is easier to implement.
* This version of the patch includes some suggestions of Jeff Dike to avoid
adding any instructions to the syscall fast path, plus some other little
changes, by myself, to make it work even when the syscall is executed with
SYSENTER (but I'm unsure about them). It has been widely tested for quite a
lot of time.
* Various fixed were included to handle the various switches between
various states, i.e. when for instance a syscall entry is traced with one of
PT_SYSCALL / _SYSEMU / _SINGLESTEP and another one is used on exit.
Basically, this is done by remembering which one of them was used even after
the call to ptrace_notify().
* We're combining TIF_SYSCALL_EMU with TIF_SYSCALL_TRACE or TIF_SINGLESTEP
to make do_syscall_trace() notice that the current syscall was started with
SYSEMU on entry, so that no notification ought to be done in the exit path;
this is a bit of a hack, so this problem is solved in another way in next
patches.
* Also, the effects of the patch:
"Ptrace - i386: fix Syscall Audit interaction with singlestep"
are cancelled; they are restored back in the last patch of this series.
Detailed descriptions of the patches doing this kind of processing follow (but
I've already summed everything up).
* Fix behaviour when changing interception kind #1.
In do_syscall_trace(), we check the status of the TIF_SYSCALL_EMU flag
only after doing the debugger notification; but the debugger might have
changed the status of this flag because he continued execution with
PTRACE_SYSCALL, so this is wrong. This patch fixes it by saving the flag
status before calling ptrace_notify().
* Fix behaviour when changing interception kind #2:
avoid intercepting syscall on return when using SYSCALL again.
A guest process switching from using PTRACE_SYSEMU to PTRACE_SYSCALL
crashes.
The problem is in arch/i386/kernel/entry.S. The current SYSEMU patch
inhibits the syscall-handler to be called, but does not prevent
do_syscall_trace() to be called after this for syscall completion
interception.
The appended patch fixes this. It reuses the flag TIF_SYSCALL_EMU to
remember "we come from PTRACE_SYSEMU and now are in PTRACE_SYSCALL", since
the flag is unused in the depicted situation.
* Fix behaviour when changing interception kind #3:
avoid intercepting syscall on return when using SINGLESTEP.
When testing 2.6.9 with the skas3.v6 patch and my latest patch, I had
problems with singlestepping on UML in SKAS with SYSEMU. It looped
receiving SIGTRAPs without moving forward. EIP of the traced process was
the same for all SIGTRAPs.
What's missing is to handle switching from PTRACE_SYSCALL_EMU to
PTRACE_SINGLESTEP in a way very similar to what is done for the change from
PTRACE_SYSCALL_EMU to PTRACE_SYSCALL_TRACE.
I.e., after calling ptrace(PTRACE_SYSEMU), on the return path, the debugger is
notified and then wake ups the process; the syscall is executed (or skipped,
when do_syscall_trace() returns 0, i.e. when using PTRACE_SYSEMU), and
do_syscall_trace() is called again. Since we are on the return path of a
SYSEMU'd syscall, if the wake up is performed through ptrace(PTRACE_SYSCALL),
we must still avoid notifying the parent of the syscall exit. Now, this
behaviour is extended even to resuming with PTRACE_SINGLESTEP.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
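A minimal tracer showing the new request in use from user space appears below; it assumes x86-64 (for the orig_rax register name) and a libc that exposes PTRACE_SYSEMU. It prints the numbers of syscalls that were intercepted but never executed, then kills the child, since even the child's exit syscalls are being skipped:

/* Sketch of a PTRACE_SYSEMU tracer: the child's syscalls stop it at entry
 * and are not executed by the host.  After a few interceptions the tracer
 * kills the child, because its _exit() would be skipped too. */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	int i;

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* hand control to the tracer */
		getpid();		/* will be intercepted, not executed */
		_exit(0);		/* also intercepted */
	}

	waitpid(child, NULL, 0);	/* child stopped itself above */
	for (i = 0; i < 3; i++) {
		struct user_regs_struct regs;

		if (ptrace(PTRACE_SYSEMU, child, NULL, NULL) < 0 ||
		    waitpid(child, NULL, 0) < 0 ||
		    ptrace(PTRACE_GETREGS, child, NULL, &regs) < 0)
			break;
		printf("intercepted syscall %lld, not executed\n",
		       (long long)regs.orig_rax);
	}
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}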
2005-09-04 06:57:18 +08:00
|
|
|
#ifdef TIF_SYSCALL_EMU
|
|
|
|
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
|
|
|
|
#endif
|
2008-01-26 04:08:34 +08:00
|
|
|
clear_all_latency_tracing(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Our parent execution domain becomes current domain
|
|
|
|
These must match for thread signalling to apply */
|
|
|
|
p->parent_exec_id = p->self_exec_id;
|
|
|
|
|
|
|
|
/* ok, now we should be set up.. */
|
|
|
|
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
|
|
|
|
p->pdeath_signal = 0;
|
|
|
|
p->exit_state = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, make it visible to the rest of the system.
|
|
|
|
	 * We don't wake it up yet.
|
|
|
|
*/
|
|
|
|
p->group_leader = p;
|
2006-03-29 08:11:25 +08:00
|
|
|
INIT_LIST_HEAD(&p->thread_group);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-19 14:39:33 +08:00
|
|
|
/* Now that the task is set up, run cgroup callbacks if
|
|
|
|
* necessary. We need to run them before the task is visible
|
|
|
|
* on the tasklist. */
|
|
|
|
cgroup_fork_callbacks(p);
|
|
|
|
cgroup_callbacks_done = 1;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Need tasklist lock for parent etc handling! */
|
|
|
|
write_lock_irq(&tasklist_lock);
|
|
|
|
|
|
|
|
/*
|
2005-06-26 05:57:29 +08:00
|
|
|
* The task hasn't been attached yet, so its cpus_allowed mask will
|
|
|
|
* not be changed, nor will its assigned CPU.
|
|
|
|
*
|
|
|
|
* The cpus_allowed mask of the parent may have changed after it was
|
|
|
|
* copied first time - so re-copy it here, then check the child's CPU
|
|
|
|
* to ensure it is on a valid CPU (and if not, just force it back to
|
|
|
|
	 * parent's CPU). This avoids a lot of nasty races.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
p->cpus_allowed = current->cpus_allowed;
|
2008-01-26 04:08:30 +08:00
|
|
|
p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
|
2005-09-17 10:27:40 +08:00
|
|
|
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
|
|
|
|
!cpu_online(task_cpu(p))))
|
2005-06-26 05:57:29 +08:00
|
|
|
set_task_cpu(p, smp_processor_id());
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* CLONE_PARENT re-uses the old parent */
|
|
|
|
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
|
|
|
|
p->real_parent = current->real_parent;
|
|
|
|
else
|
|
|
|
p->real_parent = current;
|
|
|
|
|
2006-02-16 03:13:24 +08:00
|
|
|
spin_lock(¤t->sighand->siglock);
|
2006-03-29 08:11:26 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Process group and session signals need to be delivered to just the
|
|
|
|
* parent before the fork or both the parent and the child after the
|
|
|
|
* fork. Restart if a signal comes in before we add the new process to
|
|
|
|
	 * its process group.
|
|
|
|
* A fatal signal pending means that current will exit, so the new
|
|
|
|
* thread can't slip out of an OOM kill (or normal SIGKILL).
|
|
|
|
*/
|
2007-10-18 18:06:07 +08:00
|
|
|
recalc_sigpending();
|
2006-03-29 08:11:26 +08:00
|
|
|
if (signal_pending(current)) {
|
|
|
|
spin_unlock(¤t->sighand->siglock);
|
|
|
|
write_unlock_irq(&tasklist_lock);
|
|
|
|
retval = -ERESTARTNOINTR;
|
2008-12-04 00:04:51 +08:00
|
|
|
goto bad_fork_free_graph;
|
2006-03-29 08:11:26 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (clone_flags & CLONE_THREAD) {
|
|
|
|
p->group_leader = current->group_leader;
|
2006-03-29 08:11:25 +08:00
|
|
|
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-03-29 08:11:07 +08:00
|
|
|
if (likely(p->pid)) {
|
2008-03-25 09:36:23 +08:00
|
|
|
list_add_tail(&p->sibling, &p->real_parent->children);
|
2008-07-26 10:45:47 +08:00
|
|
|
tracehook_finish_clone(p, clone_flags, trace);
|
2006-03-29 08:11:07 +08:00
|
|
|
|
|
|
|
if (thread_group_leader(p)) {
|
2007-12-05 15:45:04 +08:00
|
|
|
if (clone_flags & CLONE_NEWPID)
|
2007-10-19 14:40:10 +08:00
|
|
|
p->nsproxy->pid_ns->child_reaper = p;
|
2006-03-29 08:11:07 +08:00
|
|
|
|
2008-02-08 20:19:19 +08:00
|
|
|
p->signal->leader_pid = pid;
|
2008-10-13 17:37:26 +08:00
|
|
|
tty_kref_put(p->signal->tty);
|
|
|
|
p->signal->tty = tty_kref_get(current->signal->tty);
|
2007-12-05 15:45:04 +08:00
|
|
|
set_task_pgrp(p, task_pgrp_nr(current));
|
|
|
|
set_task_session(p, task_session_nr(current));
|
|
|
|
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
|
|
|
|
attach_pid(p, PIDTYPE_SID, task_session(current));
|
2006-04-19 13:20:16 +08:00
|
|
|
list_add_tail_rcu(&p->tasks, &init_task.tasks);
|
2005-04-17 06:20:36 +08:00
|
|
|
__get_cpu_var(process_counts)++;
|
2006-03-29 08:11:07 +08:00
|
|
|
}
|
2007-05-11 13:23:03 +08:00
|
|
|
attach_pid(p, PIDTYPE_PID, pid);
|
2006-03-29 08:11:07 +08:00
|
|
|
nr_threads++;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
total_forks++;
|
2006-02-16 03:13:24 +08:00
|
|
|
spin_unlock(¤t->sighand->siglock);
|
2005-04-17 06:20:36 +08:00
|
|
|
write_unlock_irq(&tasklist_lock);
|
2005-11-29 05:43:48 +08:00
|
|
|
proc_fork_connector(p);
|
2007-10-19 14:39:36 +08:00
|
|
|
cgroup_post_fork(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
return p;
|
|
|
|
|
2008-12-04 00:04:51 +08:00
|
|
|
bad_fork_free_graph:
|
|
|
|
ftrace_graph_exit_task(p);
|
2007-10-19 14:40:07 +08:00
|
|
|
bad_fork_free_pid:
|
|
|
|
if (pid != &init_struct_pid)
|
|
|
|
free_pid(pid);
|
2008-01-24 15:52:45 +08:00
|
|
|
bad_fork_cleanup_io:
|
|
|
|
put_io_context(p->io_context);
|
2006-10-02 17:18:06 +08:00
|
|
|
bad_fork_cleanup_namespaces:
|
2007-01-31 05:35:18 +08:00
|
|
|
exit_task_namespaces(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
bad_fork_cleanup_mm:
|
|
|
|
if (p->mm)
|
|
|
|
mmput(p->mm);
|
|
|
|
bad_fork_cleanup_signal:
|
2006-03-29 08:11:16 +08:00
|
|
|
cleanup_signal(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
bad_fork_cleanup_sighand:
|
2006-03-29 08:11:27 +08:00
|
|
|
__cleanup_sighand(p->sighand);
|
2005-04-17 06:20:36 +08:00
|
|
|
bad_fork_cleanup_fs:
|
|
|
|
exit_fs(p); /* blocking */
|
|
|
|
bad_fork_cleanup_files:
|
|
|
|
exit_files(p); /* blocking */
|
|
|
|
bad_fork_cleanup_semundo:
|
|
|
|
exit_sem(p);
|
|
|
|
bad_fork_cleanup_audit:
|
|
|
|
audit_free(p);
|
|
|
|
bad_fork_cleanup_policy:
|
|
|
|
#ifdef CONFIG_NUMA
|
2008-04-28 17:13:08 +08:00
|
|
|
mpol_put(p->mempolicy);
|
2007-10-19 14:39:33 +08:00
|
|
|
bad_fork_cleanup_cgroup:
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
2007-10-19 14:39:33 +08:00
|
|
|
cgroup_exit(p, cgroup_callbacks_done);
|
2006-09-01 12:27:38 +08:00
|
|
|
delayacct_tsk_free(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (p->binfmt)
|
|
|
|
module_put(p->binfmt->module);
|
|
|
|
bad_fork_cleanup_put_domain:
|
2005-11-14 08:06:55 +08:00
|
|
|
module_put(task_thread_info(p)->exec_domain->module);
|
2005-04-17 06:20:36 +08:00
|
|
|
bad_fork_cleanup_count:
|
2008-11-14 07:39:23 +08:00
|
|
|
atomic_dec(&p->cred->user->processes);
|
2008-11-14 07:39:26 +08:00
|
|
|
put_cred(p->real_cred);
|
2008-11-14 07:39:17 +08:00
|
|
|
put_cred(p->cred);
|
2005-04-17 06:20:36 +08:00
|
|
|
bad_fork_free:
|
|
|
|
free_task(p);
|
2006-01-08 17:04:02 +08:00
|
|
|
fork_out:
|
|
|
|
return ERR_PTR(retval);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-02-06 17:37:55 +08:00
|
|
|
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
memset(regs, 0, sizeof(struct pt_regs));
|
|
|
|
return regs;
|
|
|
|
}
|
|
|
|
|
2007-02-01 21:52:48 +08:00
|
|
|
struct task_struct * __cpuinit fork_idle(int cpu)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-07-03 15:25:41 +08:00
|
|
|
struct task_struct *task;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct pt_regs regs;
|
|
|
|
|
2007-10-19 14:40:10 +08:00
|
|
|
task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL,
|
2008-07-26 10:45:47 +08:00
|
|
|
&init_struct_pid, 0);
|
2006-11-26 03:09:34 +08:00
|
|
|
if (!IS_ERR(task))
|
|
|
|
init_idle(task, cpu);
|
2006-03-29 08:11:07 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return task;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, this is the main fork-routine.
|
|
|
|
*
|
|
|
|
* It copies the process, and if successful kick-starts
|
|
|
|
* it and waits for it to finish using the VM if required.
|
|
|
|
*/
|
|
|
|
long do_fork(unsigned long clone_flags,
|
|
|
|
unsigned long stack_start,
|
|
|
|
struct pt_regs *regs,
|
|
|
|
unsigned long stack_size,
|
|
|
|
int __user *parent_tidptr,
|
|
|
|
int __user *child_tidptr)
|
|
|
|
{
|
|
|
|
struct task_struct *p;
|
|
|
|
int trace = 0;
|
[PATCH] pidhash: Refactor the pid hash table
Simplifies the code, reduces the need for 4 pid hash tables, and makes the
code more capable.
In the discussions I had with Oleg it was felt that to a large extent the
cleanup itself justified the work. With struct pid being dynamically
allocated meant we could create the hash table entry when the pid was
allocated and free the hash table entry when the pid was freed. Instead of
playing with the hash lists when ever a process would attach or detach to a
process.
For myself the fact that it gave what my previous task_ref patch gave for free
with simpler code was a big win. The problem is that if you hold a reference
to struct task_struct you lock in 10K of low memory. If you do that in a user
controllable way like /proc does, with an unprivileged but hostile user space
application with typical resource limits of 1000 fds and 100 processes I can
trigger the OOM killer by consuming all of low memory with task structs, on a
machine with 1GB of low memory.
If I instead hold a reference to struct pid which holds a pointer to my
task_struct, I don't suffer from that problem because struct pid is 2 orders
of magnitude smaller. In fact struct pid is small enough that most other
kernel data structures dwarf it, so simply limiting the number of referring
data structures is enough to prevent exhaustion of low memory.
This splits the current struct pid into two structures, struct pid and struct
pid_link, and reduces our number of hash tables from PIDTYPE_MAX to just one.
struct pid_link is the per process linkage into the hash tables and lives in
struct task_struct. struct pid is given an independent lifetime, and holds
pointers to each of the pid types.
The independent life of struct pid simplifies attach_pid, and detach_pid,
because we are always manipulating the list of pids and not the hash table.
In addition, giving struct pid an independent life makes the concept much
more powerful.
Kernel data structures can now embed a struct pid * instead of a pid_t and
not suffer from pid wrap around problems or from keeping unnecessarily
large amounts of memory allocated.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
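The split can be pictured with a few simplified type definitions; the model below keeps only the fields the description above talks about, replaces the kernel's hlist types with a plain node, and is not the real layout:

/* Simplified model of the split described above: struct pid gets its own
 * lifetime and per-type task lists; struct pid_link lives inside
 * task_struct and is that task's linkage into those lists. */
#include <stdio.h>

enum pid_type { PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, PIDTYPE_MAX };

struct node { struct node *next, *prev; };

struct pid {
	int count;			/* independent refcount: pinning this is far
					 * cheaper than pinning a task_struct */
	int nr;				/* the numeric pid value */
	struct node hash_link;		/* entry in the single hash table, keyed by nr */
	struct node tasks[PIDTYPE_MAX];	/* heads of the per-type task lists */
};

struct pid_link {
	struct node node;		/* this task's entry in pid->tasks[type] */
	struct pid *pid;
};

struct task_struct_model {
	struct pid_link pids[PIDTYPE_MAX];
	/* ... */
};

int main(void)
{
	printf("struct pid model: %zu bytes, task-side linkage: %zu bytes\n",
	       sizeof(struct pid), sizeof(struct task_struct_model));
	return 0;
}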
        long nr;

        /*
         * Do some preliminary argument and permissions checking before we
         * actually start allocating stuff
         */
        if (clone_flags & CLONE_NEWUSER) {
                if (clone_flags & CLONE_THREAD)
                        return -EINVAL;
                /* hopefully this check will go away when userns support is
                 * complete
                 */
                if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
                                !capable(CAP_SETGID))
                        return -EPERM;
        }

        /*
         * We hope to recycle these flags after 2.6.26
         */
        if (unlikely(clone_flags & CLONE_STOPPED)) {
                static int __read_mostly count = 100;

                if (count > 0 && printk_ratelimit()) {
                        char comm[TASK_COMM_LEN];

                        count--;
                        printk(KERN_INFO "fork(): process `%s' used deprecated "
                                        "clone flags 0x%lx\n",
                                get_task_comm(comm, current),
                                clone_flags & CLONE_STOPPED);
                }
        }

        /*
         * When called from kernel_thread, don't do user tracing stuff.
         */
        if (likely(user_mode(regs)))
                trace = tracehook_prepare_clone(clone_flags);

        p = copy_process(clone_flags, stack_start, regs, stack_size,
                         child_tidptr, NULL, trace);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might become invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                trace_sched_process_fork(current, p);

                nr = task_pid_vnr(p);

                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                audit_finish_fork(p);
                tracehook_report_clone(trace, regs, clone_flags, nr, p);

                /*
                 * We set PF_STARTING at creation in case tracing wants to
                 * use this to distinguish a fully live task from one that
                 * hasn't gotten to tracehook_report_clone() yet.  Now we
                 * clear it and set the child going.
                 */
                p->flags &= ~PF_STARTING;

                if (unlikely(clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                        __set_task_state(p, TASK_STOPPED);
                } else {
                        wake_up_new_task(p, clone_flags);
                }

                tracehook_report_clone_complete(trace, regs,
                                                clone_flags, nr, p);

                if (clone_flags & CLONE_VFORK) {
                        freezer_do_not_count();
                        wait_for_completion(&vfork);
                        freezer_count();
                        tracehook_report_vfork_done(p, nr);
                }
        } else {
                nr = PTR_ERR(p);
        }
        return nr;
}
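
/*
 * Illustrative sketch (editor's example, not taken from this file): the
 * architecture syscall stubs are thin wrappers around do_fork().  The
 * function names below are hypothetical and the pt_regs plumbing is
 * simplified; only the flag choices reflect the usual wrapper pattern.
 */
#if 0	/* example only */
long example_sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

long example_sys_vfork(struct pt_regs *regs)
{
        /* CLONE_VFORK makes the parent wait on the vfork completion above */
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
                       0, NULL, NULL);
}
#endif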

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
        struct sighand_struct *sighand = data;

        spin_lock_init(&sighand->siglock);
        init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        mmap_init();
}
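
/*
 * Editor's note (sketch, not from the original source): sighand_cache is
 * created with SLAB_DESTROY_BY_RCU above, so a freed sighand_struct may be
 * reused for a new allocation before an RCU grace period ends.  That is why
 * sighand_ctor() only initializes state once per slab object, and why
 * lockless readers that follow task->sighand must take siglock and then
 * re-check that the pointer is still the one they looked up.
 */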

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers and the task was created
         * using CLONE_THREAD, then must unshare the thread
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}
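
/*
 * Editor's note: the implications above chain together.  CLONE_THREAD
 * pulls in CLONE_VM, CLONE_VM pulls in CLONE_SIGHAND, and CLONE_NEWNS
 * pulls in CLONE_FS, so unshare(CLONE_NEWNS), for example, is handled as
 * unshare(CLONE_NEWNS | CLONE_FS) by the time sys_unshare() proceeds.
 */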

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1)) {
                return -EINVAL;
        }

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct sighand_struct *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct nsproxy *new_nsproxy = NULL;
        int do_sysvsem = 0;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
                goto bad_unshare_out;

        /*
         * CLONE_NEWIPC must also detach from the undolist: after switching
         * to a new ipc namespace, the semaphore arrays from the old
         * namespace are unreachable.
         */
        if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
                do_sysvsem = 1;
        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
                        new_fs)))
                goto bad_unshare_cleanup_fd;

        if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
                if (do_sysvsem) {
                        /*
                         * CLONE_SYSVSEM is equivalent to sys_exit().
                         */
                        exit_sem(current);
                }

                if (new_nsproxy) {
                        switch_task_namespaces(current, new_nsproxy);
                        new_nsproxy = NULL;
                }

                task_lock(current);

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

        if (new_nsproxy)
                put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}
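
/*
 * Userspace view (editor's example, not kernel code): the path above is
 * reached through the unshare(2) system call.  A program can privatize its
 * file descriptor table and fs_struct without privilege; the namespace
 * flags (CLONE_NEWNS and friends) additionally require CAP_SYS_ADMIN.  The
 * sketch below is meant to be built as a separate program, not as part of
 * this file.
 */
#if 0	/* userspace example only */
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Stop sharing the file table and cwd/root with any clone() siblings */
        if (unshare(CLONE_FILES | CLONE_FS) == -1) {
                fprintf(stderr, "unshare: %s\n", strerror(errno));
                return 1;
        }
        printf("file table and fs context are now private\n");
        return 0;
}
#endif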

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
        struct task_struct *task = current;
        struct files_struct *copy = NULL;
        int error;

        error = unshare_fd(CLONE_FILES, &copy);
        if (error || !copy) {
                *displaced = NULL;
                return error;
        }
        *displaced = task->files;
        task_lock(task);
        task->files = copy;
        task_unlock(task);
        return 0;
}
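
/*
 * Illustrative sketch of the intended caller (roughly the pattern used by
 * the exec path; details assumed rather than copied): unshare the file
 * table up front, then drop the displaced copy once it is no longer needed.
 */
#if 0	/* example only */
        struct files_struct *displaced;
        int retval;

        retval = unshare_files(&displaced);
        if (retval)
                return retval;
        /* ... do the actual work (e.g. load the new image) ... */
        if (displaced)
                put_files_struct(displaced);
#endif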