From 377747c40657eb35ad98a56439606d96a928425a Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 13 May 2013 19:16:34 +0100
Subject: [PATCH 1/4] ARM: entry: allow ARM-private syscalls to be restarted

System calls will only be restarted after signal handling if they (a)
return an error code indicating that a restart is required and (b) have
`why' set to a non-zero value, to indicate that the signal interrupted
them.

This patch leaves `why' set to a non-zero value for ARM-private
syscalls, and only zeroes it for syscalls that are not implemented.

Signed-off-by: Will Deacon
---
 arch/arm/kernel/entry-common.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 94104bf69719..74ad15d1a065 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
-2:	mov	why, #0				@ no longer a real syscall
 	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
-	bcs	arm_syscall
+	bcs	arm_syscall
+2:	mov	why, #0				@ no longer a real syscall
 	b	sys_ni_syscall			@ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
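As context for why restartability matters here: the ARM-private syscalls
live above __ARM_NR_BASE and are reached from user space through the raw
syscall() interface, as in the minimal sketch below. This is not part of
the patch; it assumes an ARM Linux target whose <asm/unistd.h> defines
__ARM_NR_cacheflush, and that the third (flags) argument must be zero.

/*
 * Minimal user-space caller for the ARM-private cacheflush syscall.
 * Illustration only: assumes <asm/unistd.h> provides __ARM_NR_cacheflush.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

long arm_cacheflush(void *start, void *end)
{
	/*
	 * With the patch above, a signal arriving during this call no
	 * longer prevents the kernel from restarting it after the
	 * signal handler returns.
	 */
	return syscall(__ARM_NR_cacheflush, start, end, 0);
}

A JIT that emits instructions into a writable, executable buffer would
call something like this on the freshly written range before branching
to it.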
From 28256d612726a28a8b9d3c49f2b74198c4423d6a Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 13 May 2013 15:21:49 +0100
Subject: [PATCH 2/4] ARM: cacheflush: split user cache-flushing into
 interruptible chunks

Flushing a large, non-faulting VMA from userspace can potentially result
in a long time spent flushing the cache line-by-line without preemption
occurring (in the case of CONFIG_PREEMPT=n). Whilst this doesn't affect
the stability of the system, it can certainly affect the responsiveness
and CPU availability for other tasks.

This patch splits up the user cacheflush code so that it flushes in
chunks of a page. After each chunk has been flushed, we may reschedule
if appropriate and, before processing the next chunk, we allow any
pending signals to be handled before resuming from where we left off.

Signed-off-by: Will Deacon
---
 arch/arm/include/asm/thread_info.h | 11 +++++
 arch/arm/kernel/traps.c            | 65 ++++++++++++++++++++++++++----
 2 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..7d77645128a8 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
 	__u32	extra[2];		/* Xscale 'acc' register, etc */
 };
 
+struct arm_restart_block {
+	union {
+		/* For user cache flushing */
+		struct {
+			unsigned long start;
+			unsigned long end;
+		} cache;
+	};
+};
+
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
 #endif
 	struct restart_block	restart_block;
+	struct arm_restart_block	arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)						\
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..4d268d912b0e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -499,6 +499,54 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	return regs->ARM_r0;
 }
 
+static long do_cache_op_restart(struct restart_block *);
+
+static inline int
+__do_cache_op(unsigned long start, unsigned long end)
+{
+	int ret;
+	unsigned long chunk = PAGE_SIZE;
+
+	do {
+		if (signal_pending(current)) {
+			struct thread_info *ti = current_thread_info();
+
+			ti->restart_block = (struct restart_block) {
+				.fn	= do_cache_op_restart,
+			};
+
+			ti->arm_restart_block = (struct arm_restart_block) {
+				{
+					.cache = {
+						.start	= start,
+						.end	= end,
+					},
+				},
+			};
+
+			return -ERESTART_RESTARTBLOCK;
+		}
+
+		ret = flush_cache_user_range(start, start + chunk);
+		if (ret)
+			return ret;
+
+		cond_resched();
+		start += chunk;
+	} while (start < end);
+
+	return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
+{
+	struct arm_restart_block *restart_block;
+
+	restart_block = &current_thread_info()->arm_restart_block;
+	return __do_cache_op(restart_block->cache.start,
+			     restart_block->cache.end);
+}
+
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
@@ -510,17 +558,18 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
-	if (vma && vma->vm_start < end) {
-		if (start < vma->vm_start)
-			start = vma->vm_start;
-		if (end > vma->vm_end)
-			end = vma->vm_end;
-
+	if (!vma || vma->vm_start >= end) {
 		up_read(&mm->mmap_sem);
-		return flush_cache_user_range(start, end);
+		return -EINVAL;
 	}
+
+	if (start < vma->vm_start)
+		start = vma->vm_start;
+	if (end > vma->vm_end)
+		end = vma->vm_end;
 	up_read(&mm->mmap_sem);
-	return -EINVAL;
+
+	return __do_cache_op(start, end);
 }
 
 /*
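The save/resume dance in __do_cache_op is easier to see stripped of
kernel scaffolding. The following self-contained sketch models the same
pattern with hypothetical names: interruption_pending, flush_one_chunk
and resume_state stand in for signal_pending(), flush_cache_user_range()
and the per-thread arm_restart_block, and the real code signals a
restart with -ERESTART_RESTARTBLOCK rather than a plain -1.

/*
 * Simplified model of the chunked flush above (illustration only,
 * hypothetical helper names). Work proceeds one page-sized chunk at a
 * time; if an interruption is pending, the current position is saved
 * so a later call can resume where this one left off.
 */
#include <stdbool.h>

#define CHUNK 4096UL			/* stands in for PAGE_SIZE */

struct resume_state {
	unsigned long start, end;	/* mirrors arm_restart_block.cache */
};

/* Models signal_pending(current); always false in this stub. */
static bool interruption_pending(void) { return false; }

/* Models flush_cache_user_range(); a no-op that reports success. */
static int flush_one_chunk(unsigned long s, unsigned long e)
{
	(void)s; (void)e;
	return 0;
}

static int do_chunked_op(unsigned long start, unsigned long end,
			 struct resume_state *resume)
{
	do {
		if (interruption_pending()) {
			resume->start = start;	/* save progress ... */
			resume->end = end;	/* ... for the restart */
			return -1;		/* kernel: -ERESTART_RESTARTBLOCK */
		}

		int ret = flush_one_chunk(start, start + CHUNK);
		if (ret)
			return ret;

		/* The kernel also calls cond_resched() here. */
		start += CHUNK;
	} while (start < end);

	return 0;
}

The key design point is that progress is saved per thread, so the
restart block set up on interruption is consumed by do_cache_op_restart
when the syscall is restarted.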
From d9524dc32cab52714dee0c8e59c7437ee33a239a Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 21 Aug 2012 15:33:19 +0100
Subject: [PATCH 3/4] ARM: cacheflush: don't round address range up to nearest
 page

The flush_cache_user_range macro takes a pair of addresses describing
the start and end of the virtual address range to flush. Due to an
accidental oversight when flush_cache_user_range was introduced, the
address range was rounded up so that the start and end addresses were
page-aligned.

For historical reference, the interesting commits in history.git are:

10eacf1775e1 ("[ARM] Clean up ARM cache handling interfaces (part 1)")
71432e79b76b ("[ARM] Add flush_cache_user_page() for sys_cacheflush()")

This patch removes the alignment code, reducing the amount of flushing
required for ranges that are not an exact multiple of PAGE_SIZE.

Reviewed-by: Catalin Marinas
Reported-by: Jonathan Austin
Signed-off-by: Will Deacon
---
 arch/arm/include/asm/cacheflush.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..bfd37e58f855 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that data previously

From 97c72d89ce0ec8c73f19d5e35ec1f90f7a14bed7 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 22 Aug 2012 11:06:54 +0100
Subject: [PATCH 4/4] ARM: cacheflush: don't bother rounding to nearest vma

do_cache_op finds the lowest VMA contained in the specified address
range and rounds the range to cover only the mapped addresses. Since
commit 4542b6a0fa6b ("ARM: 7365/1: drop unused parameter from
flush_cache_user_range") the VMA is not used for anything else in this
code and, seeing as the low-level cache flushing routines return
-EFAULT if the address is not valid, there is no need for this range
truncation.

This patch removes the VMA handling code from the cacheflushing
syscall.

Signed-off-by: Will Deacon
---
 arch/arm/kernel/traps.c | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4d268d912b0e..9b2c5d42c143 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -550,24 +550,11 @@ static long do_cache_op_restart(struct restart_block *unused)
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *vma;
-
 	if (end < start || flags)
 		return -EINVAL;
 
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, start);
-	if (!vma || vma->vm_start >= end) {
-		up_read(&mm->mmap_sem);
-		return -EINVAL;
-	}
-
-	if (start < vma->vm_start)
-		start = vma->vm_start;
-	if (end > vma->vm_end)
-		end = vma->vm_end;
-	up_read(&mm->mmap_sem);
+	if (!access_ok(VERIFY_READ, start, end - start))
+		return -EFAULT;
 
 	return __do_cache_op(start, end);
 }
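Taken together, the last two patches change the user-visible contract of
sys_cacheflush: exactly the requested [start, end) range is flushed, and
an invalid range is reported instead of being silently truncated to the
containing VMA. The sketch below illustrates the expected behaviour
under those assumptions; error handling is abbreviated, and the EFAULT
on the unmapped range relies on the low-level routine's fault fixups
mentioned in the commit message.

/*
 * User-space sketch of the semantics after this series (illustration
 * only; assumes an ARM Linux target with __ARM_NR_cacheflush).
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *buf = mmap(NULL, len,
				  PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0, 64);	/* e.g. freshly generated code */

	/* Flush exactly the 64 bytes written: no rounding to PAGE_SIZE. */
	long ret = syscall(__ARM_NR_cacheflush, buf, buf + 64, 0);
	printf("mapped range:   %ld\n", ret);	/* expect 0 */

	munmap(buf, len);

	/* The unmapped range should now fail rather than be truncated. */
	ret = syscall(__ARM_NR_cacheflush, buf, buf + 64, 0);
	printf("unmapped range: %ld (errno=%d)\n", ret, errno);

	return 0;
}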