arm64: compat: align cacheflush syscall with arch/arm

Update handling of the cacheflush syscall to match the changes made in its
arch/arm counterpart:
 - return error to userspace when flushing syscall fails
 - split user cache-flushing into interruptible chunks
 - don't bother rounding to nearest vma

Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
[will: changed internal return value from -EINTR to 0 to match arch/arm/]
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit a2d25a5391 (parent a1ae65b219)
Vladimir Murzin, 2014-12-01 10:53:08 +00:00; committed by Will Deacon
3 changed files with 35 additions and 22 deletions
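For context, this changes what a 32-bit (compat) user of the syscall sees: a failed flush is now reported instead of silently ignored. Below is a minimal, hypothetical userspace sketch, not part of the patch; the wrapper name flush_code_range is invented, and it assumes an EABI build where the ARM-private cacheflush call is __ARM_NR_BASE + 2 (0x0f0002) and the third "flags" argument must be zero:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define ARM_NR_cacheflush 0x0f0002UL	/* assumed: __ARM_NR_BASE + 2, EABI */

int flush_code_range(void *start, void *end)
{
	/* The third argument is "flags" and must be zero. */
	if (syscall(ARM_NR_cacheflush, start, end, 0) == 0)
		return 0;

	/* With this patch the kernel reports failures (EINVAL for a bad
	 * range or flags, EFAULT for an inaccessible range) instead of
	 * silently ignoring them. */
	perror("cacheflush");
	return -1;
}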

arch/arm64/include/asm/cacheflush.h

@@ -73,7 +73,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
-extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {

arch/arm64/kernel/sys_compat.c

@@ -28,29 +28,39 @@
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 
-static inline void
-do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+static long
+__do_compat_cache_op(unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *vma;
+	long ret;
 
-	if (end < start || flags)
-		return;
+	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
 
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, start);
-	if (vma && vma->vm_start < end) {
-		if (start < vma->vm_start)
-			start = vma->vm_start;
-		if (end > vma->vm_end)
-			end = vma->vm_end;
+		if (fatal_signal_pending(current))
+			return 0;
 
-		up_read(&mm->mmap_sem);
-		__flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
-		return;
-	}
-	up_read(&mm->mmap_sem);
+		ret = __flush_cache_user_range(start, start + chunk);
+		if (ret)
+			return ret;
+
+		cond_resched();
+		start += chunk;
+	} while (start < end);
+
+	return 0;
 }
 
+static inline long
+do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+{
+	if (end < start || flags)
+		return -EINVAL;
+
+	if (!access_ok(VERIFY_READ, start, end - start))
+		return -EFAULT;
+
+	return __do_compat_cache_op(start, end);
+}
+
 /*
  * Handle all unrecognised system calls.
  */
@@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs)
 	 * the specified region).
 	 */
 	case __ARM_NR_compat_cacheflush:
-		do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
-		return 0;
+		return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
 
 	case __ARM_NR_compat_set_tls:
 		current->thread.tp_value = regs->regs[0];
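The chunked loop introduced above bounds how much is flushed between checks for fatal signals and reschedule requests. As a minimal sketch of the same chunk arithmetic (a user-space mock with hypothetical names, page size assumed to be 4 KiB, not kernel code), an arbitrary unaligned range is walked at most one page at a time:

#include <stdio.h>

#define MOCK_PAGE_SIZE 4096UL

/* Mirrors the chunking in __do_compat_cache_op(): at most one page of
 * work per iteration, so interruption points occur between chunks. */
static void mock_cache_op(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long chunk = end - start;

		if (chunk > MOCK_PAGE_SIZE)
			chunk = MOCK_PAGE_SIZE;

		printf("flush [%#lx, %#lx)\n", start, start + chunk);
		start += chunk;
	}
}

int main(void)
{
	/* Three full pages plus a 123-byte tail, starting at an odd offset. */
	mock_cache_op(0x10010, 0x10010 + 3 * MOCK_PAGE_SIZE + 123);
	return 0;
}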

arch/arm64/mm/cache.S

@@ -17,6 +17,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
@@ -140,9 +141,12 @@ USER(9f, ic	ivau, x4	)		// invalidate I line PoU
 	add	x4, x4, x2
 	cmp	x4, x1
 	b.lo	1b
-9:						// ignore any faulting cache operation
 	dsb	ish
 	isb
+	mov	x0, #0
+	ret
+9:
+	mov	x0, #-EFAULT
 	ret
 ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
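To see the end-to-end effect, a hypothetical test like the one below (same assumed syscall number as the earlier sketch; a 32-bit EABI binary run on an arm64 kernel with this patch) flushes a mapped buffer and then the same range after unmapping it; the second call should now fail with EFAULT rather than being silently ignored:

#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARM_NR_cacheflush 0x0f0002UL	/* assumed: __ARM_NR_BASE + 2, EABI */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	long ret;

	assert(buf != MAP_FAILED);
	buf[0] = 0x1;				/* fault the page in */

	/* Flushing a mapped, accessible range succeeds (returns 0). */
	ret = syscall(ARM_NR_cacheflush, buf, buf + page, 0);
	assert(ret == 0);

	/* Flushing the same range once it is unmapped is no longer
	 * silently ignored: the faulting cache op yields EFAULT. */
	munmap(buf, page);
	ret = syscall(ARM_NR_cacheflush, buf, buf + page, 0);
	assert(ret == -1 && errno == EFAULT);

	return 0;
}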