Merge branch 'for-rmk/cacheflush-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
commit 5cc91e0460
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that data previously
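Note: two things change in this hunk. The user-supplied range is no
longer widened out to whole pages (the PAGE_MASK/PAGE_ALIGN rounding is
gone), and the return value of __cpuc_coherent_user_range() is now
forwarded to the caller rather than discarded. A minimal sketch of the
new contract, illustrative only and not part of the diff
(example_flush_page is a made-up helper):

	/*
	 * Flush exactly one page of a user range and surface any fault.
	 * Assumes the coherency op returns nonzero (-EFAULT on v6/v7)
	 * when the user address faults.
	 */
	static int example_flush_page(unsigned long addr)
	{
		return flush_cache_user_range(addr, addr + PAGE_SIZE);
	}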
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
 	__u32	extra[2];		/* Xscale 'acc' register, etc */
 };
 
+struct arm_restart_block {
+	union {
+		/* For user cache flushing */
+		struct {
+			unsigned long start;
+			unsigned long end;
+		} cache;
+	};
+};
+
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
 #endif
 	struct restart_block	restart_block;
+	struct arm_restart_block	arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk) \
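Note: the ARM-private restart state (the address range still to be
flushed) is kept in its own arm_restart_block next to the generic
struct restart_block rather than being squeezed into the generic union;
only the ->fn callback goes into the generic block. A simplified sketch
of the generic restart path (kernel/signal.c of this era) that ties the
two together:

	/*
	 * After a syscall returns -ERESTART_RESTARTBLOCK and the signal
	 * handler completes, userspace re-enters the kernel through
	 * sys_restart_syscall, which invokes the saved callback; the
	 * callback is then free to read the ARM-private state back out
	 * of thread_info.
	 */
	SYSCALL_DEFINE0(restart_syscall)
	{
		struct restart_block *restart = &current_thread_info()->restart_block;
		return restart->fn(restart);
	}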
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 
 	add	r1, sp, #S_OFF
-2:	mov	why, #0				@ no longer a real syscall
 	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
+2:	mov	why, #0				@ no longer a real syscall
 	b	sys_ni_syscall			@ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
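Note: the "2:" label moves below the bcs, so `why` (the register flag
meaning "this is a real syscall return") is now cleared only on the
sys_ni_syscall fallthrough; a dispatch into arm_syscall() leaves `why`
set, which is what allows the signal code to restart a private syscall
such as cacheflush. An illustrative C rendering of the control flow
(dispatch() and invoke() are stand-ins, not kernel symbols; `why` is a
register alias in the real assembly):

	long dispatch(unsigned int scno, struct pt_regs *regs)
	{
		if (scno < NR_syscalls)			/* ldrcc pc, [tbl, ...] */
			return invoke(sys_call_table[scno], regs);
		if (scno >= __ARM_NR_BASE - __NR_SYSCALL_BASE)
			return arm_syscall(scno ^ __NR_SYSCALL_BASE, regs);
		why = 0;				/* 2: no longer a real syscall */
		return sys_ni_syscall();
	}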
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -497,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	return regs->ARM_r0;
 }
 
+static long do_cache_op_restart(struct restart_block *);
+
+static inline int
+__do_cache_op(unsigned long start, unsigned long end)
+{
+	int ret;
+	unsigned long chunk = PAGE_SIZE;
+
+	do {
+		if (signal_pending(current)) {
+			struct thread_info *ti = current_thread_info();
+
+			ti->restart_block = (struct restart_block) {
+				.fn	= do_cache_op_restart,
+			};
+
+			ti->arm_restart_block = (struct arm_restart_block) {
+				{
+					.cache = {
+						.start	= start,
+						.end	= end,
+					},
+				},
+			};
+
+			return -ERESTART_RESTARTBLOCK;
+		}
+
+		ret = flush_cache_user_range(start, start + chunk);
+		if (ret)
+			return ret;
+
+		cond_resched();
+		start += chunk;
+	} while (start < end);
+
+	return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
+{
+	struct arm_restart_block *restart_block;
+
+	restart_block = &current_thread_info()->arm_restart_block;
+	return __do_cache_op(restart_block->cache.start,
+			     restart_block->cache.end);
+}
+
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *vma;
-
 	if (end < start || flags)
 		return -EINVAL;
 
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, start);
-	if (vma && vma->vm_start < end) {
-		if (start < vma->vm_start)
-			start = vma->vm_start;
-		if (end > vma->vm_end)
-			end = vma->vm_end;
-
-		up_read(&mm->mmap_sem);
-		return flush_cache_user_range(start, end);
-	}
-	up_read(&mm->mmap_sem);
-	return -EINVAL;
+	if (!access_ok(VERIFY_READ, start, end - start))
+		return -EFAULT;
+
+	return __do_cache_op(start, end);
 }
 
 /*
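Note: do_cache_op() no longer takes mmap_sem and clamps the request to
a single VMA; it checks the whole range with access_ok() up front and
relies on the flush itself to fault (and return an error) on unmapped
addresses. __do_cache_op() then works through the range in PAGE_SIZE
chunks, calling cond_resched() between chunks and bailing out with
-ERESTART_RESTARTBLOCK when a signal is pending, parking the unfinished
range in arm_restart_block. From userspace this all sits behind the
private cacheflush syscall; an illustrative caller, not part of the
diff (0x0f0002 is __ARM_NR_BASE + 2 with the EABI __NR_SYSCALL_BASE of
0):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __ARM_NR_cacheflush
	#define __ARM_NR_cacheflush	0x0f0002	/* EABI numbering */
	#endif

	/* The third argument (flags) must be 0 or the kernel returns EINVAL. */
	static int cacheflush(void *start, void *end)
	{
		return syscall(__ARM_NR_cacheflush, start, end, 0);
	}

	int main(void)
	{
		static char buf[128];	/* stand-in for a freshly written JIT buffer */

		if (cacheflush(buf, buf + sizeof(buf)) != 0)
			perror("cacheflush");	/* e.g. EFAULT for an unmapped range */
		return 0;
	}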