arch/tile: adopt prepare_exit_to_usermode() model from x86
This change is a prerequisite for TASK_ISOLATION, but it also
stands on its own for readability and maintainability. The existing
tile do_work_pending() was called in a loop from assembly on
the slow path; this change moves the loop into C code as well.
For the x86 version, see commit c5c46f59e4 ("x86/entry: Add new,
comprehensible entry and exit handlers written in C").
This change exposes a pre-existing bug on the older tilepro platform:
the singlestep processing is done last, but on tilepro (unlike tilegx)
we enable interrupts while doing that processing, so in theory we
could miss a signal or other asynchronous event. A future change
could fix this by breaking the singlestep work into a "prepare"
step done in the main loop and a "trigger" step done after exiting
the loop. Since this change is intended purely as a restructuring
change, we call out the bug explicitly now but don't yet fix it.
Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Commit 583b24a210 (parent 65a792e84f)
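
To make the future fix proposed above concrete, here is a minimal hypothetical sketch of the "prepare"/"trigger" split. It is not part of this commit, and the helper names singlestep_prepare() and singlestep_trigger() are invented purely for illustration:

	/* Hypothetical sketch only -- not code from this commit. */
	void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags)
	{
		do {
			local_irq_enable();

			/* ... handle resched, async TLB, signals, notify-resume ... */

			if (flags & _TIF_SINGLESTEP)
				singlestep_prepare(regs);  /* set up state; may run with irqs on */

			local_irq_disable();
			flags = READ_ONCE(current_thread_info()->flags);
		} while (flags & _TIF_WORK_MASK);

		/*
		 * Interrupts stay disabled from here until the return to
		 * user space, so the trigger step cannot open a window in
		 * which a signal or other asynchronous event is missed.
		 */
		if (flags & _TIF_SINGLESTEP)
			singlestep_trigger(regs);
	}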
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -212,7 +212,7 @@ static inline void release_thread(struct task_struct *dead_task)
 	/* Nothing for now */
 }
 
-extern int do_work_pending(struct pt_regs *regs, u32 flags);
+extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
 
 /*
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -140,10 +140,14 @@ extern void _cpu_idle(void);
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 
+/* Work to do as we loop to exit to user space. */
+#define _TIF_WORK_MASK \
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
+
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
-	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
+	(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)
 
 /* Work to do at syscall entry. */
 #define _TIF_SYSCALL_ENTRY_WORK \
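The layering above means the C loop keeps iterating only while a _TIF_WORK_MASK flag is set, while _TIF_ALLWORK_MASK, a strict superset that adds the single-shot _TIF_SINGLESTEP and _TIF_NOHZ bits, is what the assembly tests to decide whether to call into C at all. A small self-contained illustration of that relationship (the bit values here are made up, not the real arch/tile definitions):

	#include <stdio.h>

	/* Illustrative bit values only -- not the real arch/tile definitions. */
	#define _TIF_SIGPENDING    (1u << 0)
	#define _TIF_NEED_RESCHED  (1u << 1)
	#define _TIF_SINGLESTEP    (1u << 2)
	#define _TIF_ASYNC_TLB     (1u << 3)
	#define _TIF_NOTIFY_RESUME (1u << 4)
	#define _TIF_NOHZ          (1u << 5)

	/* Flags the C loop keeps re-checking. */
	#define _TIF_WORK_MASK \
		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
		 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)

	/* Flags that cause the assembly to enter the C path at all. */
	#define _TIF_ALLWORK_MASK \
		(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)

	int main(void)
	{
		unsigned int flags = _TIF_SINGLESTEP;	/* single-shot work only */

		/* Enters C once (ALLWORK matches) but would not loop (WORK doesn't). */
		printf("enter C: %d, loop: %d\n",
		       !!(flags & _TIF_ALLWORK_MASK), !!(flags & _TIF_WORK_MASK));
		return 0;
	}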
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -845,18 +845,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
 	FEEDBACK_REENTER(interrupt_return)
 
-	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 * Get base of stack in r32.
-	 */
-	{
-	 GET_THREAD_INFO(r32)
-	 movei  r33, 0
-	}
-
-.Lretry_work_pending:
 	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
@@ -867,33 +855,27 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-	/* Check to see if there is any work to do before returning to user. */
-	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, lo16(_TIF_ALLWORK_MASK)
-	}
-	{
-	 lw     r29, r29
-	 auli   r1, r1, ha16(_TIF_ALLWORK_MASK)
-	}
-	and     r1, r29, r1
-	bzt     r1, .Lrestore_all
-
 	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling, notify-resume, or single-step.  Call out to C
-	 * code to figure out exactly what we need to do for each flag bit,
-	 * then if necessary, reload the flags and recheck.
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
 	 */
+	GET_THREAD_INFO(r21)
 	{
-	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnz    r33, 1f
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, lo16(_TIF_ALLWORK_MASK)
 	}
-	push_extra_callee_saves r0
-	movei  r33, 1
-1:	jal    do_work_pending
-	bnz    r0, .Lretry_work_pending
+	{
+	 lw     r22, r22
+	 auli   r20, r20, ha16(_TIF_ALLWORK_MASK)
+	}
+	and     r1, r22, r20
+	{
+	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+	 bzt    r1, .Lrestore_all
+	}
+	push_extra_callee_saves r0
+	jal    prepare_exit_to_usermode
 
 	/*
 	 * In the NMI case we
@@ -1327,7 +1309,7 @@ STD_ENTRY(ret_from_kernel_thread)
 	FEEDBACK_REENTER(ret_from_kernel_thread)
 	{
 	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
 	}
 	STD_ENDPROC(ret_from_kernel_thread)
 
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -878,20 +878,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
 	FEEDBACK_REENTER(interrupt_return)
 
-	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 */
-	{
-	 movei  r33, 0
-	 move   r32, sp
-	}
-
-	/* Get base of stack in r32. */
-	EXTRACT_THREAD_INFO(r32)
-
-.Lretry_work_pending:
 	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
@@ -902,33 +888,28 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-	/* Check to see if there is any work to do before returning to user. */
-	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
-	}
-	{
-	 ld     r29, r29
-	 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
-	}
-	and     r1, r29, r1
-	beqzt   r1, .Lrestore_all
-
 	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling or notify-resume.  Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
 	 */
+	move   r21, sp
+	EXTRACT_THREAD_INFO(r21)
 	{
-	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnez   r33, 1f
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, hw1_last(_TIF_ALLWORK_MASK)
 	}
-	push_extra_callee_saves r0
-	movei  r33, 1
-1:	jal    do_work_pending
-	bnez   r0, .Lretry_work_pending
+	{
+	 ld     r22, r22
+	 shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
+	}
+	and     r1, r22, r20
+	{
+	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+	 beqzt  r1, .Lrestore_all
+	}
+	push_extra_callee_saves r0
+	jal    prepare_exit_to_usermode
 
 	/*
 	 * In the NMI case we
@@ -1411,7 +1392,7 @@ STD_ENTRY(ret_from_kernel_thread)
 	FEEDBACK_REENTER(ret_from_kernel_thread)
 	{
 	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
 	}
 	STD_ENDPROC(ret_from_kernel_thread)
 
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -462,54 +462,57 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 /*
  * This routine is called on return from interrupt if any of the
- * TIF_WORK_MASK flags are set in thread_info->flags.  It is
- * entered with interrupts disabled so we don't miss an event
- * that modified the thread_info flags.  If any flag is set, we
- * handle it and return, and the calling assembly code will
- * re-disable interrupts, reload the thread flags, and call back
- * if more flags need to be handled.
- *
- * We return whether we need to check the thread_info flags again
- * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
- * important that it be tested last, and then claim that we don't
- * need to recheck the flags.
+ * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event that
+ * modified the thread_info flags.  We loop until all the tested flags
+ * are clear.  Note that the function is called on certain conditions
+ * that are not listed in the loop condition here (e.g. SINGLESTEP)
+ * which guarantees we will do those things once, and redo them if any
+ * of the other work items is re-done, but won't continue looping if
+ * all the other work is done.
  */
-int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
 {
-	/* If we enter in kernel mode, do nothing and exit the caller loop. */
-	if (!user_mode(regs))
-		return 0;
+	if (WARN_ON(!user_mode(regs)))
+		return;
 
 	user_exit();
 
-	/* Enable interrupts; they are disabled again on return to caller. */
-	local_irq_enable();
+	do {
+		local_irq_enable();
 
-	if (thread_info_flags & _TIF_NEED_RESCHED) {
-		schedule();
-		return 1;
-	}
+		if (thread_info_flags & _TIF_NEED_RESCHED)
+			schedule();
+
 #if CHIP_HAS_TILE_DMA()
-	if (thread_info_flags & _TIF_ASYNC_TLB) {
-		do_async_page_fault(regs);
-		return 1;
-	}
+		if (thread_info_flags & _TIF_ASYNC_TLB)
+			do_async_page_fault(regs);
 #endif
-	if (thread_info_flags & _TIF_SIGPENDING) {
-		do_signal(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_SINGLESTEP)
+
+		if (thread_info_flags & _TIF_SIGPENDING)
+			do_signal(regs);
+
+		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+			clear_thread_flag(TIF_NOTIFY_RESUME);
+			tracehook_notify_resume(regs);
+		}
+
+		local_irq_disable();
+		thread_info_flags = READ_ONCE(current_thread_info()->flags);
+
+	} while (thread_info_flags & _TIF_WORK_MASK);
+
+	if (thread_info_flags & _TIF_SINGLESTEP) {
 		single_step_once(regs);
+#ifndef __tilegx__
+		/*
+		 * FIXME: on tilepro, since we enable interrupts in
+		 * this routine, it's possible that we miss a signal
+		 * or other asynchronous event.
+		 */
+		local_irq_disable();
+#endif
+	}
 
 	user_enter();
-
-	return 0;
 }
 
 unsigned long get_wchan(struct task_struct *p)
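
The reason the loop above re-reads the flags only after disabling interrupts is that an interrupt landing between the last flag check and the return to user space could otherwise set a work bit that would go unhandled. The following toy mock of that check-with-irqs-off idiom is a self-contained sketch in plain C, not kernel code; the mock_* names, the atomic flag word, and the empty irq stubs are stand-ins for thread_info->flags and the real irq primitives:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Toy mock of the exit-to-user work loop -- not kernel code. */
	#define WORK_RESCHED (1u << 0)
	#define WORK_SIGNAL  (1u << 1)
	#define WORK_MASK    (WORK_RESCHED | WORK_SIGNAL)

	static atomic_uint mock_flags = WORK_RESCHED;

	static void irq_enable(void)  { /* stand-in for local_irq_enable() */ }
	static void irq_disable(void) { /* stand-in for local_irq_disable() */ }

	static void mock_prepare_exit(unsigned int flags)
	{
		do {
			irq_enable();	/* work handlers may run with "irqs" on */

			if (flags & WORK_RESCHED)
				printf("schedule()\n");
			if (flags & WORK_SIGNAL)
				printf("do_signal()\n");

			atomic_fetch_and(&mock_flags, ~flags);	/* work consumed */

			/*
			 * Re-read the flags with "interrupts" off: any bit set
			 * after this point is caught by the loop condition, so
			 * no newly posted work can be missed before returning.
			 */
			irq_disable();
			flags = atomic_load(&mock_flags);
		} while (flags & WORK_MASK);
	}

	int main(void)
	{
		mock_prepare_exit(atomic_load(&mock_flags));
		return 0;
	}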