sched/rt, ia64: Use CONFIG_PREEMPTION
CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same functionality which today depends on CONFIG_PREEMPT. Switch the entry code and kprobes over to use CONFIG_PREEMPTION. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-ia64@vger.kernel.org Link: https://lore.kernel.org/r/20191015191821.11479-10-bigeasy@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
143cd41f50
commit
b9b75e53b2
|
@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall)
|
||||||
*
|
*
|
||||||
* p6 controls whether current_thread_info()->flags needs to be check for
|
* p6 controls whether current_thread_info()->flags needs to be check for
|
||||||
* extra work. We always check for extra work when returning to user-level.
|
* extra work. We always check for extra work when returning to user-level.
|
||||||
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
|
* With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
|
||||||
* is 0. After extra work processing has been completed, execution
|
* is 0. After extra work processing has been completed, execution
|
||||||
* resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
|
* resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
|
||||||
* needs to be redone.
|
* needs to be redone.
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_PREEMPT
|
#ifdef CONFIG_PREEMPTION
|
||||||
RSM_PSR_I(p0, r2, r18) // disable interrupts
|
RSM_PSR_I(p0, r2, r18) // disable interrupts
|
||||||
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
||||||
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
|
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
|
||||||
|
@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
|
||||||
(pUStk) mov r21=0 // r21 <- 0
|
(pUStk) mov r21=0 // r21 <- 0
|
||||||
;;
|
;;
|
||||||
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
|
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
|
||||||
#else /* !CONFIG_PREEMPT */
|
#else /* !CONFIG_PREEMPTION */
|
||||||
RSM_PSR_I(pUStk, r2, r18)
|
RSM_PSR_I(pUStk, r2, r18)
|
||||||
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
||||||
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
|
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
|
||||||
|
@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
|
||||||
*
|
*
|
||||||
* p6 controls whether current_thread_info()->flags needs to be check for
|
* p6 controls whether current_thread_info()->flags needs to be check for
|
||||||
* extra work. We always check for extra work when returning to user-level.
|
* extra work. We always check for extra work when returning to user-level.
|
||||||
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
|
* With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
|
||||||
* is 0. After extra work processing has been completed, execution
|
* is 0. After extra work processing has been completed, execution
|
||||||
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
|
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
|
||||||
* needs to be redone.
|
* needs to be redone.
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_PREEMPT
|
#ifdef CONFIG_PREEMPTION
|
||||||
RSM_PSR_I(p0, r17, r31) // disable interrupts
|
RSM_PSR_I(p0, r17, r31) // disable interrupts
|
||||||
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
|
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
|
||||||
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
|
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
|
||||||
|
@ -1120,7 +1120,7 @@ skip_rbs_switch:
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* On entry:
|
* On entry:
|
||||||
* r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
|
* r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION)
|
||||||
* r31 = current->thread_info->flags
|
* r31 = current->thread_info->flags
|
||||||
* On exit:
|
* On exit:
|
||||||
* p6 = TRUE if work-pending-check needs to be redone
|
* p6 = TRUE if work-pending-check needs to be redone
|
||||||
|
|
|
@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if !defined(CONFIG_PREEMPT)
|
#if !defined(CONFIG_PREEMPTION)
|
||||||
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
|
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
|
||||||
/* Boost up -- we can execute copied instructions directly */
|
/* Boost up -- we can execute copied instructions directly */
|
||||||
ia64_psr(regs)->ri = p->ainsn.slot;
|
ia64_psr(regs)->ri = p->ainsn.slot;
|
||||||
|
|
Loading…
Reference in New Issue