powerpc/32: Reconcile interrupts in C

There is no need for this to be in asm anymore;
use the new interrupt entry wrapper.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/602e1ec47e15ca540f7edb9cf6feb6c249911bd6.1615552866.git.christophe.leroy@csgroup.eu
commit be39e10506 (parent 0512aadd75)
Christophe Leroy, 2021-03-12 12:50:17 +00:00; committed by Michael Ellerman
2 changed files with 4 additions and 58 deletions
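
For context: the "interrupt entry wrapper" referred to in the commit message is the C wrapper machinery around powerpc interrupt handlers (DEFINE_INTERRUPT_HANDLER and friends), which calls interrupt_enter_prepare() before and interrupt_exit_prepare() after the real handler body. A rough sketch of that pattern, leaving out details such as nokprobe annotations and handlers that return a value (an illustration, not the kernel's verbatim macro):

/* Rough sketch of the wrapper pattern; not the kernel's exact macro. */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
void func(struct pt_regs *regs)						\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);	/* reconcile state in C */ \
	____##func(regs);			/* the real handler body */ \
	interrupt_exit_prepare(regs, &state);				\
}									\
									\
static __always_inline void ____##func(struct pt_regs *regs)

Because every handler already funnels through interrupt_enter_prepare(), moving the 32-bit trace_hardirqs_off() reconciliation out of the asm entry path only requires the small addition shown in the first hunk below.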

@@ -29,6 +29,10 @@ static inline void booke_restore_dbcr0(void)
static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */

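The check added above only tells lockdep "hardirqs are now off" when the interrupted context actually had interrupts enabled, i.e. when MSR[EE] was set in the MSR saved by the entry code; this is the C counterpart of the MSR_EE test performed by the assembly removed below. A small self-contained, user-space sketch of that logic, with all types and helpers as simplified stand-ins for the kernel's:

/*
 * Stand-alone illustration (user space, stand-in types) of the check that
 * interrupt_enter_prepare() now performs on 32-bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define MSR_EE 0x8000UL				/* external interrupt enable */

struct pt_regs { unsigned long msr; };		/* only the field we need */

static bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);		/* disabled iff MSR[EE] clear */
}

static void trace_hardirqs_off(void)		/* lockdep stub */
{
	puts("lockdep: hardirqs now off");
}

int main(void)
{
	struct pt_regs from_irq_on  = { .msr = MSR_EE };
	struct pt_regs from_irq_off = { .msr = 0 };

	if (!arch_irq_disabled_regs(&from_irq_on))
		trace_hardirqs_off();		/* printed: irqs were on */

	if (!arch_irq_disabled_regs(&from_irq_off))
		trace_hardirqs_off();		/* skipped: already off */

	return 0;
}
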
@@ -202,22 +202,6 @@ transfer_to_handler_cont:
lwz r9,4(r9) /* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
mtspr SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* When tracing IRQ state (lockdep) we enable the MMU before we call
* the IRQ tracing functions as they might access vmalloc space or
* perform IOs for console output.
*
* To speed up the syscall path where interrupts stay on, let's check
* first if we are changing the MSR value at all.
*/
tophys_novmstack r12, r1
lwz r12,_MSR(r12)
andi. r12,r12,MSR_EE
bne 1f
/* MSR isn't changing, just transition directly */
#endif
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
@@ -244,48 +228,6 @@ transfer_to_handler_cont:
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
#ifdef CONFIG_TRACE_IRQFLAGS
1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
* keep interrupts disabled at this point otherwise we might risk
* taking an interrupt before we tell lockdep they are enabled.
*/
lis r12,reenable_mmu@h
ori r12,r12,reenable_mmu@l
LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r0
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
#endif
reenable_mmu:
/*
* We save a bunch of GPRs,
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
* r0, r4-r8, r12, CCR, CTR, XER etc... are left
* clobbered as they aren't useful past this point.
*/
stwu r1,-32(r1)
stw r9,8(r1)
stw r11,12(r1)
stw r3,16(r1)
/* If we are disabling interrupts (normal case), simply log it with
* lockdep
*/
1: bl trace_hardirqs_off
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
addi r1,r1,32
mtctr r11
mtlr r9
bctr /* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifndef CONFIG_VMAP_STACK
/*
* On kernel stack overflow, load up an initial stack pointer