Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
 "Here are a few powerpc fixes.  Arguably some of this should have come
  to you earlier but I'm only just catching up after my medical leave.

  Mostly these fix regressions, a couple are long-standing bugs."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/pseries: Fix software invalidate TCE
  powerpc: check_and_cede_processor() never cedes
  powerpc/ftrace: Do not trace restore_interrupts()
  powerpc: Fix Section mismatch warnings in prom_init.c
  ppc64: fix missing to check all bits of _TIF_USER_WORK_MASK in preempt
  powerpc: Fix uninitialised error in numa.c
  powerpc: Fix BPF_JIT code to link with multiple TOCs
commit 15114c7e1c
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
@@ -103,6 +103,11 @@ static inline void hard_irq_disable(void)
 /* include/linux/interrupt.h needs hard_irq_disable to be a macro */
 #define hard_irq_disable	hard_irq_disable
 
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
 	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
 	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
-	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
-	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
-	bne	do_work
-
-#else /* !CONFIG_PREEMPT */
-	ld	r3,_MSR(r1)	   /* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	beq	restore
+
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	bl	.restore_interrupts
+	bl	.schedule
+	b	.ret_from_except_lite
+
+1:	bl	.save_nvgprs
+	bl	.restore_interrupts
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_notify_resume
+	b	.ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
+1:	bl	.preempt_schedule_irq
+
+	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	do_work
-#endif /* !CONFIG_PREEMPT */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne	1b
+#endif /* CONFIG_PREEMPT */
 
 	.globl	fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
-	bne	user_work
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr1,r8,0
-	ld	r0,SOFTE(r1)
-	cmpdi	r0,0
-	crandc	eq,cr1*4+eq,eq
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
-	 */
-	SOFT_DISABLE_INTS(r3,r4)
-1:	bl	.preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-	b	restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	bl	.restore_interrupts
-	bl	.schedule
-	b	.ret_from_except_lite
-
-1:	bl	.save_nvgprs
-	bl	.restore_interrupts
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
-
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unrecoverable_exception
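The regression here was on the CONFIG_PREEMPT side of the old code: on the way back to user mode it effectively checked only _TIF_NEED_RESCHED (with _TIF_SIGPENDING folded in via the rlwimi trick), so other bits in _TIF_USER_WORK_MASK, such as _TIF_NOTIFY_RESUME, could be left unhandled. A minimal C sketch of the difference, using illustrative flag values rather than the real ones from arch/powerpc/include/asm/thread_info.h:

    #include <stdio.h>

    /* Illustrative stand-ins for the real _TIF_* bits. */
    #define _TIF_SIGPENDING     (1u << 1)
    #define _TIF_NEED_RESCHED   (1u << 2)
    #define _TIF_NOTIFY_RESUME  (1u << 3)
    #define _TIF_USER_WORK_MASK \
            (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)

    /* Old CONFIG_PREEMPT exit path: only resched/signal bits reached
     * do_work on the way back to user mode. */
    static int old_path_does_work(unsigned int flags)
    {
            return (flags & (_TIF_NEED_RESCHED | _TIF_SIGPENDING)) != 0;
    }

    /* Patched path: any user-work bit triggers work before returning. */
    static int new_path_does_work(unsigned int flags)
    {
            return (flags & _TIF_USER_WORK_MASK) != 0;
    }

    int main(void)
    {
            unsigned int flags = _TIF_NOTIFY_RESUME; /* pending upcall */

            printf("old: %d  new: %d\n",
                   old_path_does_work(flags), new_path_does_work(flags));
            return 0; /* old prints 0: that work was silently skipped */
    }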
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
 	if (irqs_disabled()) {
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
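restore_interrupts() runs deep in the lazy-interrupt exit path, where an ftrace hook re-entering the IRQ machinery is unsafe, hence the notrace annotation. As a small illustration (the kernel defines notrace in include/linux/compiler.h):

    /* Keeps the compiler from emitting the profiling call ftrace hooks. */
    #define notrace __attribute__((no_instrument_function))

    static notrace void careful_path(void)
    {
            /* safe to call from the interrupt-return path */
    }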
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
 	long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
 	prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
 	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
 	struct opal_takeover_args *args = &data->args;
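Both functions are only called at boot from other __init code, but were not themselves marked __init, so modpost warned about references from regular .text into discardable .init.text. A userspace analogue of the idea, using a made-up section name purely for illustration:

    #include <stdio.h>

    /* Like the kernel's __init: place boot-only code in its own section.
     * In the kernel that section is freed after boot, so modpost flags
     * any ordinary function that still refers into it. */
    #define my_init __attribute__((__section__(".boot.text")))

    static my_init void boot_only_helper(void)
    {
            puts("runs once at boot");
    }

    static my_init void boot_only_caller(void)
    {
            boot_only_helper(); /* caller and callee in the same section */
    }

    int main(void)
    {
            boot_only_caller();
            return 0;
    }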
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
@@ -635,7 +635,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm, *usm;
+	const u32 *uninitialized_var(dm), *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
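gcc could not prove that dm is assigned on every path before use and warned accordingly, though the surrounding logic makes the warning a false positive. uninitialized_var() was this era's idiom for silencing it; from include/linux/compiler-gcc.h it boils down to a self-initialisation that generates no extra code:

    #define uninitialized_var(x) x = x

so the declaration above expands to "const u32 *dm = dm, *usm;".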
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
 	mr	r4, r_addr;					\
 	li	r6, SIZE;					\
 	bl	skb_copy_bits;					\
+	nop;						\
 	/* R3 = 0 on success */					\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
 	mr	r4, r_addr;					\
 	li	r5, SIZE;					\
 	bl	bpf_internal_load_pointer_neg_helper;		\
+	nop;						\
 	/* R3 != 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\
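Background for the two nops: under the 64-bit ELF ABI of the time, code linked with multiple TOCs may have its bl calls routed through a linker stub that switches the TOC pointer in r2. The linker then needs the instruction slot immediately after the bl to restore the caller's TOC, so a nop must follow the call. An illustrative post-link sequence (not taken from this patch):

    bl      skb_copy_bits   # call may go via a TOC-switching stub
    ld      r2,40(r1)       # the nop, rewritten to reload the saved TOC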
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 		tcep++;
 	}
 
-	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
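it_type is a bit field in which the TCE_PCI_SWINV_* values are distinct bits that may be set together, so the equality test only ever matched when exactly one of them was set. A minimal demonstration with illustrative values (the real flags are in arch/powerpc/include/asm/iommu.h):

    #include <stdio.h>

    #define TCE_PCI_SWINV_CREATE 1
    #define TCE_PCI_SWINV_FREE   2

    int main(void)
    {
            unsigned long it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;

            /* Buggy: false whenever more than one SWINV bit is set. */
            printf("==: %d\n", it_type == TCE_PCI_SWINV_CREATE);
            /* Fixed: tests just the bit of interest. */
            printf("&:  %d\n", (it_type & TCE_PCI_SWINV_CREATE) != 0);
            return 0;
    }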
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -106,7 +106,7 @@ static void check_and_cede_processor(void)
 	 * we first hard disable then check.
 	 */
 	hard_irq_disable();
-	if (get_paca()->irq_happened == 0)
+	if (!lazy_irq_pending())
 		cede_processor();
 }
 
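The subtlety fixed here: hard_irq_disable() itself records PACA_IRQ_HARD_DIS in paca->irq_happened, so the old test irq_happened == 0 was never true once interrupts were hard-disabled, and the processor never ceded. The new lazy_irq_pending() helper (added to hw_irq.h above) masks that bit out and reports only genuinely pending lazy interrupts. A self-contained sketch with an illustrative flag value:

    #include <stdio.h>

    #define PACA_IRQ_HARD_DIS 0x01 /* illustrative; see asm/hw_irq.h */

    static unsigned char irq_happened;

    static void hard_irq_disable(void)
    {
            irq_happened |= PACA_IRQ_HARD_DIS; /* disabling leaves a mark */
    }

    static int lazy_irq_pending(void)
    {
            return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
    }

    int main(void)
    {
            hard_irq_disable();
            /* Old test: never true here, so cede_processor() never ran. */
            printf("irq_happened == 0   -> %d\n", irq_happened == 0);
            /* New test: true when nothing is genuinely pending. */
            printf("!lazy_irq_pending() -> %d\n", !lazy_irq_pending());
            return 0;
    }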