Merge tag 'mips_fixes_4.18_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Paul Burton:
 "A couple more MIPS fixes for 4.18:

   - Use async IPIs for arch_trigger_cpumask_backtrace() in order to
     avoid warnings & deadlocks, fixing a problem introduced in v3.19,
     with the fix trivial to backport as far as v4.9.

   - Fix ioremap()'s MMU/TLB backed path to avoid spuriously rejecting
     valid requests due to an incorrect belief that the memory region is
     backed by potentially-in-use RAM. This fixes a regression in v4.2"

* tag 'mips_fixes_4.18_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  MIPS: Fix ioremap() RAM check
  MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
  MIPS: Call dump_stack() from show_regs()
commit 1e09177aca
arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
-{
-	struct pt_regs *regs;
-
-	regs = get_irq_regs();
-
-	if (regs)
-		show_regs(regs);
-
-	dump_stack();
-}
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
+
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
+}
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
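The crux of the hunk above is that a call_single_data_t must not be reused while a previous async IPI is still in flight. Below is a minimal user-space sketch of that busy-bit guard, with the kernel primitives (cpumask ops, per-CPU data, the IPI itself) replaced by hypothetical stdatomic stand-ins; it models the guard logic only, not the real API.

/* Hedged sketch: the csd-reuse guard, reduced to user-space C. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_uint backtrace_busy; /* stands in for backtrace_csd_busy */

/* Mimics cpumask_test_and_set_cpu(): nonzero means the bit was already
 * set, i.e. the target CPU never acknowledged our previous IPI. */
static int test_and_set_busy(int cpu)
{
	unsigned int bit = 1u << cpu;
	return atomic_fetch_or(&backtrace_busy, bit) & bit;
}

/* Mimics cpumask_clear_cpu(), run by the target once it has finished
 * dumping its backtrace (handle_backtrace() in the real patch). */
static void clear_busy(int cpu)
{
	atomic_fetch_and(&backtrace_busy, ~(1u << cpu));
}

static void raise_backtrace(unsigned int mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(mask & (1u << cpu)))
			continue;
		if (test_and_set_busy(cpu)) {
			/* Previous IPI still pending: reusing the per-CPU
			 * csd would be unsafe, so warn and skip, as the
			 * patch does. */
			printf("CPU%d still busy - skipping\n", cpu);
			continue;
		}
		printf("sending async backtrace IPI to CPU%d\n", cpu);
		/* the target would run its handler and then... */
		clear_busy(cpu);
	}
}

int main(void)
{
	raise_backtrace(0xf); /* all four fake CPUs */
	return 0;
}

The atomic test-and-set is what makes the guard race-free: whichever sender sets the bit first owns the csd until the target CPU clears it.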
arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
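This hunk complements the IPI rework: the backtrace handler now prints registers through show_regs(), so folding dump_stack() into show_regs() keeps the call trace in the output, matching what other architectures do. A trivial sketch of the resulting behaviour, with hypothetical stand-in functions:

/* Hedged sketch: after the change, one call prints both the register
 * state and the call trace. All names here are stand-ins. */
#include <stdio.h>

static void __show_regs(void) { printf("register dump\n"); }
static void dump_stack(void)  { printf("call trace\n"); }

/* Mirrors the patched show_regs(): callers that used to pair it with
 * an explicit dump_stack() no longer need to. */
static void show_regs(void)
{
	__show_regs();
	dump_stack();
}

int main(void)
{
	show_regs(); /* register dump, then the trace */
	return 0;
}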
arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
+	}
 
 	/*
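The behavioural change worth noting: the old loop walked struct pages for any physical address below high_memory, even when the region is not actually backed by RAM, which is why valid requests could be spuriously rejected; the new check consults walk_system_ram_range() and refuses only pages that are both valid and unreserved, i.e. genuinely available to the page allocator. A small user-space model of that predicate follows, with a hypothetical fake memmap standing in for pfn_valid()/PageReserved().

/* Hedged model of __ioremap_check_ram(): a page blocks ioremap() only
 * when it exists (pfn_valid) AND is not reserved. Data is made up. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool valid; bool reserved; };

/* Tiny stand-in "memory map": only PFN 2 is allocatable RAM. */
static const struct fake_page memmap[] = {
	{ .valid = false },                    /* hole               */
	{ .valid = true,  .reserved = true  }, /* firmware/reserved  */
	{ .valid = true,  .reserved = false }, /* normal RAM         */
	{ .valid = true,  .reserved = true  }, /* reserved           */
};

static int check_ram(unsigned long start_pfn, unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++) {
		const struct fake_page *p = &memmap[start_pfn + i];
		if (p->valid && !p->reserved)
			return 1; /* in-use RAM: refuse to remap */
	}
	return 0; /* holes/reserved pages only: safe to remap */
}

int main(void)
{
	printf("pfn 0-1: %s\n", check_ram(0, 2) ? "reject" : "allow");
	printf("pfn 1-3: %s\n", check_ram(1, 3) ? "reject" : "allow");
	return 0;
}

With this predicate, a request covering only holes and reserved firmware pages is allowed through, while anything overlapping allocator-owned RAM is rejected with a warning.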