OpenRISC updates for 5.9
Merge tag 'for-linus' of git://github.com/openrisc/linux

Pull OpenRISC updates from Stafford Horne:
 "A few patches all over the place during this cycle, mostly bug and
  sparse warning fixes for OpenRISC, but a few enhancements too. Note,
  there are two non-OpenRISC-specific fixups.

  Non-OpenRISC fixes:

   - In init we need to align the init_task correctly to fix an issue
     with MUTEX_FLAGS, reviewed by Peter Z. No one picked this up so I
     kept it on my tree.

   - In asm-generic/io.h I fixed up some sparse warnings, OK'd by Arnd.
     Arnd asked to merge it via my tree.

  OpenRISC fixes:

   - Many fixes for OpenRISC sparse warnings.

   - Add support for OpenRISC SMP TLB flushing rather than always
     flushing the entire TLB on every CPU.

   - Fix a bug when dumping the stack of user threads via
     /proc/xxx/stack"

* tag 'for-linus' of git://github.com/openrisc/linux:
  openrisc: uaccess: Add user address space check to access_ok
  openrisc: signal: Fix sparse address space warnings
  openrisc: uaccess: Remove unused macro __addr_ok
  openrisc: uaccess: Use static inline function in access_ok
  openrisc: uaccess: Fix sparse address space warnings
  openrisc: io: Fixup defines and move include to the end
  asm-generic/io.h: Fix sparse warnings on big-endian architectures
  openrisc: Implement proper SMP tlb flushing
  openrisc: Fix oops caused when dumping stack
  openrisc: Add support for external initrd images
  init: Align init_task to avoid conflict with MUTEX_FLAGS
  openrisc: fix __user in raw_copy_to_user()'s prototype
commit e1d74fbe50

arch/openrisc/include/asm/io.h

@@ -14,6 +14,8 @@
 #ifndef __ASM_OPENRISC_IO_H
 #define __ASM_OPENRISC_IO_H
 
+#include <linux/types.h>
+
 /*
  * PCI: can we really do 0 here if we have no port IO?
  */
@@ -25,9 +27,12 @@
 #define PIO_OFFSET 0
 #define PIO_MASK 0
 
-#include <asm-generic/io.h>
-
+#define ioremap ioremap
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
+
+#define iounmap iounmap
 extern void iounmap(void *addr);
 
+#include <asm-generic/io.h>
+
 #endif
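
The asm-generic include moves to the end, and ioremap/iounmap gain same-named defines, because asm-generic/io.h only supplies a fallback for a symbol when no macro of that name has been defined yet. Below is a small standalone model of that override idiom, not kernel code; my_op and the surrounding program are invented for illustration.

#include <stdio.h>

/* "arch" side: provide a specific implementation and announce it */
#define my_op my_op
static int my_op(int x)
{
	return x * 2;			/* arch-specific version */
}

/* "generic" side: only compiled in when the arch did not define my_op */
#ifndef my_op
static int my_op(int x)
{
	return x;			/* generic fallback */
}
#endif

int main(void)
{
	printf("%d\n", my_op(21));	/* prints 42: the override wins */
	return 0;
}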

arch/openrisc/include/asm/uaccess.h

@@ -48,16 +48,17 @@
 /* Ensure that the range from addr to addr+size is all within the process'
  * address space
  */
-#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+	const mm_segment_t fs = get_fs();
 
-/* Ensure that addr is below task's addr_limit */
-#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+	return size <= fs && addr <= (fs - size);
+}
 
 #define access_ok(addr, size) \
 ({ \
-	unsigned long __ao_addr = (unsigned long)(addr); \
-	unsigned long __ao_size = (unsigned long)(size); \
-	__range_ok(__ao_addr, __ao_size); \
+	__chk_user_ptr(addr); \
+	__range_ok((unsigned long)(addr), (size)); \
 })
 
 /*
@@ -100,7 +101,7 @@ extern long __put_user_bad(void);
 #define __put_user_check(x, ptr, size) \
 ({ \
 	long __pu_err = -EFAULT; \
-	__typeof__(*(ptr)) *__pu_addr = (ptr); \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 	if (access_ok(__pu_addr, size)) \
 		__put_user_size((x), __pu_addr, (size), __pu_err); \
 	__pu_err; \
@@ -173,7 +174,7 @@ struct __large_struct {
 #define __get_user_check(x, ptr, size) \
 ({ \
 	long __gu_err = -EFAULT, __gu_val = 0; \
-	const __typeof__(*(ptr)) * __gu_addr = (ptr); \
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
 	if (access_ok(__gu_addr, size)) \
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
@@ -241,17 +242,17 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long size)
 	return __copy_tofrom_user(to, (__force const void *)from, size);
 }
 static inline unsigned long
-raw_copy_to_user(void *to, const void __user *from, unsigned long size)
+raw_copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	return __copy_tofrom_user((__force void *)to, from, size);
 }
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
-extern unsigned long __clear_user(void *addr, unsigned long size);
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline __must_check unsigned long
-clear_user(void *addr, unsigned long size)
+clear_user(void __user *addr, unsigned long size)
 {
 	if (likely(access_ok(addr, size)))
 		size = __clear_user(addr, size);
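
The new static inline __range_ok() performs the same check as the old macro, written so that addr + size cannot wrap: the range fits if size is no larger than the address limit and addr is at or below limit - size. Below is a small standalone model of that check, not kernel code; range_ok(), limit and the sample values are made up for the example.

#include <stdio.h>

static int range_ok(unsigned long addr, unsigned long size,
		    unsigned long limit)
{
	/* same shape as: size <= fs && addr <= (fs - size) */
	return size <= limit && addr <= (limit - size);
}

int main(void)
{
	unsigned long limit = 0xc0000000UL;	/* example user/kernel split */

	printf("%d\n", range_ok(0x10000000UL, 0x1000UL, limit));	/* 1: fits */
	printf("%d\n", range_ok(0xbfffff00UL, 0x1000UL, limit));	/* 0: crosses the limit */
	printf("%d\n", range_ok(0xffffff00UL, 0x1000UL, limit));	/* 0: far above the limit;
									   addr + size would wrap on 32-bit */
	return 0;
}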

arch/openrisc/kernel/setup.c

@@ -292,13 +292,15 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.brk = (unsigned long)_end;
 
 #ifdef CONFIG_BLK_DEV_INITRD
-	initrd_start = (unsigned long)&__initrd_start;
-	initrd_end = (unsigned long)&__initrd_end;
 	if (initrd_start == initrd_end) {
+		printk(KERN_INFO "Initial ramdisk not found\n");
 		initrd_start = 0;
 		initrd_end = 0;
+	} else {
+		printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
+		       (void *)(initrd_start), initrd_end - initrd_start);
+		initrd_below_start_ok = 1;
 	}
-	initrd_below_start_ok = 1;
 #endif
 
 	/* setup memblock allocator */

arch/openrisc/kernel/signal.c

@@ -68,7 +68,7 @@ static int restore_sigcontext(struct pt_regs *regs,
 
 asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 {
-	struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp;
 	sigset_t set;
 
 	/*
@@ -76,7 +76,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 	 * then frame should be dword aligned here. If it's
 	 * not, then the user is trying to mess with us.
 	 */
-	if (((long)frame) & 3)
+	if (((unsigned long)frame) & 3)
 		goto badframe;
 
 	if (!access_ok(frame, sizeof(*frame)))
@@ -151,7 +151,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
 static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
 {
-	struct rt_sigframe *frame;
+	struct rt_sigframe __user *frame;
 	unsigned long return_ip;
 	int err = 0;
 
@@ -181,10 +181,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
		l.ori r11,r0,__NR_sigreturn
		l.sys 1
	 */
-	err |= __put_user(0xa960, (short *)(frame->retcode + 0));
-	err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
-	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
-	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+	err |= __put_user(0xa960, (short __user *)(frame->retcode + 0));
+	err |= __put_user(__NR_rt_sigreturn, (short __user *)(frame->retcode + 2));
+	err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4));
+	err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8));
 
 	if (err)
 		return -EFAULT;
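
All of the __user additions in this file (and in uaccess.h above) exist for sparse, which only sees the annotation when the kernel is checked with "make C=1". The snippet below is a standalone illustration of how such an address-space annotation behaves, not the kernel's real definitions; the __user/__force macros only approximate those in compiler_types.h, and struct frame plus demo() are invented.

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

struct frame { int sig; };

static void demo(unsigned long sp)
{
	/* annotated consistently: no sparse warning */
	struct frame __user *uframe = (struct frame __user *)sp;

	/* assigning "struct frame *kframe = uframe;" would warn under
	 * sparse about different address spaces; opting out explicitly
	 * needs a __force cast: */
	struct frame *kframe = (struct frame __force *)uframe;

	(void)kframe;
	(void)uframe;
}

int main(void)
{
	demo(0x7fff0000UL);
	return 0;
}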

arch/openrisc/kernel/smp.c

@@ -219,30 +219,99 @@ static inline void ipi_flush_tlb_all(void *ignored)
 	local_flush_tlb_all();
 }
 
+static inline void ipi_flush_tlb_mm(void *info)
+{
+	struct mm_struct *mm = (struct mm_struct *)info;
+
+	local_flush_tlb_mm(mm);
+}
+
+static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
+{
+	unsigned int cpuid;
+
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		local_flush_tlb_mm(mm);
+	} else {
+		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
+	}
+	put_cpu();
+}
+
+struct flush_tlb_data {
+	unsigned long addr1;
+	unsigned long addr2;
+};
+
+static inline void ipi_flush_tlb_page(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_page(NULL, fd->addr1);
+}
+
+static inline void ipi_flush_tlb_range(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
+}
+
+static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
+				unsigned long end)
+{
+	unsigned int cpuid;
+
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		if ((end - start) <= PAGE_SIZE)
+			local_flush_tlb_page(NULL, start);
+		else
+			local_flush_tlb_range(NULL, start, end);
+	} else {
+		struct flush_tlb_data fd;
+
+		fd.addr1 = start;
+		fd.addr2 = end;
+
+		if ((end - start) <= PAGE_SIZE)
+			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
+		else
+			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
+	}
+	put_cpu();
+}
+
 void flush_tlb_all(void)
 {
 	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
-/*
- * FIXME: implement proper functionality instead of flush_tlb_all.
- * *But*, as things currently stands, the local_tlb_flush_* functions will
- * all boil down to local_tlb_flush_all anyway.
- */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_mm(mm_cpumask(mm), mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
 }
 
 /* Instruction cache invalidate - performed on each cpu */
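
The cpumask_any_but(cmask, cpuid) >= nr_cpu_ids test above reads as "no CPU other than the current one is in the mask", in which case the flush stays local and no IPIs are sent. A standalone model of that decision is sketched below, not kernel code; the plain unsigned long bitmask and only_cpu_in_mask() are stand-ins for struct cpumask and the cpumask helpers.

#include <stdio.h>

static int only_cpu_in_mask(unsigned long mask, unsigned int cpu)
{
	/* mirrors: cpumask_any_but(cmask, cpuid) >= nr_cpu_ids */
	return (mask & ~(1UL << cpu)) == 0;
}

int main(void)
{
	unsigned long mm_mask = 1UL << 2;	/* mm only ever ran on CPU 2 */

	if (only_cpu_in_mask(mm_mask, 2))
		printf("CPU 2: local flush, no IPIs\n");

	mm_mask |= 1UL << 0;			/* mm also ran on CPU 0 */
	if (!only_cpu_in_mask(mm_mask, 2))
		printf("CPU 2: IPI the other CPUs in the mask\n");

	return 0;
}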

arch/openrisc/kernel/stacktrace.c

@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
 #include <asm/processor.h>
@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	unsigned long *sp = NULL;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	if (tsk == current)
 		sp = (unsigned long *) &sp;
-	else
-		sp = (unsigned long *) KSTK_ESP(tsk);
+	else {
+		unsigned long ksp;
+
+		/* Locate stack from kernel context */
+		ksp = task_thread_info(tsk)->ksp;
+		ksp += STACK_FRAME_OVERHEAD; /* redzone */
+		ksp += sizeof(struct pt_regs);
+
+		sp = (unsigned long *) ksp;
+	}
 
 	unwind_stack(trace, sp, save_stack_address_nosched);
+
+	put_task_stack(tsk);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 

arch/openrisc/kernel/vmlinux.lds.S

@@ -96,18 +96,6 @@
 
 	__init_end = .;
 
-	. = ALIGN(PAGE_SIZE);
-	.initrd : AT(ADDR(.initrd) - LOAD_OFFSET)
-	{
-		__initrd_start = .;
-		*(.initrd)
-		__initrd_end = .;
-		FILL (0);
-		. = ALIGN (PAGE_SIZE);
-	}
-
-	__vmlinux_end = .; /* last address of the physical file */
-
 	BSS_SECTION(0, 0, 0x20)
 
 	_end = .;

arch/openrisc/mm/tlb.c

@@ -137,21 +137,28 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *next_tsk)
 {
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
 	/* remember the pgd for the fault handlers
	 * this is similar to the pgd register in some other CPU's.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to derefer
	 * the pgd.
	 */
-	current_pgd[smp_processor_id()] = next->pgd;
+	current_pgd[cpu] = next->pgd;
 
 	/* We don't have context support implemented, so flush all
	 * entries belonging to previous map
	 */
-
-	if (prev != next)
-		local_flush_tlb_mm(prev);
-
+	local_flush_tlb_mm(prev);
 }
 
 /*

include/asm-generic/io.h

@@ -163,7 +163,7 @@ static inline u16 readw(const volatile void __iomem *addr)
 	u16 val;
 
 	__io_br();
-	val = __le16_to_cpu(__raw_readw(addr));
+	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
 	__io_ar(val);
 	return val;
 }
@@ -176,7 +176,7 @@ static inline u32 readl(const volatile void __iomem *addr)
 	u32 val;
 
 	__io_br();
-	val = __le32_to_cpu(__raw_readl(addr));
+	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
 	__io_ar(val);
 	return val;
 }
@@ -212,7 +212,7 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
 static inline void writew(u16 value, volatile void __iomem *addr)
 {
 	__io_bw();
-	__raw_writew(cpu_to_le16(value), addr);
+	__raw_writew((u16 __force)cpu_to_le16(value), addr);
 	__io_aw();
 }
 #endif
@@ -222,7 +222,7 @@ static inline void writew(u16 value, volatile void __iomem *addr)
 static inline void writel(u32 value, volatile void __iomem *addr)
 {
 	__io_bw();
-	__raw_writel(__cpu_to_le32(value), addr);
+	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
 	__io_aw();
 }
 #endif
@@ -474,7 +474,7 @@ static inline u16 _inw(unsigned long addr)
 	u16 val;
 
 	__io_pbr();
-	val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
 	__io_par(val);
 	return val;
 }
@@ -487,7 +487,7 @@ static inline u32 _inl(unsigned long addr)
 	u32 val;
 
 	__io_pbr();
-	val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
 	__io_par(val);
 	return val;
 }
@@ -508,7 +508,7 @@ static inline void _outb(u8 value, unsigned long addr)
 static inline void _outw(u16 value, unsigned long addr)
 {
 	__io_pbw();
-	__raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
 	__io_paw();
 }
 #endif
@@ -518,7 +518,7 @@ static inline void _outw(u16 value, unsigned long addr)
 static inline void _outl(u32 value, unsigned long addr)
 {
 	__io_pbw();
-	__raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
 	__io_paw();
 }
 #endif
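
The (__le16 __force)/(__le32 __force) casts above only matter to sparse: __le16/__le32 are declared __bitwise, so passing the plain u16/u32 returned by __raw_readw()/__raw_readl() into the little-endian conversion helpers warns on big-endian builds unless the conversion is marked __force. The snippet below is a standalone illustration of that mechanism, not the kernel's byteorder headers; the typedefs and helper names are simplified stand-ins.

#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned short u16;
typedef u16 __bitwise __le16;

static u16 le16_to_cpu_demo(__le16 v)
{
	/* byte swap elided; just unwrap the bitwise type */
	return (__force u16)v;
}

static u16 raw_read_demo(void)
{
	return 0x1234;			/* stands in for __raw_readw() */
}

int main(void)
{
	/* without the __force cast, sparse warns about a cast to a
	 * restricted __le16 type; with it, the intent is explicit */
	u16 val = le16_to_cpu_demo((__le16 __force)raw_read_demo());

	return val == 0x1234 ? 0 : 1;
}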

init/init_task.c

@@ -65,6 +65,7 @@ struct task_struct init_task
 #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	__init_task_data
 #endif
+	__aligned(L1_CACHE_BYTES)
	= {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
	.thread_info = INIT_THREAD_INFO(init_task),
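
init_task needs the extra alignment because the mutex code packs MUTEX_FLAG_* bits into the low bits of the owner task_struct pointer, so a task_struct that is only word aligned can collide with those bits. Below is a standalone sketch of that pointer-plus-flags packing, not the kernel's mutex implementation; the structure, flag mask and helpers are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define MUTEX_FLAGS_DEMO 0x07UL		/* low bits reused for flags */

struct fake_task { int state; };

/* pack owner pointer + flags, as the mutex owner field does */
static uintptr_t encode_owner(struct fake_task *task, uintptr_t flags)
{
	return (uintptr_t)task | flags;
}

/* recover the task pointer by masking the flag bits back off */
static struct fake_task *owner_task(uintptr_t owner)
{
	return (struct fake_task *)(owner & ~MUTEX_FLAGS_DEMO);
}

int main(void)
{
	static struct fake_task aligned_task __attribute__((aligned(32)));
	uintptr_t owner = encode_owner(&aligned_task, 0x04);

	/* works only because &aligned_task has its low three bits clear;
	 * a task that is merely 4-byte aligned could itself have the
	 * 0x04 bit set, and this round trip would corrupt the pointer */
	printf("round trip ok: %d\n", owner_task(owner) == &aligned_task);

	return 0;
}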