Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull arch/tile updates from Chris Metcalf:
 "This is a grab bag of changes that includes some NOHZ and
  context-tracking related changes, some debugging improvements,
  JUMP_LABEL support, and some fixes for tilepro allmodconfig support.
  We also remove the now-unused node_has_online_mem() definitions both
  for tile's asm/topology.h as well as in linux/topology.h itself"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  numa: remove stale node_has_online_mem() define
  arch/tile: move user_exit() to early kernel entry sequence
  tile: fix bug in setting PT_FLAGS_DISABLE_IRQ on kernel entry
  tile: fix tilepro casts for readl, writel, etc
  tile: fix a -Wframe-larger-than warning
  tile: include the syscall number in the backtrace
  MAINTAINERS: add git URL for tile
  arch/tile: adopt prepare_exit_to_usermode() model from x86
  tile/jump_label: add jump label support for TILE-Gx
  tile: define a macro ktext_writable_addr to get writable kernel text address
commit d05d82f711
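For orientation, the TILE-Gx jump-label support added in this merge plugs into the kernel's generic static-key machinery. Below is a minimal sketch of how a consumer would typically use it, assuming the generic <linux/jump_label.h> API of this kernel generation; the key and function names are hypothetical and not part of the merge.

/*
 * Illustrative sketch, not from this merge: a generic static-key consumer.
 * With HAVE_ARCH_JUMP_LABEL selected, the "if" below compiles to a single
 * NOP bundle that arch_jump_label_transform() later patches to a "j" branch.
 * All names here are hypothetical.
 */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(tile_demo_key);

void tile_demo_hot_path(void)
{
	/* No load/compare on the fast path; just a patchable NOP bundle. */
	if (static_branch_unlikely(&tile_demo_key))
		pr_info("tile_demo: slow path enabled\n");
}

void tile_demo_enable(void)
{
	/* Rewrites every branch site registered for this key. */
	static_branch_enable(&tile_demo_key);
}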
@@ -10811,6 +10811,7 @@ F:	net/tipc/
 TILE ARCHITECTURE
 M:	Chris Metcalf <cmetcalf@ezchip.com>
 W:	http://www.ezchip.com/scm/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
 S:	Supported
 F:	arch/tile/
 F:	drivers/char/tile-srom.c
@@ -141,6 +141,7 @@ config TILEGX
	select HAVE_KRETPROBES
	select HAVE_ARCH_KGDB
	select ARCH_SUPPORTS_ATOMIC_RMW
+	select HAVE_ARCH_JUMP_LABEL

 config TILEPRO
	def_bool !TILEGX
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+#ifndef __ASM_TILE_INSN_H
+#define __ASM_TILE_INSN_H
+
+#include <arch/opcode.h>
+
+static inline tilegx_bundle_bits NOP(void)
+{
+	return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+		create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+		create_Opcode_X0(RRR_0_OPCODE_X0) |
+		create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
+		create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
+		create_Opcode_X1(RRR_0_OPCODE_X1);
+}
+
+static inline tilegx_bundle_bits tilegx_gen_branch(unsigned long pc,
+						   unsigned long addr,
+						   bool link)
+{
+	tilegx_bundle_bits opcode_x0, opcode_x1;
+	long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
+
+	if (link) {
+		/* opcode: jal addr */
+		opcode_x1 =
+			create_Opcode_X1(JUMP_OPCODE_X1) |
+			create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
+			create_JumpOff_X1(pcrel_by_instr);
+	} else {
+		/* opcode: j addr */
+		opcode_x1 =
+			create_Opcode_X1(JUMP_OPCODE_X1) |
+			create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
+			create_JumpOff_X1(pcrel_by_instr);
+	}
+
+	/* opcode: fnop */
+	opcode_x0 =
+		create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+		create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+		create_Opcode_X0(RRR_0_OPCODE_X0);
+
+	return opcode_x1 | opcode_x0;
+}
+
+#endif /* __ASM_TILE_INSN_H */
@@ -161,14 +161,14 @@ extern void _tile_writew(u16 val, unsigned long addr);
 extern void _tile_writel(u32 val, unsigned long addr);
 extern void _tile_writeq(u64 val, unsigned long addr);

-#define __raw_readb(addr) _tile_readb((unsigned long)addr)
-#define __raw_readw(addr) _tile_readw((unsigned long)addr)
-#define __raw_readl(addr) _tile_readl((unsigned long)addr)
-#define __raw_readq(addr) _tile_readq((unsigned long)addr)
-#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
-#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
-#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
-#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
+#define __raw_readb(addr) _tile_readb((unsigned long)(addr))
+#define __raw_readw(addr) _tile_readw((unsigned long)(addr))
+#define __raw_readl(addr) _tile_readl((unsigned long)(addr))
+#define __raw_readq(addr) _tile_readq((unsigned long)(addr))
+#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)(addr))
+#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)(addr))
+#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)(addr))
+#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)(addr))

 #else /* CONFIG_PCI */

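A side note on the cast fix above (an illustration, not from the patch itself): a cast binds tighter than "+", so with a pointer-arithmetic argument the old macro body dropped the pointer scaling. A minimal user-space sketch of the difference, with hypothetical names:

#include <stdio.h>

#define OLD_CAST(addr) ((unsigned long)addr)	/* old macro body */
#define NEW_CAST(addr) ((unsigned long)(addr))	/* fixed macro body */

int main(void)
{
	unsigned int regs[4];
	/* old: expands to (unsigned long)regs + 1 -> one *byte* past regs */
	unsigned long a = OLD_CAST(regs + 1);
	/* new: (unsigned long)(regs + 1) -> one *element* (4 bytes) past regs */
	unsigned long b = NEW_CAST(regs + 1);

	printf("%lu\n", b - a);	/* prints 3 */
	return 0;
}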
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_JUMP_LABEL_H
+#define _ASM_TILE_JUMP_LABEL_H
+
+#include <arch/opcode.h>
+
+#define JUMP_LABEL_NOP_SIZE	TILE_BUNDLE_SIZE_IN_BYTES
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		"nop" "\n\t"
+		".pushsection __jump_table,  \"aw\"\n\t"
+		".quad 1b, %l[l_yes], %0 + %1 \n\t"
+		".popsection\n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		"j %l[l_yes]" "\n\t"
+		".pushsection __jump_table,  \"aw\"\n\t"
+		".quad 1b, %l[l_yes], %0 + %1 \n\t"
+		".popsection\n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+	return false;
+l_yes:
+	return true;
+}
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif /* _ASM_TILE_JUMP_LABEL_H */
@@ -321,6 +321,16 @@ static inline int pfn_valid(unsigned long pfn)
 #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
 #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

+/*
+ * The kernel text is mapped at MEM_SV_START as read-only.  To allow
+ * modifying kernel text, it is also mapped at PAGE_OFFSET as read-write.
+ * This macro converts a kernel address to its writable kernel text mapping,
+ * which is used to modify the text code on a running kernel by kgdb,
+ * ftrace, kprobe, jump label, etc.
+ */
+#define ktext_writable_addr(kaddr) \
+	((unsigned long)(kaddr) - MEM_SV_START + PAGE_OFFSET)
+
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 extern pte_t *virt_to_kpte(unsigned long kaddr);
@@ -212,7 +212,7 @@ static inline void release_thread(struct task_struct *dead_task)
	/* Nothing for now */
 }

-extern int do_work_pending(struct pt_regs *regs, u32 flags);
+extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);


 /*
@@ -140,10 +140,14 @@ extern void _cpu_idle(void);
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)

+/* Work to do as we loop to exit to user space. */
+#define _TIF_WORK_MASK \
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
+
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
-	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
+	(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)

 /* Work to do at syscall entry. */
 #define _TIF_SYSCALL_ENTRY_WORK \
@@ -44,9 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
 /* For now, use numa node -1 for global allocation. */
 #define pcibus_to_node(bus) ((void)(bus), -1)

-/* By definition, we create nodes based on online memory. */
-#define node_has_online_mem(nid) 1
-
 #endif /* CONFIG_NUMA */

 #include <asm-generic/topology.h>
@@ -32,5 +32,6 @@ obj-$(CONFIG_TILE_HVGLUE_TRACE)	+= hvglue_trace.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o mcount_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o

 obj-y				+= vdso/
@@ -20,21 +20,12 @@
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/sections.h>
+#include <asm/insn.h>

 #include <arch/opcode.h>

 #ifdef CONFIG_DYNAMIC_FTRACE

-static inline tilegx_bundle_bits NOP(void)
-{
-	return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
-		create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
-		create_Opcode_X0(RRR_0_OPCODE_X0) |
-		create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
-		create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
-		create_Opcode_X1(RRR_0_OPCODE_X1);
-}
-
 static int machine_stopped __read_mostly;

 int ftrace_arch_code_modify_prepare(void)
@@ -117,7 +108,7 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
		return -EINVAL;

	/* Operate on writable kernel text mapping. */
-	pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
+	pc_wr = ktext_writable_addr(pc);

	if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
		return -EPERM;
@@ -572,7 +572,7 @@ intvec_\vecname:
	}
	wh64    r52

-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
@@ -588,6 +588,9 @@ intvec_\vecname:
	{ move r32, r2; move r33, r3 }
	.endif
	TRACE_IRQS_OFF
+#ifdef CONFIG_CONTEXT_TRACKING
+	jal     context_tracking_user_exit
+#endif
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }
@@ -845,18 +848,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
	FEEDBACK_REENTER(interrupt_return)

-	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 * Get base of stack in r32.
-	 */
-	{
-	 GET_THREAD_INFO(r32)
-	 movei  r33, 0
-	}
-
-.Lretry_work_pending:
	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
@@ -867,33 +858,27 @@ STD_ENTRY(interrupt_return)
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */

-
-	/* Check to see if there is any work to do before returning to user. */
-	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, lo16(_TIF_ALLWORK_MASK)
-	}
-	{
-	 lw     r29, r29
-	 auli   r1, r1, ha16(_TIF_ALLWORK_MASK)
-	}
-	and     r1, r29, r1
-	bzt     r1, .Lrestore_all
-
	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling, notify-resume, or single-step.  Call out to C
-	 * code to figure out exactly what we need to do for each flag bit,
-	 * then if necessary, reload the flags and recheck.
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
	 */
+	GET_THREAD_INFO(r21)
+	{
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, lo16(_TIF_ALLWORK_MASK)
+	}
+	{
+	 lw     r22, r22
+	 auli   r20, r20, ha16(_TIF_ALLWORK_MASK)
+	}
+	and     r1, r22, r20
	{
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnz    r33, 1f
+	 bzt    r1, .Lrestore_all
	}
	push_extra_callee_saves r0
-	movei   r33, 1
-1:	jal     do_work_pending
-	bnz     r0, .Lretry_work_pending
+	jal     prepare_exit_to_usermode

	/*
	 * In the NMI case we
@@ -1327,7 +1312,7 @@ STD_ENTRY(ret_from_kernel_thread)
	FEEDBACK_REENTER(ret_from_kernel_thread)
	{
	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
	}
	STD_ENDPROC(ret_from_kernel_thread)

@@ -658,7 +658,7 @@ intvec_\vecname:
	 */
	mfspr   r32, SPR_EX_CONTEXT_K_1
	{
-	 IS_KERNEL_EX1(r22, r22)
+	 IS_KERNEL_EX1(r32, r32)
	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	}
	beqzt   r32, 1f        /* zero if from user space */
@@ -753,7 +753,7 @@ intvec_\vecname:
	}
	wh64    r52

-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
@@ -769,6 +769,9 @@ intvec_\vecname:
	{ move r32, r2; move r33, r3 }
	.endif
	TRACE_IRQS_OFF
+#ifdef CONFIG_CONTEXT_TRACKING
+	jal     context_tracking_user_exit
+#endif
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }
@@ -878,20 +881,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
	FEEDBACK_REENTER(interrupt_return)

-	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 */
-	{
-	 movei  r33, 0
-	 move   r32, sp
-	}
-
-	/* Get base of stack in r32. */
-	EXTRACT_THREAD_INFO(r32)
-
-.Lretry_work_pending:
	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
@@ -902,33 +891,28 @@ STD_ENTRY(interrupt_return)
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */

-
-	/* Check to see if there is any work to do before returning to user. */
-	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
-	}
-	{
-	 ld     r29, r29
-	 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
-	}
-	and     r1, r29, r1
-	beqzt   r1, .Lrestore_all
-
	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling or notify-resume.  Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
	 */
+	move    r21, sp
+	EXTRACT_THREAD_INFO(r21)
+	{
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, hw1_last(_TIF_ALLWORK_MASK)
+	}
+	{
+	 ld     r22, r22
+	 shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
+	}
+	and     r1, r22, r20
	{
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnez   r33, 1f
+	 beqzt  r1, .Lrestore_all
	}
	push_extra_callee_saves r0
-	movei   r33, 1
-1:	jal     do_work_pending
-	bnez    r0, .Lretry_work_pending
+	jal     prepare_exit_to_usermode

	/*
	 * In the NMI case we
@@ -1411,7 +1395,7 @@ STD_ENTRY(ret_from_kernel_thread)
	FEEDBACK_REENTER(ret_from_kernel_thread)
	{
	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
	}
	STD_ENDPROC(ret_from_kernel_thread)

@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ * jump label TILE-Gx support
+ */
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __jump_label_transform(struct jump_entry *e,
+				   enum jump_label_type type)
+{
+	tilegx_bundle_bits opcode;
+	/* Operate on writable kernel text mapping. */
+	unsigned long pc_wr = ktext_writable_addr(e->code);
+
+	if (type == JUMP_LABEL_JMP)
+		opcode = tilegx_gen_branch(e->code, e->target, false);
+	else
+		opcode = NOP();
+
+	*(tilegx_bundle_bits *)pc_wr = opcode;
+	/* Make sure that above mem writes were issued towards the memory. */
+	smp_wmb();
+}
+
+void arch_jump_label_transform(struct jump_entry *e,
+				enum jump_label_type type)
+{
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
+	__jump_label_transform(e, type);
+	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+}
+
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
+						enum jump_label_type type)
+{
+	__jump_label_transform(e, type);
+}
+
+#endif /* HAVE_JUMP_LABEL */
@@ -164,7 +164,7 @@ static unsigned long writable_address(unsigned long addr)
	unsigned long ret = 0;

	if (core_kernel_text(addr))
-		ret = addr - MEM_SV_START + PAGE_OFFSET;
+		ret = ktext_writable_addr(addr);
	else if (is_module_text_address(addr))
		ret = addr;
	else
@@ -116,7 +116,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
-	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
+	addr_wr = ktext_writable_addr(p->addr);

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
		sizeof(breakpoint_insn)))
@@ -131,7 +131,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *kp)
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
-	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
+	addr_wr = ktext_writable_addr(kp->addr);

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
		sizeof(kp->opcode)))
@@ -462,54 +462,57 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,

 /*
  * This routine is called on return from interrupt if any of the
- * TIF_WORK_MASK flags are set in thread_info->flags.  It is
- * entered with interrupts disabled so we don't miss an event
- * that modified the thread_info flags.  If any flag is set, we
- * handle it and return, and the calling assembly code will
- * re-disable interrupts, reload the thread flags, and call back
- * if more flags need to be handled.
- *
- * We return whether we need to check the thread_info flags again
- * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
- * important that it be tested last, and then claim that we don't
- * need to recheck the flags.
+ * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event that
+ * modified the thread_info flags.  We loop until all the tested flags
+ * are clear.  Note that the function is called on certain conditions
+ * that are not listed in the loop condition here (e.g. SINGLESTEP)
+ * which guarantees we will do those things once, and redo them if any
+ * of the other work items is re-done, but won't continue looping if
+ * all the other work is done.
  */
-int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
 {
-	/* If we enter in kernel mode, do nothing and exit the caller loop. */
-	if (!user_mode(regs))
-		return 0;
-
-	user_exit();
-
-	/* Enable interrupts; they are disabled again on return to caller. */
-	local_irq_enable();
-
-	if (thread_info_flags & _TIF_NEED_RESCHED) {
-		schedule();
-		return 1;
-	}
+	if (WARN_ON(!user_mode(regs)))
+		return;
+
+	do {
+		local_irq_enable();
+
+		if (thread_info_flags & _TIF_NEED_RESCHED)
+			schedule();
+
 #if CHIP_HAS_TILE_DMA()
-	if (thread_info_flags & _TIF_ASYNC_TLB) {
-		do_async_page_fault(regs);
-		return 1;
-	}
+		if (thread_info_flags & _TIF_ASYNC_TLB)
+			do_async_page_fault(regs);
 #endif
-	if (thread_info_flags & _TIF_SIGPENDING) {
-		do_signal(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_SINGLESTEP)
+
+		if (thread_info_flags & _TIF_SIGPENDING)
+			do_signal(regs);
+
+		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+			clear_thread_flag(TIF_NOTIFY_RESUME);
+			tracehook_notify_resume(regs);
+		}
+
+		local_irq_disable();
+		thread_info_flags = READ_ONCE(current_thread_info()->flags);
+
+	} while (thread_info_flags & _TIF_WORK_MASK);
+
+	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
-
-	user_enter();
-
-	return 0;
+#ifndef __tilegx__
+		/*
+		 * FIXME: on tilepro, since we enable interrupts in
+		 * this routine, it's possible that we miss a signal
+		 * or other asynchronous event.
+		 */
+		local_irq_disable();
+#endif
+	}
+
+	user_enter();
 }

 unsigned long get_wchan(struct task_struct *p)
@@ -255,13 +255,6 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 {
	u32 work = ACCESS_ONCE(current_thread_info()->flags);

-	/*
-	 * If TIF_NOHZ is set, we are required to call user_exit() before
-	 * doing anything that could touch RCU.
-	 */
-	if (work & _TIF_NOHZ)
-		user_exit();
-
	if (secure_computing() == -1)
		return -1;

@@ -280,12 +273,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
 {
	long errno;

-	/*
-	 * We may come here right after calling schedule_user()
-	 * in which case we can be in RCU user mode.
-	 */
-	user_exit();
-
	/*
	 * The standard tile calling convention returns the value (or negative
	 * errno) in r0, and zero (or positive errno) in r1.
@@ -322,7 +309,5 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
 /* Handle synthetic interrupt delivered only by the simulator. */
 void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
 {
-	enum ctx_state prev_state = exception_enter();
	send_sigtrap(current, regs);
-	exception_exit(prev_state);
 }
@@ -882,7 +882,7 @@ static int __init node_neighbors(int node, int cpu,

 static void __init setup_numa_mapping(void)
 {
-	int distance[MAX_NUMNODES][NR_CPUS];
+	u8 distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
@@ -23,7 +23,6 @@
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/prctl.h>
-#include <linux/context_tracking.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -739,7 +738,6 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

 void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 {
-	enum ctx_state prev_state = exception_enter();
	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
@@ -756,7 +754,6 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
		send_sigtrap(current, regs);
	}
-	exception_exit(prev_state);
 }


@@ -78,8 +78,7 @@ static bool read_memory_func(void *result, unsigned long address,
 /* Return a pt_regs pointer for a valid fault handler frame */
 static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 {
-	const char *fault = NULL;   /* happy compiler */
-	char fault_buf[64];
+	char fault[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

@@ -90,14 +89,14 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
-	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
-		fault = "syscall";
-	else {
-		if (kbt->verbose) {     /* else we aren't going to use it */
-			snprintf(fault_buf, sizeof(fault_buf),
-				 "interrupt %ld", p->faultnum);
-			fault = fault_buf;
-		}
+	if (kbt->verbose) {     /* else we aren't going to use it */
+		if (p->faultnum == INT_SWINT_1 ||
+		    p->faultnum == INT_SWINT_1_SIGRETURN)
+			snprintf(fault, sizeof(fault),
+				 "syscall %ld", p->regs[TREG_SYSCALL_NR]);
+		else
+			snprintf(fault, sizeof(fault),
+				 "interrupt %ld", p->faultnum);
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
@@ -20,7 +20,6 @@
 #include <linux/reboot.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
-#include <linux/context_tracking.h>
 #include <asm/stack.h>
 #include <asm/traps.h>
 #include <asm/setup.h>
@@ -254,7 +253,6 @@ static int do_bpt(struct pt_regs *regs)
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
		       unsigned long reason)
 {
-	enum ctx_state prev_state = exception_enter();
	siginfo_t info = { 0 };
	int signo, code;
	unsigned long address = 0;
@@ -263,7 +261,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,

	/* Handle breakpoints, etc. */
	if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
-		goto done;
+		return;

	/* Re-enable interrupts, if they were previously enabled. */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
@@ -277,7 +275,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
		const char *name;
		char buf[100];
		if (fixup_exception(regs))  /* ILL_TRANS or UNALIGN_DATA */
-			goto done;
+			return;
		if (fault_num >= 0 &&
		    fault_num < ARRAY_SIZE(int_name) &&
		    int_name[fault_num] != NULL)
@@ -319,7 +317,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
	case INT_GPV:
 #if CHIP_HAS_TILE_DMA()
		if (retry_gpv(reason))
-			goto done;
+			return;
 #endif
		/*FALLTHROUGH*/
	case INT_UDN_ACCESS:
@@ -346,7 +344,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
			if (!state ||
			    (void __user *)(regs->pc) != state->buffer) {
				single_step_once(regs);
-				goto done;
+				return;
			}
		}
 #endif
@@ -390,9 +388,6 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
	if (signo != SIGTRAP)
		trace_unhandled_signal("trap", regs, address, signo);
	force_sig_info(signo, &info, current);
-
-done:
-	exception_exit(prev_state);
 }

 void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/prctl.h>
-#include <linux/context_tracking.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -1449,7 +1448,6 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,

 void do_unaligned(struct pt_regs *regs, int vecnum)
 {
-	enum ctx_state prev_state = exception_enter();
	tilegx_bundle_bits __user  *pc;
	tilegx_bundle_bits bundle;
	struct thread_info *info = current_thread_info();
@@ -1503,7 +1501,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
				 *((tilegx_bundle_bits *)(regs->pc)));
			jit_bundle_gen(regs, bundle, align_ctl);
		}
-		goto done;
+		return;
	}

	/*
@@ -1527,7 +1525,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)

		trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
-		goto done;
+		return;
	}


|
@ -1544,7 +1542,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
|
||||||
trace_unhandled_signal("segfault in unalign fixup", regs,
|
trace_unhandled_signal("segfault in unalign fixup", regs,
|
||||||
(unsigned long)info.si_addr, SIGSEGV);
|
(unsigned long)info.si_addr, SIGSEGV);
|
||||||
force_sig_info(info.si_signo, &info, current);
|
force_sig_info(info.si_signo, &info, current);
|
||||||
goto done;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!info->unalign_jit_base) {
|
if (!info->unalign_jit_base) {
|
||||||
|
@ -1579,7 +1577,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
|
||||||
|
|
||||||
if (IS_ERR((void __force *)user_page)) {
|
if (IS_ERR((void __force *)user_page)) {
|
||||||
pr_err("Out of kernel pages trying do_mmap\n");
|
pr_err("Out of kernel pages trying do_mmap\n");
|
||||||
goto done;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Save the address in the thread_info struct */
|
/* Save the address in the thread_info struct */
|
||||||
|
@ -1592,9 +1590,6 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
|
||||||
|
|
||||||
/* Generate unalign JIT */
|
/* Generate unalign JIT */
|
||||||
jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
|
jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
|
||||||
|
|
||||||
done:
|
|
||||||
exception_exit(prev_state);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* __tilegx__ */
|
#endif /* __tilegx__ */
|
||||||
|
|
|
@@ -35,7 +35,6 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
-#include <linux/context_tracking.h>

 #include <asm/pgalloc.h>
 #include <asm/sections.h>
@@ -845,9 +844,7 @@ static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
 void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
 {
-	enum ctx_state prev_state = exception_enter();
	__do_page_fault(regs, fault_num, address, write);
-	exception_exit(prev_state);
 }

 #if CHIP_HAS_TILE_DMA()
@@ -34,10 +34,6 @@
 #include <linux/percpu.h>
 #include <asm/topology.h>

-#ifndef node_has_online_mem
-#define node_has_online_mem(nid) (1)
-#endif
-
 #ifndef nr_cpus_node
 #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
 #endif