/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
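
/*
 * Illustrative usage sketch (not part of the original header): the macros
 * above let byte extraction/insertion be written once for both endiannesses,
 * e.g.
 *
 *	mov	r2, r0, get_byte_1	@ memory byte 1 of r0 into bits 7:0
 *	and	r2, r2, #255		@ mask off the remaining bytes
 *	orr	r3, r3, r2, put_byte_2	@ place it back as memory byte 2 of r3
 *
 * lspull/lspush are paired the same way when assembling words from
 * unaligned data (shift the old word down, shift the new word up).
 */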

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
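
/*
 * Illustrative usage sketch: ARM_BE8() normally wraps a byte swap that is
 * only needed when the kernel itself runs BE8, e.g.
 *
 *	ldr	r0, [r4]		@ load a little-endian data word
 * ARM_BE8(rev	r0, r0)			@ swap it only on a BE8 kernel
 */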

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
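
/*
 * Illustrative usage sketch: copy loops emit the preload only on CPUs that
 * actually have the instruction, e.g.
 *
 *	PLD(	pld	[r1, #0]	)
 */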

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the cache
 * is not set to write-allocate (this would need further testing on XScale
 * when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK 0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
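
/*
 * Illustrative usage sketch: the plain variants also drive the irq-flags
 * tracing hooks; the _notrace forms are for paths that must not call back
 * into that code.  A typical critical section looks like:
 *
 *	disable_irq			@ mask IRQs, trace_hardirqs_off
 *	...				@ code that must not be interrupted
 *	enable_irq			@ trace_hardirqs_on, unmask IRQs
 */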

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
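
/*
 * Illustrative usage sketch: the save/restore macros are used as a pair
 * around a region that must run with IRQs masked, e.g.
 *
 *	save_and_disable_irqs r9	@ old state into r9, IRQs now masked
 *	...
 *	restore_irqs r9			@ put the saved state back
 */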

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
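
/*
 * Illustrative usage sketch: badr is used where the generated address may
 * later be loaded into the PC, so that the Thumb bit is set on Thumb-2
 * kernels, e.g.
 *
 *	badr	lr, 1f			@ "return" to the local label 1:
 *
 * (safe_svcmode_maskall below uses this pattern with "badr lr, 2f".)
 */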

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
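
/*
 * Illustrative usage sketch: masking the low bits of the stack pointer
 * yields the base of the current kernel stack, where thread_info lives, e.g.
 *
 *	get_thread_info r9		@ r9 = current thread_info
 *	ldr	r1, [r9, #TI_FLAGS]	@ then pick fields via asm-offsets
 */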

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif
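
/*
 * Illustrative usage sketch (register choice is hypothetical): callers pass
 * a thread_info pointer plus a scratch register, so a non-preemptible
 * region can be opened and closed with
 *
 *	get_thread_info r10
 *	inc_preempt_count r10, r4	@ disable preemption
 *	...
 *	dec_preempt_count r10, r4	@ allow preemption again
 */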

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
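
/*
 * Illustrative usage sketch: wrapping a user access records the instruction
 * in the exception table so that a fault branches to the given label
 * (the labels below are hypothetical), e.g.
 *
 * USER(	ldrt	r3, [r0] )		@ fault -> default label 9001:
 * USERL(9002f,	ldmia	r0!, {r1 - r4} )	@ fault -> explicit label 9002:
 */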

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
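
/*
 * Illustrative usage sketch: the SMP variant is assembled in place and the
 * UP alternative is stashed in .alt.smp.init for boot-time patching, as the
 * smp_dmb macro below does:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */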

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
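
/*
 * Usage note on the "mode" argument, as documented when it was introduced:
 *
 *	smp_dmb	arm	@ assumes 4-byte instructions (for ARM code, e.g. kuser)
 *	smp_dmb		@ uses W() to ensure 4-byte instructions for ALT_SMP()
 */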

#if defined(CONFIG_CPU_V7M)
/*
 * setmode is used to assert that the CPU is in SVC mode during boot. For
 * v7-M this is done in __v7m_setup, so setmode can be empty here.
 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time;
 * you cannot return to the original mode.
 */
	.macro	safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
	.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
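
/*
 * Illustrative usage sketch: strusr/ldrusr emit the user-access form of
 * str/ldr together with an exception-table entry, e.g.
 *
 *	ldrusr	r3, r0, 4		@ one word from user memory at [r0],
 *					@ r0 += 4, faults branch to 9001f
 */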

	/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
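
/*
 * Illustrative usage sketch: declares a sized, NUL-terminated object, e.g.
 *
 *	string	cpu_name, "Some CPU"	@ hypothetical symbol and text
 */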

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
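
/*
 * Illustrative usage sketch: "ret lr" is the usual function return and
 * becomes "bx lr" on v6+ so interworking and return prediction work;
 * returns through any other register stay "mov pc, rN", e.g.
 *
 *	ret	lr			@ bx lr on v6+, mov pc, lr before
 *	reteq	r4			@ conditional variant, moveq pc, r4
 */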

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
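
/*
 * Illustrative usage sketch (message text is hypothetical): emits an
 * undefined instruction plus a __bug_table entry so assembly code can
 * report a fatal condition much like BUG(), e.g.
 *
 *	bug	"unexpected state", __LINE__
 */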

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif
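
/*
 * Illustrative usage sketch (label is hypothetical): records an assembly
 * entry point in the kprobes blacklist so it cannot be probed, e.g.
 *
 * _ASM_NOKPROBE(my_exception_stub)
 */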

#endif /* __ASM_ASSEMBLER_H__ */