/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains exception handler for address error exception with the
 * special capability to execute faulting instructions in software. The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data to unaligned addresses is a bad practice even on Intel where
 * only the performance is affected. Much worse is that such code is non-
 * portable. Due to several programs that die on MIPS due to alignment
 * problems I decided to implement this handler anyway though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this somewhen in the future when the alignment
 * problems with user programs have been fixed. For programmers this is the
 * right way to go.
 *
 * Fixing address errors is a per process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 for disabling software emulation, enabled otherwise.
 *
 * Below a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for(i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in the practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
|
2013-05-29 07:07:19 +08:00
|
|
|
#include <linux/context_tracking.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/signal.h>
|
|
|
|
#include <linux/smp.h>
|
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 05:22:52 +08:00
|
|
|
#include <linux/sched.h>
|
2007-06-29 23:55:48 +08:00
|
|
|
#include <linux/debugfs.h>
|
2010-10-12 19:37:21 +08:00
|
|
|
#include <linux/perf_event.h>
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/asm.h>
|
|
|
|
#include <asm/branch.h>
|
|
|
|
#include <asm/byteorder.h>
|
2009-11-24 09:24:58 +08:00
|
|
|
#include <asm/cop2.h>
|
2015-09-23 01:10:55 +08:00
|
|
|
#include <asm/debug.h>
|
2013-03-26 01:09:02 +08:00
|
|
|
#include <asm/fpu.h>
|
|
|
|
#include <asm/fpu_emulator.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/inst.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-01-22 19:59:30 +08:00
|
|
|
#define STR(x) __STR(x)
|
2005-04-17 06:20:36 +08:00
|
|
|
#define __STR(x) #x
|
|
|
|
|
2007-06-29 23:55:48 +08:00
|
|
|
enum {
|
|
|
|
UNALIGNED_ACTION_QUIET,
|
|
|
|
UNALIGNED_ACTION_SIGNAL,
|
|
|
|
UNALIGNED_ACTION_SHOW,
|
|
|
|
};
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
static u32 unaligned_instructions;
|
|
|
|
static u32 unaligned_action;
|
|
|
|
#else
|
|
|
|
#define unaligned_action UNALIGNED_ACTION_QUIET
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
2007-06-29 23:55:48 +08:00
|
|
|
extern void show_registers(struct pt_regs *regs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
#ifdef __BIG_ENDIAN
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadHW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ (".set\tnoat\n" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_lb("%0", "0(%2)")"\n" \
|
|
|
|
"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
|
2013-03-26 02:18:07 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
"3:\t.set\tat\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_lwl("%0", "(%2)")"\n" \
|
|
|
|
"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
|
2013-03-26 02:18:07 +08:00
|
|
|
"li\t%1, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
|
|
|
/* For CPUs without lwl instruction */
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2014-10-28 18:42:23 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tpush\n" \
|
|
|
|
".set\tnoat\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:"type##_lb("%0", "0(%2)")"\n\t" \
|
|
|
|
"2:"type##_lbu("$1", "1(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"3:"type##_lbu("$1", "2(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"4:"type##_lbu("$1", "3(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
".set\tpop\n" \
|
|
|
|
"10:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"11:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t10b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t3b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t4b, 11b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadHWU(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tnoat\n" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_lbu("%0", "0(%2)")"\n" \
|
|
|
|
"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
|
2013-03-26 02:18:07 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".set\tat\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadWU(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_lwl("%0", "(%2)")"\n" \
|
|
|
|
"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
|
2013-03-26 02:18:07 +08:00
|
|
|
"dsll\t%0, %0, 32\n\t" \
|
|
|
|
"dsrl\t%0, %0, 32\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
"\t.section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadDW(addr, value, res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
"1:\tldl\t%0, (%2)\n" \
|
|
|
|
"2:\tldr\t%0, 7(%2)\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
"\t.section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
|
|
|
/* For CPUs without lwl and ldl instructions */
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadWU(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2014-10-28 18:42:23 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tpush\n\t" \
|
|
|
|
".set\tnoat\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:"type##_lbu("%0", "0(%2)")"\n\t" \
|
|
|
|
"2:"type##_lbu("$1", "1(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"3:"type##_lbu("$1", "2(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"4:"type##_lbu("$1", "3(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"sll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
".set\tpop\n" \
|
|
|
|
"10:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"11:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t10b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t3b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t4b, 11b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _LoadDW(addr, value, res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2014-10-28 18:42:23 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tpush\n\t" \
|
|
|
|
".set\tnoat\n\t" \
|
|
|
|
"1:lb\t%0, 0(%2)\n\t" \
|
|
|
|
"2:lbu\t $1, 1(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"3:lbu\t$1, 2(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"4:lbu\t$1, 3(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"5:lbu\t$1, 4(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"6:lbu\t$1, 5(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"7:lbu\t$1, 6(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"8:lbu\t$1, 7(%2)\n\t" \
|
|
|
|
"dsll\t%0, 0x8\n\t" \
|
|
|
|
"or\t%0, $1\n\t" \
|
|
|
|
"li\t%1, 0\n" \
|
|
|
|
".set\tpop\n\t" \
|
|
|
|
"10:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"11:\tli\t%1, %3\n\t" \
|
|
|
|
"j\t10b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t3b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t4b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t5b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t6b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t7b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t8b, 11b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (value), "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _StoreHW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tnoat\n" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_sb("%1", "1(%2)")"\n" \
|
2013-03-26 02:18:07 +08:00
|
|
|
"srl\t$1, %1, 0x8\n" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"2:\t"type##_sb("$1", "0(%2)")"\n" \
|
2013-03-26 02:18:07 +08:00
|
|
|
".set\tat\n\t" \
|
|
|
|
"li\t%0, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%0, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (value), "r" (addr), "i" (-EFAULT));\
|
|
|
|
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _StoreW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:\t"type##_swl("%1", "(%2)")"\n" \
|
|
|
|
"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
|
2013-03-26 02:18:07 +08:00
|
|
|
"li\t%0, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%0, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (value), "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _StoreDW(addr, value, res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2013-03-26 02:18:07 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
"1:\tsdl\t%1,(%2)\n" \
|
|
|
|
"2:\tsdr\t%1, 7(%2)\n\t" \
|
|
|
|
"li\t%0, 0\n" \
|
|
|
|
"3:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"4:\tli\t%0, %3\n\t" \
|
|
|
|
"j\t3b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 4b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 4b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=r" (res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "r" (value), "r" (addr), "i" (-EFAULT)); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2015-03-09 22:54:50 +08:00
|
|
|
#define _StoreW(addr, value, res, type) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2014-10-28 18:42:23 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tpush\n\t" \
|
|
|
|
".set\tnoat\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"1:"type##_sb("%1", "3(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"srl\t$1, %1, 0x8\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"2:"type##_sb("$1", "2(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"srl\t$1, $1, 0x8\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"3:"type##_sb("$1", "1(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
"srl\t$1, $1, 0x8\n\t" \
|
2015-03-09 22:54:50 +08:00
|
|
|
"4:"type##_sb("$1", "0(%2)")"\n\t" \
|
2014-10-28 18:42:23 +08:00
|
|
|
".set\tpop\n\t" \
|
|
|
|
"li\t%0, 0\n" \
|
|
|
|
"10:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"11:\tli\t%0, %3\n\t" \
|
|
|
|
"j\t10b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t3b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t4b, 11b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (res) \
|
|
|
|
: "r" (value), "r" (addr), "i" (-EFAULT) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "memory"); \
|
|
|
|
} while(0)
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2015-06-23 19:02:00 +08:00
|
|
|
#define _StoreDW(addr, value, res) \
|
2015-03-09 22:54:51 +08:00
|
|
|
do { \
|
2014-10-28 18:42:23 +08:00
|
|
|
__asm__ __volatile__ ( \
|
|
|
|
".set\tpush\n\t" \
|
|
|
|
".set\tnoat\n\t" \
|
|
|
|
"1:sb\t%1, 7(%2)\n\t" \
|
|
|
|
"dsrl\t$1, %1, 0x8\n\t" \
|
|
|
|
"2:sb\t$1, 6(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"3:sb\t$1, 5(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"4:sb\t$1, 4(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"5:sb\t$1, 3(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"6:sb\t$1, 2(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"7:sb\t$1, 1(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
"8:sb\t$1, 0(%2)\n\t" \
|
|
|
|
"dsrl\t$1, $1, 0x8\n\t" \
|
|
|
|
".set\tpop\n\t" \
|
|
|
|
"li\t%0, 0\n" \
|
|
|
|
"10:\n\t" \
|
|
|
|
".insn\n\t" \
|
|
|
|
".section\t.fixup,\"ax\"\n\t" \
|
|
|
|
"11:\tli\t%0, %3\n\t" \
|
|
|
|
"j\t10b\n\t" \
|
|
|
|
".previous\n\t" \
|
|
|
|
".section\t__ex_table,\"a\"\n\t" \
|
|
|
|
STR(PTR)"\t1b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t2b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t3b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t4b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t5b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t6b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t7b, 11b\n\t" \
|
|
|
|
STR(PTR)"\t8b, 11b\n\t" \
|
|
|
|
".previous" \
|
|
|
|
: "=&r" (res) \
|
|
|
|
: "r" (value), "r" (addr), "i" (-EFAULT) \
|
2015-03-09 22:54:51 +08:00
|
|
|
: "memory"); \
|
|
|
|
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2014-10-28 18:42:23 +08:00
|
|
|
|
|
|
|
#else /* __BIG_ENDIAN */
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Load a signed halfword from an unaligned address (little-endian
 * section: the sign-carrying byte is at offset 1).  'type' selects the
 * kernel/user (EVA) load flavour.  On success res = 0; if either byte
 * load faults the .fixup stub sets res = -EFAULT and value is garbage.
 * $at is used as a scratch register, hence .set noat/.set at.
 */
#define _LoadHW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.set\tat\n\t"                  \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Load a signed word from an unaligned address using the lwl/lwr pair
 * (CONFIG_CPU_HAS_LOAD_STORE_LR).  'type' selects the kernel/user (EVA)
 * load flavour.  res = 0 on success, -EFAULT via the .fixup stub if
 * either access faults.
 */
#define _LoadW(addr, value, res, type)   \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
|
|
|
/* For CPUs without lwl instruction */
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Byte-wise replacement for the lwl/lwr _LoadW above, for CPUs without
 * the unaligned load instructions (e.g. MIPSR6).  Assembles the word
 * MSB-first starting at offset 3 (little-endian section), shifting the
 * accumulator left 8 bits per byte.  Any faulting byte access jumps to
 * the .fixup stub which sets res = -EFAULT.
 */
#define _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n"			    \
			".set\tnoat\n\t"		    \
			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));	    \
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Load an unsigned (zero-extended) halfword from an unaligned address:
 * both bytes are loaded with lbu, so no sign extension occurs.
 * res = 0 on success, -EFAULT via the .fixup stub on a fault.
 */
#define _LoadHWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Load an unsigned word from an unaligned address using lwl/lwr, then
 * zero-extend it to 64 bits with a dsll/dsrl pair (lwl/lwr sign-extend
 * into a 64-bit register).  res = 0 on success, -EFAULT on a fault.
 */
#define _LoadWU(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Load a doubleword from an unaligned address using the ldl/ldr pair
 * (64-bit kernels with CONFIG_CPU_HAS_LOAD_STORE_LR).  res = 0 on
 * success, -EFAULT via the .fixup stub if either access faults.
 */
#define _LoadDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, 7(%2)\n"              \
			"2:\tldr\t%0, (%2)\n\t"             \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
|
|
|
/* For CPUs without lwl and ldl instructions */
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Byte-wise replacement for the lwl/lwr _LoadWU above, for CPUs
 * without the unaligned load instructions.  All four bytes are loaded
 * with lbu so the result is naturally zero-extended.  res = 0 on
 * success, -EFAULT via the .fixup stub if any byte access faults.
 */
#define _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));	    \
} while(0)
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Byte-wise replacement for the ldl/ldr _LoadDW above, for CPUs
 * without the unaligned load instructions.  Assembles the doubleword
 * MSB-first from offset 7 down to 0 (little-endian section): the first
 * byte is loaded with lb so the final value is sign-extended, the
 * remaining seven with lbu.  res = 0 on success, -EFAULT via the
 * .fixup stub if any byte access faults.
 */
#define _LoadDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (			    \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:lb\t%0, 7(%2)\n\t"		    \
			"2:lbu\t$1, 6(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"3:lbu\t$1, 5(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"4:lbu\t$1, 4(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"5:lbu\t$1, 3(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"6:lbu\t$1, 2(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"7:lbu\t$1, 1(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"8:lbu\t$1, 0(%2)\n\t"		    \
			"dsll\t%0, 0x8\n\t"		    \
			"or\t%0, $1\n\t"		    \
			"li\t%1, 0\n"			    \
			".set\tpop\n\t"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%1, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (value), "=r" (res)	    \
			: "r" (addr), "i" (-EFAULT));	    \
} while(0)
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Store a halfword to an unaligned address as two byte stores
 * (little-endian section: low byte at offset 0).  'type' selects the
 * kernel/user (EVA) store flavour.  res = 0 on success, -EFAULT via
 * the .fixup stub if either store faults.
 */
#define _StoreHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
			"srl\t$1,%1, 0x8\n"                 \
			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Store a word to an unaligned address using the swl/swr pair
 * (CONFIG_CPU_HAS_LOAD_STORE_LR).  res = 0 on success, -EFAULT via
 * the .fixup stub if either access faults.
 */
#define _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_swl("%1", "3(%2)")"\n" \
			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Store a doubleword to an unaligned address using the sdl/sdr pair
 * (64-bit kernels with CONFIG_CPU_HAS_LOAD_STORE_LR).  res = 0 on
 * success, -EFAULT via the .fixup stub if either access faults.
 */
#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1, 7(%2)\n"              \
			"2:\tsdr\t%1, (%2)\n\t"             \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
|
|
|
/* For CPUs without swl and sdl instructions */
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Byte-wise replacement for the swl/swr _StoreW above, for CPUs
 * without the unaligned store instructions.  Stores the word LSB-first
 * from offset 0 to 3 (little-endian section), shifting the source
 * right 8 bits per byte.  res = 0 on success, -EFAULT via the .fixup
 * stub if any byte store faults.  "memory" clobber because the asm
 * writes through addr.
 */
#define _StoreW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
			"srl\t$1, %1, 0x8\n\t"		    \
			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"		    \
			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"		    \
			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (res)			    \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");			    \
} while(0)
|
2014-10-28 18:42:23 +08:00
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
/*
 * Byte-wise replacement for the sdl/sdr _StoreDW above, for CPUs
 * without the unaligned store instructions.  Stores the doubleword
 * LSB-first from offset 0 to 7 (little-endian section), shifting the
 * source right 8 bits per byte.  res = 0 on success, -EFAULT via the
 * .fixup stub if any byte store faults.  "memory" clobber because the
 * asm writes through addr.
 */
#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"		    \
			".set\tnoat\n\t"		    \
			"1:sb\t%1, 0(%2)\n\t"		    \
			"dsrl\t$1, %1, 0x8\n\t"		    \
			"2:sb\t$1, 1(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"3:sb\t$1, 2(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"4:sb\t$1, 3(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"5:sb\t$1, 4(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"6:sb\t$1, 5(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"7:sb\t$1, 6(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			"8:sb\t$1, 7(%2)\n\t"		    \
			"dsrl\t$1, $1, 0x8\n\t"		    \
			".set\tpop\n\t"			    \
			"li\t%0, 0\n"			    \
			"10:\n\t"			    \
			".insn\n\t"			    \
			".section\t.fixup,\"ax\"\n\t"	    \
			"11:\tli\t%0, %3\n\t"		    \
			"j\t10b\n\t"			    \
			".previous\n\t"			    \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"		    \
			STR(PTR)"\t2b, 11b\n\t"		    \
			STR(PTR)"\t3b, 11b\n\t"		    \
			STR(PTR)"\t4b, 11b\n\t"		    \
			STR(PTR)"\t5b, 11b\n\t"		    \
			STR(PTR)"\t6b, 11b\n\t"		    \
			STR(PTR)"\t7b, 11b\n\t"		    \
			STR(PTR)"\t8b, 11b\n\t"		    \
			".previous"			    \
			: "=&r" (res)			    \
			: "r" (value), "r" (addr), "i" (-EFAULT) \
			: "memory");			    \
} while(0)
|
|
|
|
|
MIPS: Add Kconfig variable for CPUs with unaligned load/store instructions
MIPSR6 CPUs do not support unaligned load/store instructions
(LWL, LWR, SWL, SWR and LDL, LDR, SDL, SDR for 64bit).
Currently the MIPS tree has some special cases to avoid these
instructions, and the code is testing for !CONFIG_CPU_MIPSR6.
This patch declares a new Kconfig variable:
CONFIG_CPU_HAS_LOAD_STORE_LR.
This variable indicates that the CPU supports these instructions.
Then, the patch does the following:
- Carefully selects this option on all CPUs except MIPSR6.
- Switches all the special cases to test for the new variable,
and inverts the logic:
'#ifndef CONFIG_CPU_MIPSR6' turns into
'#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR'
and vice-versa.
Also, when this variable is NOT selected (e.g. MIPSR6),
CONFIG_GENERIC_CSUM will default to 'y', to compile generic
C checksum code (instead of special assembly code that uses the
unsupported instructions).
This commit should not affect any existing CPU, and is required
for future Lexra CPU support, that misses these instructions too.
Signed-off-by: Yasha Cherikovsky <yasha.che3@gmail.com>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/20808/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
2018-09-26 19:16:15 +08:00
|
|
|
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
|
2013-03-26 02:18:07 +08:00
|
|
|
#endif
|
|
|
|
|
2015-03-09 22:54:50 +08:00
|
|
|
#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
|
|
|
|
#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
|
|
|
|
#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
|
|
|
|
#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
|
|
|
|
#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
|
|
|
|
#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
|
|
|
|
#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
|
|
|
|
#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
|
|
|
|
#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
|
|
|
|
|
|
|
|
#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
|
|
|
|
#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
|
|
|
|
#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
|
|
|
|
#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
|
|
|
|
#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
|
|
|
|
|
2007-07-29 16:16:19 +08:00
|
|
|
static void emulate_load_store_insn(struct pt_regs *regs,
|
|
|
|
void __user *addr, unsigned int __user *pc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-11-08 07:14:07 +08:00
|
|
|
unsigned long origpc, orig31, value;
|
2005-04-17 06:20:36 +08:00
|
|
|
union mips_instruction insn;
|
2018-11-08 07:14:07 +08:00
|
|
|
unsigned int res;
|
2013-12-13 00:15:15 +08:00
|
|
|
#ifdef CONFIG_EVA
|
|
|
|
mm_segment_t seg;
|
|
|
|
#endif
|
2013-03-26 02:18:07 +08:00
|
|
|
origpc = (unsigned long)pc;
|
|
|
|
orig31 = regs->regs[31];
|
|
|
|
|
2011-06-27 20:41:57 +08:00
|
|
|
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
|
2010-10-12 19:37:21 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This load never faults.
|
|
|
|
*/
|
2005-03-02 03:22:29 +08:00
|
|
|
__get_user(insn.word, pc);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
switch (insn.i_format.opcode) {
|
2013-03-26 02:18:07 +08:00
|
|
|
/*
|
|
|
|
* These are instructions that a compiler doesn't generate. We
|
|
|
|
* can assume therefore that the code is MIPS-aware and
|
|
|
|
* really buggy. Emulating these instructions would break the
|
|
|
|
* semantics anyway.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
case ll_op:
|
|
|
|
case lld_op:
|
|
|
|
case sc_op:
|
|
|
|
case scd_op:
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
/*
|
|
|
|
* For these instructions the only way to create an address
|
|
|
|
* error is an attempted access to kernel/supervisor address
|
|
|
|
* space.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
case ldl_op:
|
|
|
|
case ldr_op:
|
|
|
|
case lwl_op:
|
|
|
|
case lwr_op:
|
|
|
|
case sdl_op:
|
|
|
|
case sdr_op:
|
|
|
|
case swl_op:
|
|
|
|
case swr_op:
|
|
|
|
case lb_op:
|
|
|
|
case lbu_op:
|
|
|
|
case sb_op:
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
/*
|
|
|
|
* The remaining opcodes are the ones that are really of
|
|
|
|
* interest.
|
|
|
|
*/
|
2013-12-13 00:15:15 +08:00
|
|
|
case spec3_op:
|
2017-06-19 23:50:11 +08:00
|
|
|
if (insn.dsp_format.func == lx_op) {
|
|
|
|
switch (insn.dsp_format.op) {
|
|
|
|
case lwx_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.dsp_format.rd] = value;
|
|
|
|
break;
|
|
|
|
case lhx_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
LoadHW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.dsp_format.rd] = value;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto sigill;
|
2013-12-13 00:15:15 +08:00
|
|
|
}
|
2017-06-19 23:50:11 +08:00
|
|
|
}
|
|
|
|
#ifdef CONFIG_EVA
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* we can land here only from kernel accessing user
|
|
|
|
* memory, so we need to "switch" the address limit to
|
|
|
|
* user space, so that address check can work properly.
|
|
|
|
*/
|
|
|
|
seg = get_fs();
|
|
|
|
set_fs(USER_DS);
|
|
|
|
switch (insn.spec3_format.func) {
|
|
|
|
case lhe_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2)) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
LoadHWE(addr, value, res);
|
|
|
|
if (res) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.spec3_format.rt] = value;
|
|
|
|
break;
|
|
|
|
case lwe_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4)) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto sigbus;
|
|
|
|
}
|
2015-03-09 22:54:50 +08:00
|
|
|
LoadWE(addr, value, res);
|
2017-06-19 23:50:11 +08:00
|
|
|
if (res) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.spec3_format.rt] = value;
|
|
|
|
break;
|
|
|
|
case lhue_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2)) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
LoadHWUE(addr, value, res);
|
|
|
|
if (res) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.spec3_format.rt] = value;
|
|
|
|
break;
|
|
|
|
case she_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 2)) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
compute_return_epc(regs);
|
|
|
|
value = regs->regs[insn.spec3_format.rt];
|
|
|
|
StoreHWE(addr, value, res);
|
|
|
|
if (res) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case swe_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4)) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
compute_return_epc(regs);
|
|
|
|
value = regs->regs[insn.spec3_format.rt];
|
|
|
|
StoreWE(addr, value, res);
|
|
|
|
if (res) {
|
|
|
|
set_fs(seg);
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
2013-12-13 00:15:15 +08:00
|
|
|
set_fs(seg);
|
2017-06-19 23:50:11 +08:00
|
|
|
goto sigill;
|
2013-12-13 00:15:15 +08:00
|
|
|
}
|
|
|
|
set_fs(seg);
|
|
|
|
}
|
|
|
|
#endif
|
2017-06-19 23:50:11 +08:00
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
case lh_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
2016-08-04 04:45:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_EVA)) {
|
2017-03-21 09:08:07 +08:00
|
|
|
if (uaccess_kernel())
|
2015-03-09 22:54:52 +08:00
|
|
|
LoadHW(addr, value, res);
|
|
|
|
else
|
|
|
|
LoadHWE(addr, value, res);
|
|
|
|
} else {
|
|
|
|
LoadHW(addr, value, res);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
2007-07-29 16:16:19 +08:00
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.i_format.rt] = value;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case lw_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
2016-08-04 04:45:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_EVA)) {
|
2017-03-21 09:08:07 +08:00
|
|
|
if (uaccess_kernel())
|
2015-03-09 22:54:52 +08:00
|
|
|
LoadW(addr, value, res);
|
|
|
|
else
|
|
|
|
LoadWE(addr, value, res);
|
|
|
|
} else {
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
2007-07-29 16:16:19 +08:00
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.i_format.rt] = value;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case lhu_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
2016-08-04 04:45:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_EVA)) {
|
2017-03-21 09:08:07 +08:00
|
|
|
if (uaccess_kernel())
|
2015-03-09 22:54:52 +08:00
|
|
|
LoadHWU(addr, value, res);
|
|
|
|
else
|
|
|
|
LoadHWUE(addr, value, res);
|
|
|
|
} else {
|
|
|
|
LoadHWU(addr, value, res);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
2007-07-29 16:16:19 +08:00
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.i_format.rt] = value;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case lwu_op:
|
2005-09-04 06:56:16 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
LoadWU(addr, value, res);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
2007-07-29 16:16:19 +08:00
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.i_format.rt] = value;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
2005-09-04 06:56:16 +08:00
|
|
|
#endif /* CONFIG_64BIT */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case ld_op:
|
2005-09-04 06:56:16 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
LoadDW(addr, value, res);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
2007-07-29 16:16:19 +08:00
|
|
|
compute_return_epc(regs);
|
|
|
|
regs->regs[insn.i_format.rt] = value;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
2005-09-04 06:56:16 +08:00
|
|
|
#endif /* CONFIG_64BIT */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case sh_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
compute_return_epc(regs);
|
2005-04-17 06:20:36 +08:00
|
|
|
value = regs->regs[insn.i_format.rt];
|
2015-03-09 22:54:52 +08:00
|
|
|
|
2016-08-04 04:45:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_EVA)) {
|
2017-03-21 09:08:07 +08:00
|
|
|
if (uaccess_kernel())
|
2015-03-09 22:54:52 +08:00
|
|
|
StoreHW(addr, value, res);
|
|
|
|
else
|
|
|
|
StoreHWE(addr, value, res);
|
|
|
|
} else {
|
|
|
|
StoreHW(addr, value, res);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case sw_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
compute_return_epc(regs);
|
2005-04-17 06:20:36 +08:00
|
|
|
value = regs->regs[insn.i_format.rt];
|
2015-03-09 22:54:52 +08:00
|
|
|
|
2016-08-04 04:45:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_EVA)) {
|
2017-03-21 09:08:07 +08:00
|
|
|
if (uaccess_kernel())
|
2015-03-09 22:54:52 +08:00
|
|
|
StoreW(addr, value, res);
|
|
|
|
else
|
|
|
|
StoreWE(addr, value, res);
|
|
|
|
} else {
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case sd_op:
|
2005-09-04 06:56:16 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
compute_return_epc(regs);
|
2005-04-17 06:20:36 +08:00
|
|
|
value = regs->regs[insn.i_format.rt];
|
2013-03-26 02:18:07 +08:00
|
|
|
StoreDW(addr, value, res);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
2005-09-04 06:56:16 +08:00
|
|
|
#endif /* CONFIG_64BIT */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
2018-11-08 07:14:07 +08:00
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case lwc1_op:
|
|
|
|
case ldc1_op:
|
|
|
|
case swc1_op:
|
|
|
|
case sdc1_op:
|
2018-11-08 07:14:07 +08:00
|
|
|
case cop1x_op: {
|
|
|
|
void __user *fault_addr = NULL;
|
|
|
|
|
2013-03-26 01:09:02 +08:00
|
|
|
die_if_kernel("Unaligned FP access in kernel code", regs);
|
|
|
|
BUG_ON(!used_math());
|
|
|
|
|
|
|
|
res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
|
|
|
|
&fault_addr);
|
|
|
|
own_fpu(1); /* Restore FPU state. */
|
|
|
|
|
|
|
|
/* Signal if something went wrong. */
|
2015-04-04 06:27:15 +08:00
|
|
|
process_fpemu_return(res, fault_addr, 0);
|
2013-03-26 01:09:02 +08:00
|
|
|
|
|
|
|
if (res == 0)
|
|
|
|
break;
|
|
|
|
return;
|
2018-11-08 07:14:07 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_HAS_MSA
|
|
|
|
|
|
|
|
case msa_op: {
|
|
|
|
unsigned int wd, preempted;
|
|
|
|
enum msa_2b_fmt df;
|
|
|
|
union fpureg *fpr;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-06-22 19:21:00 +08:00
|
|
|
if (!cpu_has_msa)
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we've reached this point then userland should have taken
|
|
|
|
* the MSA disabled exception & initialised vector context at
|
|
|
|
* some point in the past.
|
|
|
|
*/
|
|
|
|
BUG_ON(!thread_msa_context_live());
|
|
|
|
|
|
|
|
df = insn.msa_mi10_format.df;
|
|
|
|
wd = insn.msa_mi10_format.wd;
|
|
|
|
fpr = ¤t->thread.fpu.fpr[wd];
|
|
|
|
|
|
|
|
switch (insn.msa_mi10_format.func) {
|
|
|
|
case msa_ld_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
|
|
|
|
goto sigbus;
|
|
|
|
|
2016-02-03 11:35:49 +08:00
|
|
|
do {
|
|
|
|
/*
|
|
|
|
* If we have live MSA context keep track of
|
|
|
|
* whether we get preempted in order to avoid
|
|
|
|
* the register context we load being clobbered
|
|
|
|
* by the live context as it's saved during
|
|
|
|
* preemption. If we don't have live context
|
|
|
|
* then it can't be saved to clobber the value
|
|
|
|
* we load.
|
|
|
|
*/
|
|
|
|
preempted = test_thread_flag(TIF_USEDMSA);
|
|
|
|
|
|
|
|
res = __copy_from_user_inatomic(fpr, addr,
|
|
|
|
sizeof(*fpr));
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the hardware register if it is in use
|
|
|
|
* by the task in this quantum, in order to
|
|
|
|
* avoid having to save & restore the whole
|
|
|
|
* vector context.
|
|
|
|
*/
|
|
|
|
preempt_disable();
|
|
|
|
if (test_thread_flag(TIF_USEDMSA)) {
|
|
|
|
write_msa_wr(wd, fpr, df);
|
|
|
|
preempted = 0;
|
|
|
|
}
|
|
|
|
preempt_enable();
|
|
|
|
} while (preempted);
|
2015-06-22 19:21:00 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case msa_st_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update from the hardware register if it is in use by
|
|
|
|
* the task in this quantum, in order to avoid having to
|
|
|
|
* save & restore the whole vector context.
|
|
|
|
*/
|
|
|
|
preempt_disable();
|
|
|
|
if (test_thread_flag(TIF_USEDMSA))
|
|
|
|
read_msa_wr(wd, fpr, df);
|
|
|
|
preempt_enable();
|
|
|
|
|
|
|
|
res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
|
|
|
|
compute_return_epc(regs);
|
|
|
|
break;
|
2018-11-08 07:14:07 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_CPU_HAS_MSA */
|
2015-06-22 19:21:00 +08:00
|
|
|
|
2014-10-28 18:42:23 +08:00
|
|
|
#ifndef CONFIG_CPU_MIPSR6
|
2009-11-24 09:24:58 +08:00
|
|
|
/*
|
|
|
|
* COP2 is available to implementor for application specific use.
|
|
|
|
* It's up to applications to register a notifier chain and do
|
|
|
|
* whatever they have to do, including possible sending of signals.
|
2014-10-28 18:42:23 +08:00
|
|
|
*
|
|
|
|
* This instruction has been reallocated in Release 6
|
2009-11-24 09:24:58 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
case lwc2_op:
|
2009-11-24 09:24:58 +08:00
|
|
|
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
|
|
|
|
break;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case ldc2_op:
|
2009-11-24 09:24:58 +08:00
|
|
|
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
|
|
|
|
break;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case swc2_op:
|
2009-11-24 09:24:58 +08:00
|
|
|
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
|
|
|
|
break;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case sdc2_op:
|
2009-11-24 09:24:58 +08:00
|
|
|
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
|
|
|
|
break;
|
2014-10-28 18:42:23 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* Pheeee... We encountered an yet unknown instruction or
|
|
|
|
* cache coherence problem. Die sucker, die ...
|
|
|
|
*/
|
|
|
|
goto sigill;
|
|
|
|
}
|
|
|
|
|
2007-06-29 23:55:48 +08:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
2005-04-17 06:20:36 +08:00
|
|
|
unaligned_instructions++;
|
|
|
|
#endif
|
|
|
|
|
2007-07-29 16:16:19 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
fault:
|
2013-03-26 02:18:07 +08:00
|
|
|
/* roll back jump/branch */
|
|
|
|
regs->cp0_epc = origpc;
|
|
|
|
regs->regs[31] = orig31;
|
|
|
|
/* Did we have an exception handler installed? */
|
|
|
|
if (fixup_exception(regs))
|
|
|
|
return;
|
|
|
|
|
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
|
|
|
force_sig(SIGSEGV, current);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
sigbus:
|
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
|
|
|
force_sig(SIGBUS, current);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
sigill:
|
|
|
|
die_if_kernel
|
|
|
|
("Unhandled kernel unaligned access or invalid instruction", regs);
|
|
|
|
force_sig(SIGILL, current);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 *
 * The 3-bit register fields of 16-bit (microMIPS/MIPS16) instruction
 * encodings can only name a subset of the GPRs; index i of this table
 * yields the full GPR number for encoding i ($16/$17 = s0/s1,
 * $2/$3 = v0/v1, $4-$7 = a0-a3).
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
|
|
|
|
|
|
|
|
/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 *
 * Same mapping as reg16to32 except that encoding 0 selects $0 (the
 * zero register) instead of $16, per the store-instruction register
 * field encoding.
 */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
|
2013-03-26 02:18:07 +08:00
|
|
|
|
2013-05-25 04:54:08 +08:00
|
|
|
static void emulate_load_store_microMIPS(struct pt_regs *regs,
|
|
|
|
void __user *addr)
|
2013-03-26 02:18:07 +08:00
|
|
|
{
|
|
|
|
unsigned long value;
|
|
|
|
unsigned int res;
|
|
|
|
int i;
|
|
|
|
unsigned int reg = 0, rvar;
|
|
|
|
unsigned long orig31;
|
|
|
|
u16 __user *pc16;
|
|
|
|
u16 halfword;
|
|
|
|
unsigned int word;
|
|
|
|
unsigned long origpc, contpc;
|
|
|
|
union mips_instruction insn;
|
|
|
|
struct mm_decoded_insn mminsn;
|
|
|
|
|
|
|
|
origpc = regs->cp0_epc;
|
|
|
|
orig31 = regs->regs[31];
|
|
|
|
|
|
|
|
mminsn.micro_mips_mode = 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This load never faults.
|
|
|
|
*/
|
|
|
|
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
|
|
|
|
__get_user(halfword, pc16);
|
|
|
|
pc16++;
|
|
|
|
contpc = regs->cp0_epc + 2;
|
|
|
|
word = ((unsigned int)halfword << 16);
|
|
|
|
mminsn.pc_inc = 2;
|
|
|
|
|
|
|
|
if (!mm_insn_16bit(halfword)) {
|
|
|
|
__get_user(halfword, pc16);
|
|
|
|
pc16++;
|
|
|
|
contpc = regs->cp0_epc + 4;
|
|
|
|
mminsn.pc_inc = 4;
|
|
|
|
word |= halfword;
|
|
|
|
}
|
|
|
|
mminsn.insn = word;
|
|
|
|
|
|
|
|
if (get_user(halfword, pc16))
|
|
|
|
goto fault;
|
|
|
|
mminsn.next_pc_inc = 2;
|
|
|
|
word = ((unsigned int)halfword << 16);
|
|
|
|
|
|
|
|
if (!mm_insn_16bit(halfword)) {
|
|
|
|
pc16++;
|
|
|
|
if (get_user(halfword, pc16))
|
|
|
|
goto fault;
|
|
|
|
mminsn.next_pc_inc = 4;
|
|
|
|
word |= halfword;
|
|
|
|
}
|
|
|
|
mminsn.next_insn = word;
|
|
|
|
|
|
|
|
insn = (union mips_instruction)(mminsn.insn);
|
|
|
|
if (mm_isBranchInstr(regs, mminsn, &contpc))
|
|
|
|
insn = (union mips_instruction)(mminsn.next_insn);
|
|
|
|
|
|
|
|
/* Parse instruction to find what to do */
|
|
|
|
|
|
|
|
switch (insn.mm_i_format.opcode) {
|
|
|
|
|
|
|
|
case mm_pool32a_op:
|
|
|
|
switch (insn.mm_x_format.func) {
|
|
|
|
case mm_lwxs_op:
|
|
|
|
reg = insn.mm_x_format.rd;
|
|
|
|
goto loadW;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case mm_pool32b_op:
|
|
|
|
switch (insn.mm_m_format.func) {
|
|
|
|
case mm_lwp_func:
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
if (reg == 31)
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
addr += 4;
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg + 1] = value;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
case mm_swp_func:
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
if (reg == 31)
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
value = regs->regs[reg + 1];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
case mm_ldp_func:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
if (reg == 31)
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 16))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
addr += 8;
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg + 1] = value;
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case mm_sdp_func:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
if (reg == 31)
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 16))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 8;
|
|
|
|
value = regs->regs[reg + 1];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case mm_lwm32_func:
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
rvar = reg & 0xf;
|
|
|
|
if ((rvar > 9) || !reg)
|
|
|
|
goto sigill;
|
|
|
|
if (reg & 0x10) {
|
|
|
|
if (!access_ok
|
|
|
|
(VERIFY_READ, addr, 4 * (rvar + 1)))
|
|
|
|
goto sigbus;
|
|
|
|
} else {
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
if (rvar == 9)
|
|
|
|
rvar = 8;
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
regs->regs[i] = value;
|
|
|
|
}
|
|
|
|
if ((reg & 0xf) == 9) {
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
regs->regs[30] = value;
|
|
|
|
}
|
|
|
|
if (reg & 0x10) {
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[31] = value;
|
|
|
|
}
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
case mm_swm32_func:
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
rvar = reg & 0xf;
|
|
|
|
if ((rvar > 9) || !reg)
|
|
|
|
goto sigill;
|
|
|
|
if (reg & 0x10) {
|
|
|
|
if (!access_ok
|
|
|
|
(VERIFY_WRITE, addr, 4 * (rvar + 1)))
|
|
|
|
goto sigbus;
|
|
|
|
} else {
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
if (rvar == 9)
|
|
|
|
rvar = 8;
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
value = regs->regs[i];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
}
|
|
|
|
if ((reg & 0xf) == 9) {
|
|
|
|
value = regs->regs[30];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
}
|
|
|
|
if (reg & 0x10) {
|
|
|
|
value = regs->regs[31];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
case mm_ldm_func:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
rvar = reg & 0xf;
|
|
|
|
if ((rvar > 9) || !reg)
|
|
|
|
goto sigill;
|
|
|
|
if (reg & 0x10) {
|
|
|
|
if (!access_ok
|
|
|
|
(VERIFY_READ, addr, 8 * (rvar + 1)))
|
|
|
|
goto sigbus;
|
|
|
|
} else {
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 8 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
if (rvar == 9)
|
|
|
|
rvar = 8;
|
|
|
|
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
regs->regs[i] = value;
|
|
|
|
}
|
|
|
|
if ((reg & 0xf) == 9) {
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 8;
|
|
|
|
regs->regs[30] = value;
|
|
|
|
}
|
|
|
|
if (reg & 0x10) {
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[31] = value;
|
|
|
|
}
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case mm_sdm_func:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
rvar = reg & 0xf;
|
|
|
|
if ((rvar > 9) || !reg)
|
|
|
|
goto sigill;
|
|
|
|
if (reg & 0x10) {
|
|
|
|
if (!access_ok
|
|
|
|
(VERIFY_WRITE, addr, 8 * (rvar + 1)))
|
|
|
|
goto sigbus;
|
|
|
|
} else {
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
if (rvar == 9)
|
|
|
|
rvar = 8;
|
|
|
|
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
value = regs->regs[i];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 8;
|
|
|
|
}
|
|
|
|
if ((reg & 0xf) == 9) {
|
|
|
|
value = regs->regs[30];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 8;
|
|
|
|
}
|
|
|
|
if (reg & 0x10) {
|
|
|
|
value = regs->regs[31];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
/* LWC2, SWC2, LDC2, SDC2 are not serviced */
|
|
|
|
}
|
|
|
|
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case mm_pool32c_op:
|
|
|
|
switch (insn.mm_m_format.func) {
|
|
|
|
case mm_lwu_func:
|
|
|
|
reg = insn.mm_m_format.rd;
|
|
|
|
goto loadWU;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* LL,SC,LLD,SCD are not serviced */
|
|
|
|
goto sigbus;
|
|
|
|
|
2018-11-08 07:14:07 +08:00
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
2013-03-26 02:18:07 +08:00
|
|
|
case mm_pool32f_op:
|
|
|
|
switch (insn.mm_x_format.func) {
|
|
|
|
case mm_lwxc1_func:
|
|
|
|
case mm_swxc1_func:
|
|
|
|
case mm_ldxc1_func:
|
|
|
|
case mm_sdxc1_func:
|
|
|
|
goto fpu_emul;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case mm_ldc132_op:
|
|
|
|
case mm_sdc132_op:
|
|
|
|
case mm_lwc132_op:
|
2018-11-08 07:14:07 +08:00
|
|
|
case mm_swc132_op: {
|
|
|
|
void __user *fault_addr = NULL;
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
fpu_emul:
|
|
|
|
/* roll back jump/branch */
|
|
|
|
regs->cp0_epc = origpc;
|
|
|
|
regs->regs[31] = orig31;
|
|
|
|
|
|
|
|
die_if_kernel("Unaligned FP access in kernel code", regs);
|
|
|
|
BUG_ON(!used_math());
|
|
|
|
BUG_ON(!is_fpu_owner());
|
|
|
|
|
|
|
|
res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
|
|
|
|
&fault_addr);
|
|
|
|
own_fpu(1); /* restore FPU state */
|
|
|
|
|
|
|
|
/* If something went wrong, signal */
|
2015-04-04 06:27:15 +08:00
|
|
|
process_fpemu_return(res, fault_addr, 0);
|
2013-03-26 02:18:07 +08:00
|
|
|
|
|
|
|
if (res == 0)
|
|
|
|
goto success;
|
|
|
|
return;
|
2018-11-08 07:14:07 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_MIPS_FP_SUPPORT */
|
2013-03-26 02:18:07 +08:00
|
|
|
|
|
|
|
case mm_lh32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto loadHW;
|
|
|
|
|
|
|
|
case mm_lhu32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto loadHWU;
|
|
|
|
|
|
|
|
case mm_lw32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto loadW;
|
|
|
|
|
|
|
|
case mm_sh32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto storeHW;
|
|
|
|
|
|
|
|
case mm_sw32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto storeW;
|
|
|
|
|
|
|
|
case mm_ld32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto loadDW;
|
|
|
|
|
|
|
|
case mm_sd32_op:
|
|
|
|
reg = insn.mm_i_format.rt;
|
|
|
|
goto storeDW;
|
|
|
|
|
|
|
|
case mm_pool16c_op:
|
|
|
|
switch (insn.mm16_m_format.func) {
|
|
|
|
case mm_lwm16_op:
|
|
|
|
reg = insn.mm16_m_format.rlist;
|
|
|
|
rvar = reg + 1;
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
regs->regs[i] = value;
|
|
|
|
}
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[31] = value;
|
|
|
|
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
case mm_swm16_op:
|
|
|
|
reg = insn.mm16_m_format.rlist;
|
|
|
|
rvar = reg + 1;
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
for (i = 16; rvar; rvar--, i++) {
|
|
|
|
value = regs->regs[i];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
addr += 4;
|
|
|
|
}
|
|
|
|
value = regs->regs[31];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case mm_lhu16_op:
|
|
|
|
reg = reg16to32[insn.mm16_rb_format.rt];
|
|
|
|
goto loadHWU;
|
|
|
|
|
|
|
|
case mm_lw16_op:
|
|
|
|
reg = reg16to32[insn.mm16_rb_format.rt];
|
|
|
|
goto loadW;
|
|
|
|
|
|
|
|
case mm_sh16_op:
|
|
|
|
reg = reg16to32st[insn.mm16_rb_format.rt];
|
|
|
|
goto storeHW;
|
|
|
|
|
|
|
|
case mm_sw16_op:
|
|
|
|
reg = reg16to32st[insn.mm16_rb_format.rt];
|
|
|
|
goto storeW;
|
|
|
|
|
|
|
|
case mm_lwsp16_op:
|
|
|
|
reg = insn.mm16_r5_format.rt;
|
|
|
|
goto loadW;
|
|
|
|
|
|
|
|
case mm_swsp16_op:
|
|
|
|
reg = insn.mm16_r5_format.rt;
|
|
|
|
goto storeW;
|
|
|
|
|
|
|
|
case mm_lwgp16_op:
|
|
|
|
reg = reg16to32[insn.mm16_r3_format.rt];
|
|
|
|
goto loadW;
|
|
|
|
|
|
|
|
default:
|
|
|
|
goto sigill;
|
|
|
|
}
|
|
|
|
|
|
|
|
loadHW:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadHW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
loadHWU:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadHWU(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
loadW:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
loadWU:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadWU(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
loadDW:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
storeHW:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreHW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
storeW:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
goto success;
|
|
|
|
|
|
|
|
storeDW:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
goto success;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
success:
|
|
|
|
regs->cp0_epc = contpc; /* advance or branch */
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
unaligned_instructions++;
|
|
|
|
#endif
|
|
|
|
return;
|
|
|
|
|
|
|
|
fault:
|
|
|
|
/* roll back jump/branch */
|
|
|
|
regs->cp0_epc = origpc;
|
|
|
|
regs->regs[31] = orig31;
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Did we have an exception handler installed? */
|
|
|
|
if (fixup_exception(regs))
|
2007-07-29 16:16:19 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-12 06:46:15 +08:00
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
2009-05-06 03:49:47 +08:00
|
|
|
force_sig(SIGSEGV, current);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-29 16:16:19 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
sigbus:
|
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
2009-05-06 03:49:47 +08:00
|
|
|
force_sig(SIGBUS, current);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-29 16:16:19 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
sigill:
|
2013-03-26 02:18:07 +08:00
|
|
|
die_if_kernel
|
|
|
|
("Unhandled kernel unaligned access or invalid instruction", regs);
|
2009-05-06 03:49:47 +08:00
|
|
|
force_sig(SIGILL, current);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2013-03-26 02:46:15 +08:00
|
|
|
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
|
|
|
|
{
|
|
|
|
unsigned long value;
|
|
|
|
unsigned int res;
|
|
|
|
int reg;
|
|
|
|
unsigned long orig31;
|
|
|
|
u16 __user *pc16;
|
|
|
|
unsigned long origpc;
|
|
|
|
union mips16e_instruction mips16inst, oldinst;
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
unsigned int opcode;
|
|
|
|
int extended = 0;
|
2013-03-26 02:46:15 +08:00
|
|
|
|
|
|
|
origpc = regs->cp0_epc;
|
|
|
|
orig31 = regs->regs[31];
|
|
|
|
pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
|
|
|
|
/*
|
|
|
|
* This load never faults.
|
|
|
|
*/
|
|
|
|
__get_user(mips16inst.full, pc16);
|
|
|
|
oldinst = mips16inst;
|
|
|
|
|
|
|
|
/* skip EXTEND instruction */
|
|
|
|
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
extended = 1;
|
2013-03-26 02:46:15 +08:00
|
|
|
pc16++;
|
|
|
|
__get_user(mips16inst.full, pc16);
|
|
|
|
} else if (delay_slot(regs)) {
|
|
|
|
/* skip jump instructions */
|
|
|
|
/* JAL/JALX are 32 bits but have OPCODE in first short int */
|
|
|
|
if (mips16inst.ri.opcode == MIPS16e_jal_op)
|
|
|
|
pc16++;
|
|
|
|
pc16++;
|
|
|
|
if (get_user(mips16inst.full, pc16))
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
opcode = mips16inst.ri.opcode;
|
|
|
|
switch (opcode) {
|
2013-03-26 02:46:15 +08:00
|
|
|
case MIPS16e_i64_op: /* I64 or RI64 instruction */
|
|
|
|
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
|
|
|
|
case MIPS16e_ldpc_func:
|
|
|
|
case MIPS16e_ldsp_func:
|
|
|
|
reg = reg16to32[mips16inst.ri64.ry];
|
|
|
|
goto loadDW;
|
|
|
|
|
|
|
|
case MIPS16e_sdsp_func:
|
|
|
|
reg = reg16to32[mips16inst.ri64.ry];
|
|
|
|
goto writeDW;
|
|
|
|
|
|
|
|
case MIPS16e_sdrasp_func:
|
|
|
|
reg = 29; /* GPRSP */
|
|
|
|
goto writeDW;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case MIPS16e_swsp_op:
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
reg = reg16to32[mips16inst.ri.rx];
|
|
|
|
if (extended && cpu_has_mips16e2)
|
|
|
|
switch (mips16inst.ri.imm >> 5) {
|
|
|
|
case 0: /* SWSP */
|
|
|
|
case 1: /* SWGP */
|
|
|
|
break;
|
|
|
|
case 2: /* SHGP */
|
|
|
|
opcode = MIPS16e_sh_op;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2013-03-26 02:46:15 +08:00
|
|
|
case MIPS16e_lwpc_op:
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
reg = reg16to32[mips16inst.ri.rx];
|
|
|
|
break;
|
|
|
|
|
2013-03-26 02:46:15 +08:00
|
|
|
case MIPS16e_lwsp_op:
|
|
|
|
reg = reg16to32[mips16inst.ri.rx];
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected to exclude from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE, however lacking MIPS16e2 ASE
support retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware, the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
if (extended && cpu_has_mips16e2)
|
|
|
|
switch (mips16inst.ri.imm >> 5) {
|
|
|
|
case 0: /* LWSP */
|
|
|
|
case 1: /* LWGP */
|
|
|
|
break;
|
|
|
|
case 2: /* LHGP */
|
|
|
|
opcode = MIPS16e_lh_op;
|
|
|
|
break;
|
|
|
|
case 4: /* LHUGP */
|
|
|
|
opcode = MIPS16e_lhu_op;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto sigbus;
|
|
|
|
}
|
2013-03-26 02:46:15 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_i8_op:
|
|
|
|
if (mips16inst.i8.func != MIPS16e_swrasp_func)
|
|
|
|
goto sigbus;
|
|
|
|
reg = 29; /* GPRSP */
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
reg = reg16to32[mips16inst.rri.ry];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
Implement extended LWSP/SWSP instruction subdecoding for the purpose of
unaligned GP-relative memory access emulation.
With the introduction of the MIPS16e2 ASE[1] the previously must-be-zero
3-bit field at bits 7..5 of the extended encodings of the instructions
selected with the LWSP and SWSP major opcodes has become a `sel' field,
acting as an opcode extension for additional operations. In both cases
the `sel' value of 0 has retained the original operation, that is:
LW rx, offset(sp)
and:
SW rx, offset(sp)
for LWSP and SWSP respectively. In hardware predating the MIPS16e2 ASE
other values may or may not have been decoded, architecturally yielding
unpredictable results, and in our unaligned memory access emulation we
have treated the 3-bit field as a don't-care, that is effectively making
all the possible encodings of the field alias to the architecturally
defined encoding of 0.
For the non-zero values of the `sel' field the MIPS16e2 ASE has in
particular defined these GP-relative operations:
LW rx, offset(gp) # sel = 1
LH rx, offset(gp) # sel = 2
LHU rx, offset(gp) # sel = 4
and
SW rx, offset(gp) # sel = 1
SH rx, offset(gp) # sel = 2
for LWSP and SWSP respectively, which will trap with an Address Error
exception if the effective address calculated is not naturally-aligned
for the operation requested. These operations have been selected for
unaligned access emulation, for consistency with the corresponding
regular MIPS and microMIPS operations.
For other non-zero values of the `sel' field the MIPS16e2 ASE has
defined further operations, which however either never trap with an
Address Error exception, such as LWL or GP-relative SB, or are not
supposed to be emulated, such as LL or SC. These operations have been
selected for exclusion from unaligned access emulation, should an Address
Error exception ever happen with them.
Subdecode the `sel' field in unaligned access emulation then for the
extended encodings of the instructions selected with the LWSP and SWSP
major opcodes, whenever support for the MIPS16e2 ASE has been detected
in hardware, and either emulate the operation requested or send SIGBUS
to the originating process, according to the selection described above.
For hardware implementing the MIPS16 ASE but lacking MIPS16e2 ASE
support, retain the original interpretation of the `sel' field.
The effects of this change are illustrated with the following user
program:
$ cat mips16e2-test.c
#include <inttypes.h>
#include <stdio.h>
int main(void)
{
int64_t scratch[16] = { 0 };
int32_t *tmp0, *tmp1, *tmp2;
int i;
scratch[0] = 0xc8c7c6c5c4c3c2c1;
scratch[1] = 0xd0cfcecdcccbcac9;
asm volatile(
"move %0, $sp\n\t"
"move %1, $gp\n\t"
"move $sp, %4\n\t"
"addiu %2, %4, 8\n\t"
"move $gp, %2\n\t"
"lw %2, 2($sp)\n\t"
"sw %2, 16(%4)\n\t"
"lw %2, 2($gp)\n\t"
"sw %2, 24(%4)\n\t"
"lw %2, 1($sp)\n\t"
"sw %2, 32(%4)\n\t"
"lh %2, 1($gp)\n\t"
"sw %2, 40(%4)\n\t"
"lw %2, 3($sp)\n\t"
"sw %2, 48(%4)\n\t"
"lhu %2, 3($gp)\n\t"
"sw %2, 56(%4)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 66($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sw %2, 82($gp)\n\t"
"lw %2, 0(%4)\n\t"
"sw %2, 97($sp)\n\t"
"lw %2, 8(%4)\n\t"
"sh %2, 113($gp)\n\t"
"move $gp, %1\n\t"
"move $sp, %0"
: "=&d" (tmp0), "=&d" (tmp1), "=&d" (tmp2), "=m" (scratch)
: "d" (scratch));
for (i = 0; i < sizeof(scratch) / sizeof(*scratch); i += 2)
printf("%016" PRIx64 "\t%016" PRIx64 "\n",
scratch[i], scratch[i + 1]);
return 0;
}
$
to be compiled with:
$ gcc -mips16 -mips32r2 -Wa,-mmips16e2 -o mips16e2-test mips16e2-test.c
$
With 74Kf hardware, which does not implement the MIPS16e2 ASE, this
program produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000c6c5c4c3
00000000c5c4c3c2 00000000c5c4c3c2
00000000c7c6c5c4 00000000c7c6c5c4
0000c4c3c2c10000 0000000000000000
0000cccbcac90000 0000000000000000
000000c4c3c2c100 0000000000000000
000000cccbcac900 0000000000000000
$
regardless of whether the change has been applied or not.
With the change not applied and interAptiv MR2 hardware[2], which does
implement the MIPS16e2 ASE, it produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000cdcccbca
00000000c7c6c5c4 00000000cfcecdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 000000cccbcac900
$
which shows that for GP-relative operations the correct trapping address
calculated from $gp has been obtained from the CP0 BadVAddr register and
so has data from the source operand, however masking and extension has
not been applied for halfword operations.
With the change applied and interAptiv MR2 hardware, the program
produces the following output:
$ ./mips16e2-test
c8c7c6c5c4c3c2c1 d0cfcecdcccbcac9
00000000c6c5c4c3 00000000cecdcccb
00000000c5c4c3c2 00000000ffffcbca
00000000c7c6c5c4 000000000000cdcc
0000c4c3c2c10000 0000000000000000
0000000000000000 0000cccbcac90000
000000c4c3c2c100 0000000000000000
0000000000000000 0000000000cac900
$
as expected.
References:
[1] "MIPS32 Architecture for Programmers: MIPS16e2 Application-Specific
Extension Technical Reference Manual", Imagination Technologies
Ltd., Document Number: MD01172, Revision 01.00, April 26, 2016
[2] "MIPS32 interAptiv Multiprocessing System Software User's Manual",
Imagination Technologies Ltd., Document Number: MD00904, Revision
02.01, June 15, 2016, Chapter 24 "MIPS16e Application-Specific
Extension to the MIPS32 Instruction Set", pp. 871-883
Signed-off-by: Maciej W. Rozycki <macro@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16095/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-05-23 20:38:19 +08:00
|
|
|
switch (opcode) {
|
2013-03-26 02:46:15 +08:00
|
|
|
|
|
|
|
case MIPS16e_lb_op:
|
|
|
|
case MIPS16e_lbu_op:
|
|
|
|
case MIPS16e_sb_op:
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
case MIPS16e_lh_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadHW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_lhu_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadHWU(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_lw_op:
|
|
|
|
case MIPS16e_lwpc_op:
|
|
|
|
case MIPS16e_lwsp_op:
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_lwu_op:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadWU(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
break;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case MIPS16e_ld_op:
|
|
|
|
loadDW:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_READ, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
LoadDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
regs->regs[reg] = value;
|
|
|
|
break;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
case MIPS16e_sh_op:
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 2))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreHW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_sw_op:
|
|
|
|
case MIPS16e_swsp_op:
|
|
|
|
case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 4))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIPS16e_sd_op:
|
|
|
|
writeDW:
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
/*
|
|
|
|
* A 32-bit kernel might be running on a 64-bit processor. But
|
|
|
|
* if we're on a 32-bit processor and an i-cache incoherency
|
|
|
|
* or race makes us see a 64-bit instruction here the sdl/sdr
|
|
|
|
* would blow up, so for now we don't handle unaligned 64-bit
|
|
|
|
* instructions on 32-bit kernels.
|
|
|
|
*/
|
|
|
|
if (!access_ok(VERIFY_WRITE, addr, 8))
|
|
|
|
goto sigbus;
|
|
|
|
|
|
|
|
MIPS16e_compute_return_epc(regs, &oldinst);
|
|
|
|
value = regs->regs[reg];
|
|
|
|
StoreDW(addr, value, res);
|
|
|
|
if (res)
|
|
|
|
goto fault;
|
|
|
|
break;
|
|
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
|
|
|
|
/* Cannot handle 64-bit instructions in 32-bit kernel */
|
|
|
|
goto sigill;
|
|
|
|
|
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* Pheeee... We encountered an yet unknown instruction or
|
|
|
|
* cache coherence problem. Die sucker, die ...
|
|
|
|
*/
|
|
|
|
goto sigill;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
unaligned_instructions++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
fault:
|
|
|
|
/* roll back jump/branch */
|
|
|
|
regs->cp0_epc = origpc;
|
|
|
|
regs->regs[31] = orig31;
|
|
|
|
/* Did we have an exception handler installed? */
|
|
|
|
if (fixup_exception(regs))
|
|
|
|
return;
|
|
|
|
|
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
|
|
|
force_sig(SIGSEGV, current);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
sigbus:
|
|
|
|
die_if_kernel("Unhandled kernel unaligned access", regs);
|
|
|
|
force_sig(SIGBUS, current);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
sigill:
|
|
|
|
die_if_kernel
|
|
|
|
("Unhandled kernel unaligned access or invalid instruction", regs);
|
|
|
|
force_sig(SIGILL, current);
|
|
|
|
}
|
2013-06-21 18:10:46 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
asmlinkage void do_ade(struct pt_regs *regs)
|
|
|
|
{
|
2013-05-29 07:07:19 +08:00
|
|
|
enum ctx_state prev_state;
|
2005-03-02 03:22:29 +08:00
|
|
|
unsigned int __user *pc;
|
2005-04-17 06:20:36 +08:00
|
|
|
mm_segment_t seg;
|
|
|
|
|
2013-05-29 07:07:19 +08:00
|
|
|
prev_state = exception_enter();
|
2010-10-12 19:37:21 +08:00
|
|
|
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
|
2011-06-27 20:41:57 +08:00
|
|
|
1, regs, regs->cp0_badvaddr);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Did we catch a fault trying to load an instruction?
|
|
|
|
*/
|
2013-03-26 02:18:07 +08:00
|
|
|
if (regs->cp0_badvaddr == regs->cp0_epc)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto sigbus;
|
|
|
|
|
2007-07-25 23:19:33 +08:00
|
|
|
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto sigbus;
|
2007-06-29 23:55:48 +08:00
|
|
|
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
|
|
|
|
goto sigbus;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Do branch emulation only if we didn't forward the exception.
|
|
|
|
* This is all so but ugly ...
|
|
|
|
*/
|
2013-03-26 02:18:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Are we running in microMIPS mode?
|
|
|
|
*/
|
|
|
|
if (get_isa16_mode(regs->cp0_epc)) {
|
|
|
|
/*
|
|
|
|
* Did we catch a fault trying to load an instruction in
|
|
|
|
* 16-bit mode?
|
|
|
|
*/
|
|
|
|
if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
|
|
|
|
goto sigbus;
|
|
|
|
if (unaligned_action == UNALIGNED_ACTION_SHOW)
|
|
|
|
show_registers(regs);
|
|
|
|
|
|
|
|
if (cpu_has_mmips) {
|
|
|
|
seg = get_fs();
|
|
|
|
if (!user_mode(regs))
|
|
|
|
set_fs(KERNEL_DS);
|
|
|
|
emulate_load_store_microMIPS(regs,
|
|
|
|
(void __user *)regs->cp0_badvaddr);
|
|
|
|
set_fs(seg);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-03-26 02:46:15 +08:00
|
|
|
if (cpu_has_mips16) {
|
|
|
|
seg = get_fs();
|
|
|
|
if (!user_mode(regs))
|
|
|
|
set_fs(KERNEL_DS);
|
|
|
|
emulate_load_store_MIPS16e(regs,
|
|
|
|
(void __user *)regs->cp0_badvaddr);
|
|
|
|
set_fs(seg);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-03-26 02:18:07 +08:00
|
|
|
goto sigbus;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unaligned_action == UNALIGNED_ACTION_SHOW)
|
|
|
|
show_registers(regs);
|
|
|
|
pc = (unsigned int __user *)exception_epc(regs);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
seg = get_fs();
|
|
|
|
if (!user_mode(regs))
|
|
|
|
set_fs(KERNEL_DS);
|
2007-07-29 16:16:19 +08:00
|
|
|
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
|
2005-04-17 06:20:36 +08:00
|
|
|
set_fs(seg);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
sigbus:
|
|
|
|
die_if_kernel("Kernel unaligned instruction access", regs);
|
|
|
|
force_sig(SIGBUS, current);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX On return from the signal handler we should advance the epc
|
|
|
|
*/
|
2013-05-29 07:07:19 +08:00
|
|
|
exception_exit(prev_state);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-06-29 23:55:48 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_FS
/*
 * Expose the unaligned-access statistics and policy knob in debugfs:
 * "unaligned_instructions" (read-only count of emulated accesses) and
 * "unaligned_action" (read-write emulation policy selector), both under
 * the MIPS debugfs directory.
 *
 * Returns 0 on success, -ENODEV if the MIPS debugfs root is missing or
 * -ENOMEM if a file could not be created.
 */
static int __init debugfs_unaligned(void)
{
	if (!mips_debugfs_dir)
		return -ENODEV;

	if (!debugfs_create_u32("unaligned_instructions", S_IRUGO,
				mips_debugfs_dir, &unaligned_instructions))
		return -ENOMEM;

	if (!debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
				mips_debugfs_dir, &unaligned_action))
		return -ENOMEM;

	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
|