Main MIPS changes for v5.2:

- A set of memblock initialization improvements thanks to Serge Semin,
  tidying up after our conversion from bootmem to memblock back in v4.20.

- Our eBPF JIT, which previously supported only MIPS64r2 through MIPS64r5,
  is improved to also support MIPS64r6. Support for MIPS32 systems is
  introduced, with the caveat that it only works for programs that don't
  use 64 bit registers or operations - those will bail out & need to be
  interpreted.

- Improvements to the allocation & configuration of our exception vector
  that should fix issues seen on some platforms using recent versions of
  U-Boot.

- Some minor improvements to code generated for jump labels, along with
  enabling them by default for generic kernels.
-----BEGIN PGP SIGNATURE-----

iIsEABYIADMWIQRgLjeFAZEXQzy86/s+p5+stXUA3QUCXNNB2RUccGF1bC5idXJ0
b25AbWlwcy5jb20ACgkQPqefrLV1AN1zeAD/U/ScowcQE8ynoY97nA70d3UmbETH
YETUX5WcOfR65O8A/1hvMX8QJ1x87XUlNTkE6Gdh/itAZJpJWiSo3dnd1GoF
=L9IJ
-----END PGP SIGNATURE-----

Merge tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Paul Burton:

 - A set of memblock initialization improvements thanks to Serge Semin,
   tidying up after our conversion from bootmem to memblock back in
   v4.20.

 - Our eBPF JIT, which previously supported only MIPS64r2 through
   MIPS64r5, is improved to also support MIPS64r6. Support for MIPS32
   systems is introduced, with the caveat that it only works for
   programs that don't use 64 bit registers or operations - those will
   bail out & need to be interpreted.

 - Improvements to the allocation & configuration of our exception
   vector that should fix issues seen on some platforms using recent
   versions of U-Boot.

 - Some minor improvements to code generated for jump labels, along
   with enabling them by default for generic kernels.

* tag 'mips_5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (27 commits)
  mips: Manually call fdt_init_reserved_mem() method
  mips: Make sure dt memory regions are valid
  mips: Perform early low memory test
  mips: Dump memblock regions for debugging
  mips: Add reserve-nomap memory type support
  mips: Use memblock to reserve the __nosave memory range
  mips: Discard post-CMA-init foreach loop
  mips: Reserve memory for the kernel image resources
  MIPS: Remove duplicate EBase configuration
  MIPS: Sync icache for whole exception vector
  MIPS: Always allocate exception vector for MIPSr2+
  MIPS: Use memblock_phys_alloc() for exception vector
  mips: Combine memblock init and memory reservation loops
  mips: Discard rudiments from bootmem_init
  mips: Make sure kernel .bss exists in boot mem pool
  mips: vdso: drop unnecessary cc-ldoption
  Revert "MIPS: ralink: fix cpu clock of mt7621 and add dt clk devices"
  MIPS: generic: Enable CONFIG_JUMP_LABEL
  MIPS: jump_label: Use compact branches for >= r6
  MIPS: jump_label: Remove redundant nops
  ...
commit 92fab77b6b
@@ -44,8 +44,7 @@ config MIPS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
-	select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS)
-	select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS)
+	select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
@ -276,7 +275,7 @@ config BCM47XX
|
|||
select BCM47XX_SPROM
|
||||
select BCM47XX_SSB if !BCM47XX_BCMA
|
||||
help
|
||||
Support for BCM47XX based boards
|
||||
Support for BCM47XX based boards
|
||||
|
||||
config BCM63XX
|
||||
bool "Broadcom BCM63XX based boards"
|
||||
|
@ -295,7 +294,7 @@ config BCM63XX
|
|||
select MIPS_L1_CACHE_SHIFT_4
|
||||
select CLKDEV_LOOKUP
|
||||
help
|
||||
Support for BCM63XX based boards
|
||||
Support for BCM63XX based boards
|
||||
|
||||
config MIPS_COBALT
|
||||
bool "Cobalt Server"
|
||||
|
@ -374,10 +373,10 @@ config MACH_JAZZ
|
|||
select SYS_SUPPORTS_64BIT_KERNEL
|
||||
select SYS_SUPPORTS_100HZ
|
||||
help
|
||||
This a family of machines based on the MIPS R4030 chipset which was
|
||||
used by several vendors to build RISC/os and Windows NT workstations.
|
||||
Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
|
||||
Olivetti M700-10 workstations.
|
||||
This a family of machines based on the MIPS R4030 chipset which was
|
||||
used by several vendors to build RISC/os and Windows NT workstations.
|
||||
Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
|
||||
Olivetti M700-10 workstations.
|
||||
|
||||
config MACH_INGENIC
|
||||
bool "Ingenic SoC based machines"
|
||||
|
@ -573,14 +572,14 @@ config NXP_STB220
|
|||
bool "NXP STB220 board"
|
||||
select SOC_PNX833X
|
||||
help
|
||||
Support for NXP Semiconductors STB220 Development Board.
|
||||
Support for NXP Semiconductors STB220 Development Board.
|
||||
|
||||
config NXP_STB225
|
||||
bool "NXP 225 board"
|
||||
select SOC_PNX833X
|
||||
select SOC_PNX8335
|
||||
help
|
||||
Support for NXP Semiconductors STB225 Development Board.
|
||||
Support for NXP Semiconductors STB225 Development Board.
|
||||
|
||||
config PMC_MSP
|
||||
bool "PMC-Sierra MSP chipsets"
|
||||
|
@ -722,9 +721,9 @@ config SGI_IP28
|
|||
select SYS_SUPPORTS_64BIT_KERNEL
|
||||
select SYS_SUPPORTS_BIG_ENDIAN
|
||||
select MIPS_L1_CACHE_SHIFT_7
|
||||
help
|
||||
This is the SGI Indigo2 with R10000 processor. To compile a Linux
|
||||
kernel that runs on these, say Y here.
|
||||
help
|
||||
This is the SGI Indigo2 with R10000 processor. To compile a Linux
|
||||
kernel that runs on these, say Y here.
|
||||
|
||||
config SGI_IP32
|
||||
bool "SGI IP32 (O2)"
|
||||
|
@ -1168,9 +1167,9 @@ config HOLES_IN_ZONE
|
|||
config SYS_SUPPORTS_RELOCATABLE
|
||||
bool
|
||||
help
|
||||
Selected if the platform supports relocating the kernel.
|
||||
The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
|
||||
to allow access to command line and entropy sources.
|
||||
Selected if the platform supports relocating the kernel.
|
||||
The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
|
||||
to allow access to command line and entropy sources.
|
||||
|
||||
config MIPS_CBPF_JIT
|
||||
def_bool y
|
||||
|
@ -2113,8 +2112,8 @@ config MIPS_PGD_C0_CONTEXT
|
|||
# Set to y for ptrace access to watch registers.
|
||||
#
|
||||
config HARDWARE_WATCHPOINTS
|
||||
bool
|
||||
default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
|
||||
bool
|
||||
default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
|
||||
|
||||
menu "Kernel type"
|
||||
|
||||
|
@ -2178,10 +2177,10 @@ config PAGE_SIZE_4KB
|
|||
bool "4kB"
|
||||
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
|
||||
help
|
||||
This option select the standard 4kB Linux page size. On some
|
||||
R3000-family processors this is the only available page size. Using
|
||||
4kB page size will minimize memory consumption and is therefore
|
||||
recommended for low memory systems.
|
||||
This option select the standard 4kB Linux page size. On some
|
||||
R3000-family processors this is the only available page size. Using
|
||||
4kB page size will minimize memory consumption and is therefore
|
||||
recommended for low memory systems.
|
||||
|
||||
config PAGE_SIZE_8KB
|
||||
bool "8kB"
|
||||
|
@ -2474,7 +2473,6 @@ config SB1_PASS_2_1_WORKAROUNDS
|
|||
depends on CPU_SB1 && CPU_SB1_PASS_2
|
||||
default y
|
||||
|
||||
|
||||
choice
|
||||
prompt "SmartMIPS or microMIPS ASE support"
|
||||
|
||||
|
@ -2682,16 +2680,16 @@ config RANDOMIZE_BASE
|
|||
bool "Randomize the address of the kernel image"
|
||||
depends on RELOCATABLE
|
||||
---help---
|
||||
Randomizes the physical and virtual address at which the
|
||||
kernel image is loaded, as a security feature that
|
||||
deters exploit attempts relying on knowledge of the location
|
||||
of kernel internals.
|
||||
Randomizes the physical and virtual address at which the
|
||||
kernel image is loaded, as a security feature that
|
||||
deters exploit attempts relying on knowledge of the location
|
||||
of kernel internals.
|
||||
|
||||
Entropy is generated using any coprocessor 0 registers available.
|
||||
Entropy is generated using any coprocessor 0 registers available.
|
||||
|
||||
The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
|
||||
The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
|
||||
|
||||
If unsure, say N.
|
||||
If unsure, say N.
|
||||
|
||||
config RANDOMIZE_BASE_MAX_OFFSET
|
||||
hex "Maximum kASLR offset" if EXPERT
|
||||
|
@ -2821,7 +2819,7 @@ choice
|
|||
prompt "Timer frequency"
|
||||
default HZ_250
|
||||
help
|
||||
Allows the configuration of the timer frequency.
|
||||
Allows the configuration of the timer frequency.
|
||||
|
||||
config HZ_24
|
||||
bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ
|
||||
|
@ -3121,10 +3119,10 @@ config ARCH_MMAP_RND_BITS_MAX
|
|||
default 15
|
||||
|
||||
config ARCH_MMAP_RND_COMPAT_BITS_MIN
|
||||
default 8
|
||||
default 8
|
||||
|
||||
config ARCH_MMAP_RND_COMPAT_BITS_MAX
|
||||
default 15
|
||||
default 15
|
||||
|
||||
config I8253
|
||||
bool
|
||||
|
|
|
@ -15,9 +15,9 @@ config BCM47XX_SSB
|
|||
select SSB_DRIVER_GPIO
|
||||
default y
|
||||
help
|
||||
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
|
||||
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
|
||||
|
||||
This will generate an image with support for SSB and MIPS32 R1 instruction set.
|
||||
This will generate an image with support for SSB and MIPS32 R1 instruction set.
|
||||
|
||||
config BCM47XX_BCMA
|
||||
bool "BCMA Support for Broadcom BCM47XX"
|
||||
|
@ -31,8 +31,8 @@ config BCM47XX_BCMA
|
|||
select BCMA_DRIVER_GPIO
|
||||
default y
|
||||
help
|
||||
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
|
||||
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
|
||||
|
||||
This will generate an image with support for BCMA and MIPS32 R2 instruction set.
|
||||
This will generate an image with support for BCMA and MIPS32 R2 instruction set.
|
||||
|
||||
endif
|
||||
|
|
|
@ -5,7 +5,7 @@ choice
|
|||
default BOARD_BCM963XX
|
||||
|
||||
config BOARD_BCM963XX
|
||||
bool "Generic Broadcom 963xx boards"
|
||||
bool "Generic Broadcom 963xx boards"
|
||||
select SSB
|
||||
|
||||
endchoice
|
||||
|
|
|
@@ -26,6 +26,7 @@ CONFIG_MIPS_CPS=y
 CONFIG_HIGHMEM=y
 CONFIG_NR_CPUS=16
 CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_TRIM_UNUSED_KSYMS=y
@@ -92,6 +92,7 @@ extern unsigned long mips_machtype;
 #define BOOT_MEM_ROM_DATA	2
 #define BOOT_MEM_RESERVED	3
 #define BOOT_MEM_INIT_RAM	4
+#define BOOT_MEM_NOMAP		5
 
 /*
  * A memory map that's built upon what was determined
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/isa-rev.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
@@ -21,15 +22,20 @@
 #endif
 
 #ifdef CONFIG_CPU_MICROMIPS
-#define B_INSN "b32"
+# define B_INSN "b32"
+# define J_INSN "j32"
+#elif MIPS_ISA_REV >= 6
+# define B_INSN "bc"
+# define J_INSN "bc"
 #else
-#define B_INSN "b"
+# define B_INSN "b"
+# define J_INSN "j"
 #endif
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
-		"2:\tnop\n\t"
+		"2:\t.insn\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
@@ -42,8 +48,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("1:\tj %l[l_yes]\n\t"
-		"nop\n\t"
+	asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
@@ -86,14 +86,18 @@ Ip_u2u1(_ctcmsa);
 Ip_u2u1s3(_daddiu);
 Ip_u3u1u2(_daddu);
 Ip_u1u2(_ddivu);
+Ip_u3u1u2(_ddivu_r6);
 Ip_u1(_di);
 Ip_u2u1msbu3(_dins);
 Ip_u2u1msbu3(_dinsm);
 Ip_u2u1msbu3(_dinsu);
 Ip_u1u2(_divu);
+Ip_u3u1u2(_divu_r6);
 Ip_u1u2u3(_dmfc0);
+Ip_u3u1u2(_dmodu);
 Ip_u1u2u3(_dmtc0);
 Ip_u1u2(_dmultu);
+Ip_u3u1u2(_dmulu);
 Ip_u2u1u3(_drotr);
 Ip_u2u1u3(_drotr32);
 Ip_u2u1(_dsbh);
@@ -131,6 +135,7 @@ Ip_u1u2u3(_mfc0);
 Ip_u1u2u3(_mfhc0);
 Ip_u1(_mfhi);
 Ip_u1(_mflo);
+Ip_u3u1u2(_modu);
 Ip_u3u1u2(_movn);
 Ip_u3u1u2(_movz);
 Ip_u1u2u3(_mtc0);
@@ -139,6 +144,7 @@ Ip_u1(_mthi);
 Ip_u1(_mtlo);
 Ip_u3u1u2(_mul);
 Ip_u1u2(_multu);
+Ip_u3u1u2(_mulu);
 Ip_u3u1u2(_nor);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
@@ -149,6 +155,8 @@ Ip_u2s3u1(_sb);
 Ip_u2s3u1(_sc);
 Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
+Ip_u3u1u2(_seleqz);
+Ip_u3u1u2(_selnez);
 Ip_u2s3u1(_sh);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
@@ -55,9 +55,9 @@ enum spec_op {
 	spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
 	dadd_op, daddu_op, dsub_op, dsubu_op,
 	tge_op, tgeu_op, tlt_op, tltu_op,
-	teq_op, spec5_unused_op, tne_op, spec6_unused_op,
-	dsll_op, spec7_unused_op, dsrl_op, dsra_op,
-	dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+	teq_op, seleqz_op, tne_op, selnez_op,
+	dsll_op, spec5_unused_op, dsrl_op, dsra_op,
+	dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op
 };
 
 /*
@@ -58,15 +58,14 @@ resume_kernel:
 	local_irq_disable
 	lw	t0, TI_PRE_COUNT($28)
 	bnez	t0, restore_all
-need_resched:
 	LONG_L	t0, TI_FLAGS($28)
 	andi	t1, t0, _TIF_NEED_RESCHED
 	beqz	t1, restore_all
 	LONG_L	t0, PT_STATUS(sp)		# Interrupts off?
 	andi	t0, 1
 	beqz	t0, restore_all
-	jal	preempt_schedule_irq
-	b	need_resched
+	PTR_LA	ra, restore_all
+	j	preempt_schedule_irq
 #endif
 
 FEXPORT(ret_from_kernel_thread)
@@ -40,18 +40,38 @@ void arch_jump_label_transform(struct jump_entry *e,
 {
 	union mips_instruction *insn_p;
 	union mips_instruction insn;
+	long offset;
 
 	insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
 
-	/* Jump only works within an aligned region its delay slot is in. */
-	BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
-
 	/* Target must have the right alignment and ISA must be preserved. */
 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
 	if (type == JUMP_LABEL_JMP) {
-		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
-		insn.j_format.target = e->target >> J_RANGE_SHIFT;
+		if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
+			offset = e->target - ((unsigned long)insn_p + 4);
+			offset >>= 2;
+
+			/*
+			 * The branch offset must fit in the instruction's 26
+			 * bit field.
+			 */
+			WARN_ON((offset >= BIT(25)) ||
+				(offset < -(long)BIT(25)));
+
+			insn.j_format.opcode = bc6_op;
+			insn.j_format.target = offset;
+		} else {
+			/*
+			 * Jump only works within an aligned region its delay
+			 * slot is in.
+			 */
+			WARN_ON((e->target & ~J_RANGE_MASK) !=
+				((e->code + 4) & ~J_RANGE_MASK));
+
+			insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+			insn.j_format.target = e->target >> J_RANGE_SHIFT;
+		}
 	} else {
 		insn.word = 0; /* nop */
 	}
@@ -41,13 +41,27 @@ char *mips_get_machine_name(void)
 #ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	return add_memory_region(base, size, BOOT_MEM_RAM);
+	if (base >= PHYS_ADDR_MAX) {
+		pr_warn("Trying to add an invalid memory region, skipped\n");
+		return;
+	}
+
+	/* Truncate the passed memory region instead of type casting */
+	if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
+		pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
+			size, base, PHYS_ADDR_MAX - base);
+		size = PHYS_ADDR_MAX - base;
+	}
+
+	add_memory_region(base, size, BOOT_MEM_RAM);
 }
 
 int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
 					phys_addr_t size, bool nomap)
 {
-	add_memory_region(base, size, BOOT_MEM_RESERVED);
+	add_memory_region(base, size,
+			  nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED);
+
 	return 0;
 }
 
@ -27,6 +27,7 @@
|
|||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/decompress/generic.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/bootinfo.h>
|
||||
|
@ -178,6 +179,7 @@ static bool __init __maybe_unused memory_region_available(phys_addr_t start,
|
|||
in_ram = true;
|
||||
break;
|
||||
case BOOT_MEM_RESERVED:
|
||||
case BOOT_MEM_NOMAP:
|
||||
if ((start >= start_ && start < end_) ||
|
||||
(start < start_ && start + size >= start_))
|
||||
free = false;
|
||||
|
@ -213,6 +215,9 @@ static void __init print_memory_map(void)
|
|||
case BOOT_MEM_RESERVED:
|
||||
printk(KERN_CONT "(reserved)\n");
|
||||
break;
|
||||
case BOOT_MEM_NOMAP:
|
||||
printk(KERN_CONT "(nomap)\n");
|
||||
break;
|
||||
default:
|
||||
printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
|
||||
break;
|
||||
|
@ -371,7 +376,6 @@ static void __init bootmem_init(void)
|
|||
|
||||
static void __init bootmem_init(void)
|
||||
{
|
||||
unsigned long reserved_end;
|
||||
phys_addr_t ramstart = PHYS_ADDR_MAX;
|
||||
int i;
|
||||
|
||||
|
@ -382,10 +386,10 @@ static void __init bootmem_init(void)
|
|||
* will reserve the area used for the initrd.
|
||||
*/
|
||||
init_initrd();
|
||||
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
|
||||
|
||||
memblock_reserve(PHYS_OFFSET,
|
||||
(reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
|
||||
/* Reserve memory occupied by kernel. */
|
||||
memblock_reserve(__pa_symbol(&_text),
|
||||
__pa_symbol(&_end) - __pa_symbol(&_text));
|
||||
|
||||
/*
|
||||
* max_low_pfn is not a number of pages. The number of pages
|
||||
|
@ -394,10 +398,7 @@ static void __init bootmem_init(void)
|
|||
min_low_pfn = ~0UL;
|
||||
max_low_pfn = 0;
|
||||
|
||||
/*
|
||||
* Find the highest page frame number we have available
|
||||
* and the lowest used RAM address
|
||||
*/
|
||||
/* Find the highest and lowest page frame numbers we have available. */
|
||||
for (i = 0; i < boot_mem_map.nr_map; i++) {
|
||||
unsigned long start, end;
|
||||
|
||||
|
@ -427,13 +428,6 @@ static void __init bootmem_init(void)
|
|||
max_low_pfn = end;
|
||||
if (start < min_low_pfn)
|
||||
min_low_pfn = start;
|
||||
if (end <= reserved_end)
|
||||
continue;
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
/* Skip zones before initrd and initrd itself */
|
||||
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
|
||||
continue;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (min_low_pfn >= max_low_pfn)
|
||||
|
@ -474,6 +468,7 @@ static void __init bootmem_init(void)
|
|||
max_low_pfn = PFN_DOWN(HIGHMEM_START);
|
||||
}
|
||||
|
||||
/* Install all valid RAM ranges to the memblock memory region */
|
||||
for (i = 0; i < boot_mem_map.nr_map; i++) {
|
||||
unsigned long start, end;
|
||||
|
||||
|
@ -481,98 +476,38 @@ static void __init bootmem_init(void)
|
|||
end = PFN_DOWN(boot_mem_map.map[i].addr
|
||||
+ boot_mem_map.map[i].size);
|
||||
|
||||
if (start <= min_low_pfn)
|
||||
if (start < min_low_pfn)
|
||||
start = min_low_pfn;
|
||||
if (start >= end)
|
||||
continue;
|
||||
|
||||
#ifndef CONFIG_HIGHMEM
|
||||
/* Ignore highmem regions if highmem is unsupported */
|
||||
if (end > max_low_pfn)
|
||||
end = max_low_pfn;
|
||||
|
||||
/*
|
||||
* ... finally, is the area going away?
|
||||
*/
|
||||
#endif
|
||||
if (end <= start)
|
||||
continue;
|
||||
#endif
|
||||
|
||||
memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Register fully available low RAM pages with the bootmem allocator.
|
||||
*/
|
||||
for (i = 0; i < boot_mem_map.nr_map; i++) {
|
||||
unsigned long start, end, size;
|
||||
|
||||
start = PFN_UP(boot_mem_map.map[i].addr);
|
||||
end = PFN_DOWN(boot_mem_map.map[i].addr
|
||||
+ boot_mem_map.map[i].size);
|
||||
|
||||
/*
|
||||
* Reserve usable memory.
|
||||
*/
|
||||
/* Reserve any memory except the ordinary RAM ranges. */
|
||||
switch (boot_mem_map.map[i].type) {
|
||||
case BOOT_MEM_RAM:
|
||||
break;
|
||||
case BOOT_MEM_INIT_RAM:
|
||||
memory_present(0, start, end);
|
||||
continue;
|
||||
default:
|
||||
/* Not usable memory */
|
||||
if (start > min_low_pfn && end < max_low_pfn)
|
||||
memblock_reserve(boot_mem_map.map[i].addr,
|
||||
boot_mem_map.map[i].size);
|
||||
|
||||
case BOOT_MEM_NOMAP: /* Discard the range from the system. */
|
||||
memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
|
||||
continue;
|
||||
default: /* Reserve the rest of the memory types at boot time */
|
||||
memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* We are rounding up the start address of usable memory
|
||||
* and at the end of the usable range downwards.
|
||||
* In any case the added to the memblock memory regions
|
||||
* (highmem/lowmem, available/reserved, etc) are considered
|
||||
* as present, so inform sparsemem about them.
|
||||
*/
|
||||
if (start >= max_low_pfn)
|
||||
continue;
|
||||
if (start < reserved_end)
|
||||
start = reserved_end;
|
||||
if (end > max_low_pfn)
|
||||
end = max_low_pfn;
|
||||
|
||||
/*
|
||||
* ... finally, is the area going away?
|
||||
*/
|
||||
if (end <= start)
|
||||
continue;
|
||||
size = end - start;
|
||||
|
||||
/* Register lowmem ranges */
|
||||
memory_present(0, start, end);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
/*
|
||||
* The kernel reserves all memory below its _end symbol as bootmem,
|
||||
* but the kernel may now be at a much higher address. The memory
|
||||
* between the original and new locations may be returned to the system.
|
||||
*/
|
||||
if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
|
||||
unsigned long offset;
|
||||
extern void show_kernel_relocation(const char *level);
|
||||
|
||||
offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
|
||||
memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
|
||||
|
||||
#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
|
||||
/*
|
||||
* This information is necessary when debugging the kernel
|
||||
* But is a security vulnerability otherwise!
|
||||
*/
|
||||
show_kernel_relocation(KERN_INFO);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Reserve initrd memory if needed.
|
||||
*/
|
||||
|
@ -781,7 +716,6 @@ static void __init request_crashkernel(struct resource *res)
|
|||
*/
|
||||
static void __init arch_mem_init(char **cmdline_p)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
extern void plat_mem_setup(void);
|
||||
|
||||
/*
|
||||
|
@ -809,6 +743,9 @@ static void __init arch_mem_init(char **cmdline_p)
|
|||
arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
|
||||
PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
|
||||
BOOT_MEM_INIT_RAM);
|
||||
arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
|
||||
PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
|
||||
BOOT_MEM_RAM);
|
||||
|
||||
pr_info("Determined physical RAM map:\n");
|
||||
print_memory_map();
|
||||
|
@ -884,13 +821,16 @@ static void __init arch_mem_init(char **cmdline_p)
|
|||
plat_swiotlb_setup();
|
||||
|
||||
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
|
||||
/* Tell bootmem about cma reserved memblock section */
|
||||
for_each_memblock(reserved, reg)
|
||||
if (reg->size != 0)
|
||||
memblock_reserve(reg->base, reg->size);
|
||||
|
||||
reserve_bootmem_region(__pa_symbol(&__nosave_begin),
|
||||
__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
|
||||
/* Reserve for hibernation. */
|
||||
memblock_reserve(__pa_symbol(&__nosave_begin),
|
||||
__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
|
||||
|
||||
fdt_init_reserved_mem();
|
||||
|
||||
memblock_dump_all();
|
||||
|
||||
early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));
|
||||
}
|
||||
|
||||
static void __init resource_init(void)
|
||||
|
@ -935,6 +875,7 @@ static void __init resource_init(void)
|
|||
res->flags |= IORESOURCE_SYSRAM;
|
||||
break;
|
||||
case BOOT_MEM_RESERVED:
|
||||
case BOOT_MEM_NOMAP:
|
||||
default:
|
||||
res->name = "reserved";
|
||||
}
|
||||
|
|
|
@ -2151,7 +2151,7 @@ static void configure_hwrena(void)
|
|||
|
||||
static void configure_exception_vector(void)
|
||||
{
|
||||
if (cpu_has_veic || cpu_has_vint) {
|
||||
if (cpu_has_mips_r2_r6) {
|
||||
unsigned long sr = set_c0_status(ST0_BEV);
|
||||
/* If available, use WG to set top bits of EBASE */
|
||||
if (cpu_has_ebase_wg) {
|
||||
|
@ -2163,6 +2163,8 @@ static void configure_exception_vector(void)
|
|||
}
|
||||
write_c0_ebase(ebase);
|
||||
write_c0_status(sr);
|
||||
}
|
||||
if (cpu_has_veic || cpu_has_vint) {
|
||||
/* Setting vector spacing enables EI/VI mode */
|
||||
change_c0_intctl(0x3e0, VECTORSPACING);
|
||||
}
|
||||
|
@ -2193,22 +2195,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
|
|||
* o read IntCtl.IPFDC to determine the fast debug channel interrupt
|
||||
*/
|
||||
if (cpu_has_mips_r2_r6) {
|
||||
/*
|
||||
* We shouldn't trust a secondary core has a sane EBASE register
|
||||
* so use the one calculated by the boot CPU.
|
||||
*/
|
||||
if (!is_boot_cpu) {
|
||||
/* If available, use WG to set top bits of EBASE */
|
||||
if (cpu_has_ebase_wg) {
|
||||
#ifdef CONFIG_64BIT
|
||||
write_c0_ebase_64(ebase | MIPS_EBASE_WG);
|
||||
#else
|
||||
write_c0_ebase(ebase | MIPS_EBASE_WG);
|
||||
#endif
|
||||
}
|
||||
write_c0_ebase(ebase);
|
||||
}
|
||||
|
||||
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
|
||||
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
|
||||
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
|
||||
|
@ -2284,19 +2270,27 @@ void __init trap_init(void)
|
|||
extern char except_vec3_generic;
|
||||
extern char except_vec4;
|
||||
extern char except_vec3_r4000;
|
||||
unsigned long i;
|
||||
unsigned long i, vec_size;
|
||||
phys_addr_t ebase_pa;
|
||||
|
||||
check_wait();
|
||||
|
||||
if (cpu_has_veic || cpu_has_vint) {
|
||||
unsigned long size = 0x200 + VECTORSPACING*64;
|
||||
phys_addr_t ebase_pa;
|
||||
if (!cpu_has_mips_r2_r6) {
|
||||
ebase = CAC_BASE;
|
||||
ebase_pa = virt_to_phys((void *)ebase);
|
||||
vec_size = 0x400;
|
||||
|
||||
ebase = (unsigned long)
|
||||
memblock_alloc(size, 1 << fls(size));
|
||||
if (!ebase)
|
||||
memblock_reserve(ebase_pa, vec_size);
|
||||
} else {
|
||||
if (cpu_has_veic || cpu_has_vint)
|
||||
vec_size = 0x200 + VECTORSPACING*64;
|
||||
else
|
||||
vec_size = PAGE_SIZE;
|
||||
|
||||
ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
|
||||
if (!ebase_pa)
|
||||
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
|
||||
__func__, size, 1 << fls(size));
|
||||
__func__, vec_size, 1 << fls(vec_size));
|
||||
|
||||
/*
|
||||
* Try to ensure ebase resides in KSeg0 if possible.
|
||||
|
@ -2309,23 +2303,10 @@ void __init trap_init(void)
|
|||
* EVA is special though as it allows segments to be rearranged
|
||||
* and to become uncached during cache error handling.
|
||||
*/
|
||||
ebase_pa = __pa(ebase);
|
||||
if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
|
||||
ebase = CKSEG0ADDR(ebase_pa);
|
||||
} else {
|
||||
ebase = CAC_BASE;
|
||||
|
||||
if (cpu_has_mips_r2_r6) {
|
||||
if (cpu_has_ebase_wg) {
|
||||
#ifdef CONFIG_64BIT
|
||||
ebase = (read_c0_ebase_64() & ~0xfff);
|
||||
#else
|
||||
ebase = (read_c0_ebase() & ~0xfff);
|
||||
#endif
|
||||
} else {
|
||||
ebase += (read_c0_ebase() & 0x3ffff000);
|
||||
}
|
||||
}
|
||||
else
|
||||
ebase = (unsigned long)phys_to_virt(ebase_pa);
|
||||
}
|
||||
|
||||
if (cpu_has_mmips) {
|
||||
|
@ -2459,7 +2440,7 @@ void __init trap_init(void)
|
|||
else
|
||||
set_handler(0x080, &except_vec3_generic, 0x80);
|
||||
|
||||
local_flush_icache_range(ebase, ebase + 0x400);
|
||||
local_flush_icache_range(ebase, ebase + vec_size);
|
||||
|
||||
sort_extable(__start___dbe_table, __stop___dbe_table);
|
||||
|
||||
|
|
|
@@ -1141,9 +1141,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 	unsigned long pc = vcpu->arch.pc;
 	int index;
 
-	get_random_bytes(&index, sizeof(index));
-	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-
+	index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
 	tlb = &vcpu->arch.guest_tlb[index];
 
 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
@ -76,14 +76,22 @@ static const struct insn insn_table[insn_invalid] = {
|
|||
[insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
|
||||
[insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
|
||||
[insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
|
||||
[insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op),
|
||||
RS | RT | RD},
|
||||
[insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
|
||||
[insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
|
||||
[insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
|
||||
[insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
|
||||
[insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
|
||||
[insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op),
|
||||
RS | RT | RD},
|
||||
[insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
|
||||
[insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op),
|
||||
RS | RT | RD},
|
||||
[insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
|
||||
[insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
|
||||
[insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
|
||||
RS | RT | RD},
|
||||
[insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
|
||||
[insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
|
||||
[insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
|
||||
|
@ -132,12 +140,16 @@ static const struct insn insn_table[insn_invalid] = {
|
|||
[insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
|
||||
[insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
|
||||
[insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
|
||||
[insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op),
|
||||
RS | RT | RD},
|
||||
[insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
|
||||
[insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
|
||||
[insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
|
||||
[insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
|
||||
[insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
|
||||
[insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
|
||||
[insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
|
||||
RS | RT | RD},
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
[insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
|
||||
#else
|
||||
|
@ -163,6 +175,8 @@ static const struct insn insn_table[insn_invalid] = {
|
|||
[insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
|
||||
#endif
|
||||
[insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
|
||||
[insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD},
|
||||
[insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD},
|
||||
[insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
|
||||
[insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
|
||||
[insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
|
||||
|
|
|
@ -50,21 +50,22 @@ enum opcode {
|
|||
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
|
||||
insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
|
||||
insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
|
||||
insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0,
|
||||
insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd,
|
||||
insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav,
|
||||
insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext,
|
||||
insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu,
|
||||
insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu,
|
||||
insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0,
|
||||
insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0,
|
||||
insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
|
||||
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
|
||||
insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
|
||||
insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav,
|
||||
insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
|
||||
insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
|
||||
insn_xor, insn_xori, insn_yield,
|
||||
insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu,
|
||||
insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu,
|
||||
insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll,
|
||||
insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl,
|
||||
insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins,
|
||||
insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld,
|
||||
insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
|
||||
insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
|
||||
insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
|
||||
insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
|
||||
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
|
||||
insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
|
||||
insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
|
||||
insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync,
|
||||
insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait,
|
||||
insn_wsbh, insn_xor, insn_xori, insn_yield,
|
||||
insn_invalid /* insn_invalid must be last */
|
||||
};
|
||||
|
||||
|
@ -287,13 +288,17 @@ I_u2u1(_cfcmsa)
|
|||
I_u1u2(_ctc1)
|
||||
I_u2u1(_ctcmsa)
|
||||
I_u1u2(_ddivu)
|
||||
I_u3u1u2(_ddivu_r6)
|
||||
I_u1u2u3(_dmfc0)
|
||||
I_u3u1u2(_dmodu)
|
||||
I_u1u2u3(_dmtc0)
|
||||
I_u1u2(_dmultu)
|
||||
I_u3u1u2(_dmulu)
|
||||
I_u2u1s3(_daddiu)
|
||||
I_u3u1u2(_daddu)
|
||||
I_u1(_di);
|
||||
I_u1u2(_divu)
|
||||
I_u3u1u2(_divu_r6)
|
||||
I_u2u1(_dsbh);
|
||||
I_u2u1(_dshd);
|
||||
I_u2u1u3(_dsll)
|
||||
|
@ -327,6 +332,7 @@ I_u2s3u1(_lw)
|
|||
I_u2s3u1(_lwu)
|
||||
I_u1u2u3(_mfc0)
|
||||
I_u1u2u3(_mfhc0)
|
||||
I_u3u1u2(_modu)
|
||||
I_u3u1u2(_movn)
|
||||
I_u3u1u2(_movz)
|
||||
I_u1(_mfhi)
|
||||
|
@ -337,6 +343,7 @@ I_u1(_mthi)
|
|||
I_u1(_mtlo)
|
||||
I_u3u1u2(_mul)
|
||||
I_u1u2(_multu)
|
||||
I_u3u1u2(_mulu)
|
||||
I_u3u1u2(_nor)
|
||||
I_u3u1u2(_or)
|
||||
I_u2u1u3(_ori)
|
||||
|
@ -345,6 +352,8 @@ I_u2s3u1(_sb)
|
|||
I_u2s3u1(_sc)
|
||||
I_u2s3u1(_scd)
|
||||
I_u2s3u1(_sd)
|
||||
I_u3u1u2(_seleqz)
|
||||
I_u3u1u2(_selnez)
|
||||
I_u2s3u1(_sh)
|
||||
I_u2u1u3(_sll)
|
||||
I_u3u2u1(_sllv)
|
||||
|
|
|
@@ -1,4 +1,3 @@
 # MIPS networking code
 
-obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
 obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
[File diff suppressed because it is too large]
|
@ -1,285 +0,0 @@
|
|||
/*
|
||||
* bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
|
||||
* compiler.
|
||||
*
|
||||
* Copyright (C) 2015 Imagination Technologies Ltd.
|
||||
* Author: Markos Chandras <markos.chandras@imgtec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; version 2 of the License.
|
||||
*/
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/isa-rev.h>
|
||||
#include <asm/regdef.h>
|
||||
#include "bpf_jit.h"
|
||||
|
||||
/* ABI
|
||||
*
|
||||
* r_skb_hl skb header length
|
||||
* r_skb_data skb data
|
||||
* r_off(a1) offset register
|
||||
* r_A BPF register A
|
||||
* r_X PF register X
|
||||
* r_skb(a0) *skb
|
||||
* r_M *scratch memory
|
||||
* r_skb_le skb length
|
||||
* r_s0 Scratch register 0
|
||||
* r_s1 Scratch register 1
|
||||
*
|
||||
* On entry:
|
||||
* a0: *skb
|
||||
* a1: offset (imm or imm + X)
|
||||
*
|
||||
* All non-BPF-ABI registers are free for use. On return, we only
|
||||
* care about r_ret. The BPF-ABI registers are assumed to remain
|
||||
* unmodified during the entire filter operation.
|
||||
*/
|
||||
|
||||
#define skb a0
|
||||
#define offset a1
|
||||
#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
|
||||
|
||||
/* We know better :) so prevent assembler reordering etc */
|
||||
.set noreorder
|
||||
|
||||
#define is_offset_negative(TYPE) \
|
||||
/* If offset is negative we have more work to do */ \
|
||||
slti t0, offset, 0; \
|
||||
bgtz t0, bpf_slow_path_##TYPE##_neg; \
|
||||
/* Be careful what follows in DS. */
|
||||
|
||||
#define is_offset_in_header(SIZE, TYPE) \
|
||||
/* Reading from header? */ \
|
||||
addiu $r_s0, $r_skb_hl, -SIZE; \
|
||||
slt t0, $r_s0, offset; \
|
||||
bgtz t0, bpf_slow_path_##TYPE; \
|
||||
|
||||
LEAF(sk_load_word)
|
||||
is_offset_negative(word)
|
||||
FEXPORT(sk_load_word_positive)
|
||||
is_offset_in_header(4, word)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
.set reorder
|
||||
lw $r_A, 0(t1)
|
||||
.set noreorder
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if MIPS_ISA_REV >= 2
|
||||
wsbh t0, $r_A
|
||||
rotr $r_A, t0, 16
|
||||
# else
|
||||
sll t0, $r_A, 24
|
||||
srl t1, $r_A, 24
|
||||
srl t2, $r_A, 8
|
||||
or t0, t0, t1
|
||||
andi t2, t2, 0xff00
|
||||
andi t1, $r_A, 0xff00
|
||||
or t0, t0, t2
|
||||
sll t1, t1, 8
|
||||
or $r_A, t0, t1
|
||||
# endif
|
||||
#endif
|
||||
jr $r_ra
|
||||
move $r_ret, zero
|
||||
END(sk_load_word)
|
||||
|
||||
LEAF(sk_load_half)
|
||||
is_offset_negative(half)
|
||||
FEXPORT(sk_load_half_positive)
|
||||
is_offset_in_header(2, half)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
lhu $r_A, 0(t1)
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if MIPS_ISA_REV >= 2
|
||||
wsbh $r_A, $r_A
|
||||
# else
|
||||
sll t0, $r_A, 8
|
||||
srl t1, $r_A, 8
|
||||
andi t0, t0, 0xff00
|
||||
or $r_A, t0, t1
|
||||
# endif
|
||||
#endif
|
||||
jr $r_ra
|
||||
move $r_ret, zero
|
||||
END(sk_load_half)
|
||||
|
||||
LEAF(sk_load_byte)
|
||||
is_offset_negative(byte)
|
||||
FEXPORT(sk_load_byte_positive)
|
||||
is_offset_in_header(1, byte)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
lbu $r_A, 0(t1)
|
||||
jr $r_ra
|
||||
move $r_ret, zero
|
||||
END(sk_load_byte)
|
||||
|
||||
/*
|
||||
* call skb_copy_bits:
|
||||
* (prototype in linux/skbuff.h)
|
||||
*
|
||||
* int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
|
||||
*
|
||||
* o32 mandates we leave 4 spaces for argument registers in case
|
||||
* the callee needs to use them. Even though we don't care about
|
||||
* the argument registers ourselves, we need to allocate that space
|
||||
* to remain ABI compliant since the callee may want to use that space.
|
||||
* We also allocate 2 more spaces for $r_ra and our return register (*to).
|
||||
*
|
||||
* n64 is a bit different. The *caller* will allocate the space to preserve
|
||||
* the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
|
||||
* good reason but it does not matter that much really.
|
||||
*
|
||||
* (void *to) is returned in r_s0
|
||||
*
|
||||
*/
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
#define DS_OFFSET(SIZE) (4 * SZREG)
|
||||
#else
|
||||
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
|
||||
#endif
|
||||
#define bpf_slow_path_common(SIZE) \
|
||||
/* Quick check. Are we within reasonable boundaries? */ \
|
||||
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
|
||||
sltu $r_s0, offset, $r_s1; \
|
||||
beqz $r_s0, fault; \
|
||||
/* Load 4th argument in DS */ \
|
||||
LONG_ADDIU a3, zero, SIZE; \
|
||||
PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
|
||||
PTR_LA t0, skb_copy_bits; \
|
||||
PTR_S $r_ra, (5 * SZREG)($r_sp); \
|
||||
/* Assign low slot to a2 */ \
|
||||
PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
|
||||
jalr t0; \
|
||||
/* Reset our destination slot (DS but it's ok) */ \
|
||||
INT_S zero, (4 * SZREG)($r_sp); \
|
||||
/* \
|
||||
* skb_copy_bits returns 0 on success and -EFAULT \
|
||||
* on error. Our data live in a2. Do not bother with \
|
||||
* our data if an error has been returned. \
|
||||
*/ \
|
||||
/* Restore our frame */ \
|
||||
PTR_L $r_ra, (5 * SZREG)($r_sp); \
|
||||
INT_L $r_s0, (4 * SZREG)($r_sp); \
|
||||
bltz v0, fault; \
|
||||
PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
|
||||
move $r_ret, zero; \
|
||||
|
||||
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
|
||||
bpf_slow_path_common(4)
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if MIPS_ISA_REV >= 2
|
||||
wsbh t0, $r_s0
|
||||
jr $r_ra
|
||||
rotr $r_A, t0, 16
|
||||
# else
|
||||
sll t0, $r_s0, 24
|
||||
srl t1, $r_s0, 24
|
||||
srl t2, $r_s0, 8
|
||||
or t0, t0, t1
|
||||
andi t2, t2, 0xff00
|
||||
andi t1, $r_s0, 0xff00
|
||||
or t0, t0, t2
|
||||
sll t1, t1, 8
|
||||
jr $r_ra
|
||||
or $r_A, t0, t1
|
||||
# endif
|
||||
#else
|
||||
jr $r_ra
|
||||
move $r_A, $r_s0
|
||||
#endif
|
||||
|
||||
END(bpf_slow_path_word)
|
||||
|
||||
NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
|
||||
bpf_slow_path_common(2)
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if MIPS_ISA_REV >= 2
|
||||
jr $r_ra
|
||||
wsbh $r_A, $r_s0
|
||||
# else
|
||||
sll t0, $r_s0, 8
|
||||
andi t1, $r_s0, 0xff00
|
||||
andi t0, t0, 0xff00
|
||||
srl t1, t1, 8
|
||||
jr $r_ra
|
||||
or $r_A, t0, t1
|
||||
# endif
|
||||
#else
|
||||
jr $r_ra
|
||||
move $r_A, $r_s0
|
||||
#endif
|
||||
|
||||
END(bpf_slow_path_half)
|
||||
|
||||
NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
|
||||
bpf_slow_path_common(1)
|
||||
jr $r_ra
|
||||
move $r_A, $r_s0
|
||||
|
||||
END(bpf_slow_path_byte)
|
||||
|
||||
/*
|
||||
* Negative entry points
|
||||
*/
|
||||
.macro bpf_is_end_of_data
|
||||
li t0, SKF_LL_OFF
|
||||
/* Reading link layer data? */
|
||||
slt t1, offset, t0
|
||||
bgtz t1, fault
|
||||
/* Be careful what follows in DS. */
|
||||
.endm
|
||||
/*
|
||||
* call skb_copy_bits:
|
||||
* (prototype in linux/filter.h)
|
||||
*
|
||||
* void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
|
||||
* int k, unsigned int size)
|
||||
*
|
||||
* see above (bpf_slow_path_common) for ABI restrictions
|
||||
*/
|
||||
#define bpf_negative_common(SIZE) \
|
||||
PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
|
||||
PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
|
||||
PTR_S $r_ra, (5 * SZREG)($r_sp); \
|
||||
jalr t0; \
|
||||
li a2, SIZE; \
|
||||
PTR_L $r_ra, (5 * SZREG)($r_sp); \
|
||||
/* Check return pointer */ \
|
||||
beqz v0, fault; \
|
||||
PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
|
||||
/* Preserve our pointer */ \
|
||||
move $r_s0, v0; \
|
||||
/* Set return value */ \
|
||||
move $r_ret, zero; \
|
||||
|
||||
bpf_slow_path_word_neg:
|
||||
bpf_is_end_of_data
|
||||
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
|
||||
bpf_negative_common(4)
|
||||
jr $r_ra
|
||||
lw $r_A, 0($r_s0)
|
||||
END(sk_load_word_negative)
|
||||
|
||||
bpf_slow_path_half_neg:
|
||||
bpf_is_end_of_data
|
||||
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
|
||||
bpf_negative_common(2)
|
||||
jr $r_ra
|
||||
lhu $r_A, 0($r_s0)
|
||||
END(sk_load_half_negative)
|
||||
|
||||
bpf_slow_path_byte_neg:
|
||||
bpf_is_end_of_data
|
||||
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
|
||||
bpf_negative_common(1)
|
||||
jr $r_ra
|
||||
lbu $r_A, 0($r_s0)
|
||||
END(sk_load_byte_negative)
|
||||
|
||||
fault:
|
||||
jr $r_ra
|
||||
addiu $r_ret, zero, 1
|
|
@ -22,6 +22,7 @@
|
|||
#include <asm/byteorder.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cpu-features.h>
|
||||
#include <asm/isa-rev.h>
|
||||
#include <asm/uasm.h>
|
||||
|
||||
/* Registers used by JIT */
|
||||
|
@ -125,15 +126,21 @@ static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
|
|||
}
|
||||
|
||||
/* Simply emit the instruction if the JIT memory space has been allocated */
|
||||
#define emit_instr(ctx, func, ...) \
|
||||
do { \
|
||||
if ((ctx)->target != NULL) { \
|
||||
u32 *p = &(ctx)->target[ctx->idx]; \
|
||||
uasm_i_##func(&p, ##__VA_ARGS__); \
|
||||
} \
|
||||
(ctx)->idx++; \
|
||||
#define emit_instr_long(ctx, func64, func32, ...) \
|
||||
do { \
|
||||
if ((ctx)->target != NULL) { \
|
||||
u32 *p = &(ctx)->target[ctx->idx]; \
|
||||
if (IS_ENABLED(CONFIG_64BIT)) \
|
||||
uasm_i_##func64(&p, ##__VA_ARGS__); \
|
||||
else \
|
||||
uasm_i_##func32(&p, ##__VA_ARGS__); \
|
||||
} \
|
||||
(ctx)->idx++; \
|
||||
} while (0)
|
||||
|
||||
#define emit_instr(ctx, func, ...) \
|
||||
emit_instr_long(ctx, func, func, ##__VA_ARGS__)
|
||||
|
||||
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
|
||||
{
|
||||
unsigned long target_va, base_va;
|
||||
|
@ -274,17 +281,17 @@ static int gen_int_prologue(struct jit_ctx *ctx)
|
|||
* If RA we are doing a function call and may need
|
||||
* extra 8-byte tmp area.
|
||||
*/
|
||||
stack_adjust += 16;
|
||||
stack_adjust += 2 * sizeof(long);
|
||||
if (ctx->flags & EBPF_SAVE_S0)
|
||||
stack_adjust += 8;
|
||||
stack_adjust += sizeof(long);
|
||||
if (ctx->flags & EBPF_SAVE_S1)
|
||||
stack_adjust += 8;
|
||||
stack_adjust += sizeof(long);
|
||||
if (ctx->flags & EBPF_SAVE_S2)
|
||||
stack_adjust += 8;
|
||||
stack_adjust += sizeof(long);
|
||||
if (ctx->flags & EBPF_SAVE_S3)
|
||||
stack_adjust += 8;
|
||||
stack_adjust += sizeof(long);
|
||||
if (ctx->flags & EBPF_SAVE_S4)
|
||||
stack_adjust += 8;
|
||||
stack_adjust += sizeof(long);
|
||||
|
||||
BUILD_BUG_ON(MAX_BPF_STACK & 7);
|
||||
locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
|
||||
|
@ -298,41 +305,49 @@ static int gen_int_prologue(struct jit_ctx *ctx)
|
|||
* On tail call we skip this instruction, and the TCC is
|
||||
* passed in $v1 from the caller.
|
||||
*/
|
||||
emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
|
||||
emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
|
||||
if (stack_adjust)
|
||||
emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
|
||||
emit_instr_long(ctx, daddiu, addiu,
|
||||
MIPS_R_SP, MIPS_R_SP, -stack_adjust);
|
||||
else
|
||||
return 0;
|
||||
|
||||
store_offset = stack_adjust - 8;
|
||||
store_offset = stack_adjust - sizeof(long);
|
||||
|
||||
if (ctx->flags & EBPF_SAVE_RA) {
|
||||
emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_RA, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S0) {
|
||||
emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_S0, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S1) {
|
||||
emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_S1, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S2) {
|
||||
emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_S2, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S3) {
|
||||
emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_S3, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S4) {
|
||||
emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, sd, sw,
|
||||
MIPS_R_S4, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
|
||||
if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
|
||||
emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
|
||||
emit_instr_long(ctx, daddu, addu,
|
||||
MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -341,7 +356,7 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
|
|||
{
|
||||
const struct bpf_prog *prog = ctx->skf;
|
||||
int stack_adjust = ctx->stack_size;
|
||||
int store_offset = stack_adjust - 8;
|
||||
int store_offset = stack_adjust - sizeof(long);
|
||||
enum reg_val_type td;
|
||||
int r0 = MIPS_R_V0;
|
||||
|
||||
|
@ -353,33 +368,40 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
|
|||
}
|
||||
|
||||
if (ctx->flags & EBPF_SAVE_RA) {
|
||||
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_RA, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S0) {
|
||||
emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_S0, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S1) {
|
||||
emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_S1, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S2) {
|
||||
emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_S2, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S3) {
|
||||
emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_S3, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
if (ctx->flags & EBPF_SAVE_S4) {
|
||||
emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
|
||||
store_offset -= 8;
|
||||
emit_instr_long(ctx, ld, lw,
|
||||
MIPS_R_S4, store_offset, MIPS_R_SP);
|
||||
store_offset -= sizeof(long);
|
||||
}
|
||||
emit_instr(ctx, jr, dest_reg);
|
||||
|
||||
if (stack_adjust)
|
||||
emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
|
||||
emit_instr_long(ctx, daddiu, addiu,
|
||||
MIPS_R_SP, MIPS_R_SP, stack_adjust);
|
||||
else
|
||||
emit_instr(ctx, nop);
|
||||
|
||||
|
@ -646,6 +668,10 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
s64 t64s;
|
||||
int bpf_op = BPF_OP(insn->code);
|
||||
|
||||
if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
|
||||
|| (bpf_op == BPF_DW)))
|
||||
return -EINVAL;
|
||||
|
||||
switch (insn->code) {
|
||||
case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
|
||||
case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
|
||||
|
@ -678,8 +704,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
if (insn->imm == 1) /* Mult by 1 is a nop */
|
||||
break;
|
||||
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
|
||||
emit_instr(ctx, dmultu, MIPS_R_AT, dst);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
|
||||
} else {
|
||||
emit_instr(ctx, dmultu, MIPS_R_AT, dst);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
}
|
||||
break;
|
||||
case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
|
||||
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
|
||||
|
@ -701,8 +731,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
if (insn->imm == 1) /* Mult by 1 is a nop */
|
||||
break;
|
||||
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
|
||||
emit_instr(ctx, multu, dst, MIPS_R_AT);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
|
||||
} else {
|
||||
emit_instr(ctx, multu, dst, MIPS_R_AT);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
}
|
||||
break;
|
||||
case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
|
||||
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
|
||||
|
@ -733,6 +767,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
break;
|
||||
}
|
||||
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
|
||||
else
|
||||
emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
|
||||
break;
|
||||
}
|
||||
emit_instr(ctx, divu, dst, MIPS_R_AT);
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, mflo, dst);
|
||||
|
@ -755,6 +796,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
break;
|
||||
}
|
||||
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
|
||||
else
|
||||
emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
|
||||
break;
|
||||
}
|
||||
emit_instr(ctx, ddivu, dst, MIPS_R_AT);
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, mflo, dst);
|
||||
|
@ -820,11 +868,23 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
emit_instr(ctx, and, dst, dst, src);
|
||||
break;
|
||||
case BPF_MUL:
|
||||
emit_instr(ctx, dmultu, dst, src);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
emit_instr(ctx, dmulu, dst, dst, src);
|
||||
} else {
|
||||
emit_instr(ctx, dmultu, dst, src);
|
||||
emit_instr(ctx, mflo, dst);
|
||||
}
|
||||
break;
|
||||
case BPF_DIV:
|
||||
case BPF_MOD:
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, ddivu_r6,
|
||||
dst, dst, src);
|
||||
else
|
||||
emit_instr(ctx, modu, dst, dst, src);
|
||||
break;
|
||||
}
|
||||
emit_instr(ctx, ddivu, dst, src);
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, mflo, dst);
|
||||
|
@ -904,6 +964,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
break;
|
||||
case BPF_DIV:
|
||||
case BPF_MOD:
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, divu_r6, dst, dst, src);
|
||||
else
|
||||
emit_instr(ctx, modu, dst, dst, src);
|
||||
break;
|
||||
}
|
||||
emit_instr(ctx, divu, dst, src);
|
||||
if (bpf_op == BPF_DIV)
|
||||
emit_instr(ctx, mflo, dst);
|
||||
|
@ -1007,8 +1074,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
|
||||
emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
|
||||
/* SP known to be non-zero, movz becomes boolean not */
|
||||
emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
|
||||
emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
|
||||
if (MIPS_ISA_REV >= 6) {
|
||||
emit_instr(ctx, seleqz, MIPS_R_T9,
|
||||
MIPS_R_SP, MIPS_R_T8);
|
||||
} else {
|
||||
emit_instr(ctx, movz, MIPS_R_T9,
|
||||
MIPS_R_SP, MIPS_R_T8);
|
||||
emit_instr(ctx, movn, MIPS_R_T9,
|
||||
MIPS_R_ZERO, MIPS_R_T8);
|
||||
}
|
||||
emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
|
||||
cmp_eq = bpf_op == BPF_JGT;
|
||||
dst = MIPS_R_AT;
|
||||
|
@ -1235,7 +1309,7 @@ jeq_common:
|
|||
|
||||
case BPF_JMP | BPF_CALL:
|
||||
ctx->flags |= EBPF_SAVE_RA;
|
||||
t64s = (s64)insn->imm + (s64)__bpf_call_base;
|
||||
t64s = (s64)insn->imm + (long)__bpf_call_base;
|
||||
emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
|
||||
emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
|
||||
/* delay slot */
|
||||
|
@ -1367,6 +1441,17 @@ jeq_common:
|
|||
if (src < 0)
|
||||
return src;
|
||||
if (BPF_MODE(insn->code) == BPF_XADD) {
|
||||
/*
|
||||
* If mem_off does not fit within the 9 bit ll/sc
|
||||
* instruction immediate field, use a temp reg.
|
||||
*/
|
||||
if (MIPS_ISA_REV >= 6 &&
|
||||
(mem_off >= BIT(8) || mem_off < -BIT(8))) {
|
||||
emit_instr(ctx, daddiu, MIPS_R_T6,
|
||||
dst, mem_off);
|
||||
mem_off = 0;
|
||||
dst = MIPS_R_T6;
|
||||
}
|
||||
switch (BPF_SIZE(insn->code)) {
|
||||
case BPF_W:
|
||||
if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
|
||||
|
@ -1721,7 +1806,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
unsigned int image_size;
|
||||
u8 *image_ptr;
|
||||
|
||||
if (!prog->jit_requested || !cpu_has_mips64r2)
|
||||
if (!prog->jit_requested || MIPS_ISA_REV < 2)
|
||||
return prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
|
|
|
@ -39,12 +39,12 @@ choice
|
|||
Select the devicetree.
|
||||
|
||||
config DTB_PIC32_NONE
|
||||
bool "None"
|
||||
bool "None"
|
||||
|
||||
config DTB_PIC32_MZDA_SK
|
||||
bool "PIC32MZDA Starter Kit"
|
||||
depends on PIC32MZDA
|
||||
select BUILTIN_DTB
|
||||
bool "PIC32MZDA Starter Kit"
|
||||
depends on PIC32MZDA
|
||||
select BUILTIN_DTB
|
||||
|
||||
endchoice
|
||||
|
||||
|
|
|
@@ -46,9 +46,7 @@ endif
 VDSO_LDFLAGS := \
 	-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
 	$(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
-	-nostdlib -shared \
-	$(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-	$(call cc-ldoption, -Wl$(comma)--build-id)
+	-nostdlib -shared -Wl,--hash-style=sysv -Wl,--build-id
 
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n