# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#

OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
	LDFLAGS_vmlinux := --no-relax
	KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
	CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
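
# With dynamic ftrace each function entry starts as a pad of nops emitted by
# -fpatchable-function-entry=8, which ftrace later rewrites in place into a
# four-instruction detour (a sketch, taken from the commit message that
# introduced this scheme):
#
#   <funca>:
#     nop  ->  REG_S ra, -SZREG(sp)
#     nop  ->  auipc ra, 0x?
#     nop  ->  jalr  ?(ra)
#     nop  ->  REG_L ra, -SZREG(sp)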

ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif

export BITS
ifeq ($(CONFIG_ARCH_RV64I),y)
	BITS := 64
	UTS_MACHINE := riscv64

	KBUILD_CFLAGS += -mabi=lp64
	KBUILD_AFLAGS += -mabi=lp64

	KBUILD_LDFLAGS += -melf64lriscv
else
	BITS := 32
	UTS_MACHINE := riscv32

	KBUILD_CFLAGS += -mabi=ilp32
	KBUILD_AFLAGS += -mabi=ilp32
	KBUILD_LDFLAGS += -melf32lriscv
endif

ifeq ($(CONFIG_LD_IS_LLD),y)
	KBUILD_CFLAGS += -mno-relax
	KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
	KBUILD_CFLAGS += -Wa,-mno-relax
	KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
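
# lld did not implement RISC-V linker relaxation at the time of writing, so
# stop the compiler and assembler from emitting relaxable relocations.  The
# -Wa,-mno-relax spelling is only understood by the GNU assembler, hence the
# CONFIG_AS_IS_LLVM guard.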

# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
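
# For example, with CONFIG_ARCH_RV64I=y, CONFIG_FPU=y and CONFIG_RISCV_ISA_C=y
# this yields -march=rv64imafdc for assembly but -march=rv64imac for C: "fd"
# is stripped from CFLAGS so the compiler never touches the floating-point
# registers in kernel code, while the assembler still needs the full string
# for the explicit FP save/restore code.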

KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)

ifeq ($(CONFIG_CMODEL_MEDLOW),y)
	KBUILD_CFLAGS += -mcmodel=medlow
endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
	KBUILD_CFLAGS += -mcmodel=medany
endif

ifeq ($(CONFIG_PERF_EVENTS),y)
	KBUILD_CFLAGS += -fno-omit-frame-pointer
endif

KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)

# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the
# RISC-V ISA, they're emulated by machine mode traps on all extant
# architectures. It's faster to have GCC emit only aligned accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
				-mstack-protector-guard-reg=tp \
				-mstack-protector-guard-offset=$(shell \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif
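
# The awk invocation above extracts the tp-relative offset of the stack
# canary from include/generated/asm-offsets.h, which carries a line of the
# form (value illustrative):
#
#   #define TSK_STACK_CANARY 1008 /* offsetof(struct task_struct, stack_canary) */
#
# With that, GCC emits per-task canary checks relative to tp, e.g. (from the
# commit that introduced this):
#
#   ld   a5,1008(tp)
#   xor  a5,a5,a4
#   bnez a5,<__stack_chk_fail>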

# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)

# Default target when executing plain make
boot := arch/riscv/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := $(boot)/xipImage
else
KBUILD_IMAGE := $(boot)/Image.gz
endif

head-y := arch/riscv/kernel/head.o
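
# arch/riscv/errata/ provides the ALTERNATIVE() runtime-patching framework:
# during boot the kernel matches each erratum against the CPU's vendorid,
# archid and impid and patches the fix in once, so one vendor's errata do not
# affect other cores at runtime.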
core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
core-$(CONFIG_KVM) += arch/riscv/kvm/

libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
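
# vdso-offsets.h is generated from the linked vDSO so that kernel sources can
# reference vDSO symbol offsets; it has to exist before the rest of the
# kernel is built, hence the hook into prepare.  External module builds
# (KBUILD_EXTMOD set) skip this.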
endif
endif

ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
KBUILD_IMAGE := $(boot)/Image.gz
endif
endif

BOOT_TARGETS := Image Image.gz loader loader.bin xipImage

all: $(notdir $(KBUILD_IMAGE))

$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
	@$(kecho) '  Kernel: $(boot)/$@ is ready'

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

install: install-image = Image
zinstall: install-image = Image.gz
install zinstall:
	$(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
	$(boot)/$(install-image) System.map "$(INSTALL_PATH)"
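
# Usage sketch (install.sh resolves the final destination; the cross prefix
# below is illustrative): "make install" copies the uncompressed Image,
# "make zinstall" the gzipped one, e.g.
#   make ARCH=riscv CROSS_COMPILE=riscv64-linux-gnu- INSTALL_PATH=/boot zinstall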

PHONY += rv32_randconfig
rv32_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/32-bit.config \
		-f $(srctree)/Makefile randconfig

PHONY += rv64_randconfig
rv64_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
		-f $(srctree)/Makefile randconfig
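
# These pin randconfig to a 32- or 64-bit base configuration, e.g.
# (cross prefix illustrative):
#   make ARCH=riscv CROSS_COMPILE=riscv64-linux-gnu- rv64_randconfig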