Merge OCK linux-6.6/devel branch into TK5 release branch

commit 9e51d2580d
Author: Jianping Liu
Date: 2024-11-13 15:59:05 +08:00
65 changed files with 2751 additions and 873 deletions

View File

@ -133,18 +133,22 @@ config LOONGARCH
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC
select HAVE_RETHOOK
select HAVE_RSEQ
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SETUP_PER_CPU_AREA if NUMA
select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STACKPROTECTOR
select ARCH_HAS_PHYS_TO_DMA
select HAVE_SYSCALL_TRACEPOINTS
@ -259,6 +263,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
@ -639,6 +646,8 @@ config PARAVIRT_TIME_ACCOUNTING
If in doubt, say N here.
source "kernel/livepatch/Kconfig"
endmenu
config ARCH_SELECT_MEMORY_MODEL

View File

@ -26,4 +26,16 @@ config UNWINDER_PROLOGUE
Some of the addresses it reports may be incorrect (but better than the
Guess unwinder).
config UNWINDER_ORC
bool "ORC unwinder"
depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
unwinding kernel stack traces. It uses a custom data format which is
a simplified version of the DWARF Call Frame Information standard.
Enabling this option will increase the kernel's runtime memory usage
by roughly 2-4MB, depending on your kernel config.
endchoice

View File

@ -25,6 +25,18 @@ endif
32bit-emul = elf32loongarch
64bit-emul = elf64loongarch
ifdef CONFIG_UNWINDER_ORC
orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h
orc_hash_sh := $(srctree)/scripts/orc_hash.sh
targets += $(orc_hash_h)
quiet_cmd_orc_hash = GEN $@
cmd_orc_hash = mkdir -p $(dir $@); \
$(CONFIG_SHELL) $(orc_hash_sh) < $< > $@
$(orc_hash_h): $(srctree)/arch/loongarch/include/asm/orc_types.h $(orc_hash_sh) FORCE
$(call if_changed,orc_hash)
archprepare: $(orc_hash_h)
endif
ifdef CONFIG_DYNAMIC_FTRACE
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
@ -68,8 +80,6 @@ LDFLAGS_vmlinux += -static -n -nostdlib
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += $(call cc-option,-mexplicit-relocs)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
@ -78,6 +88,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
KBUILD_AFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_CFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
ifdef CONFIG_OBJTOOL
KBUILD_CFLAGS += -fno-jump-tables
endif
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)

View File

@ -4,6 +4,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_IRQ_TIME_ACCOUNTING=y
@ -13,6 +14,7 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
@ -34,18 +36,22 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr"
CONFIG_CMDLINE_EXTEND=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_ARCH_IOREMAP=y
CONFIG_CPU_HAS_LSX=y
CONFIG_CPU_HAS_LASX=y
CONFIG_CPU_HAS_LBT=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_LIVEPATCH=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@ -56,6 +62,8 @@ CONFIG_ACPI_DOCK=y
CONFIG_ACPI_IPMI=m
CONFIG_ACPI_PCI_SLOT=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@ -74,6 +82,8 @@ CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
CONFIG_Z3FOLD_DEPRECATED=y
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_FREELIST_RANDOM=y
# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
@ -565,7 +575,6 @@ CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
CONFIG_BT_HS=y
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
# CONFIG_BT_HCIBTUSB_BCM is not set
@ -617,8 +626,6 @@ CONFIG_ISCSI_IBFT=m
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_MTD=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
@ -652,7 +659,7 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
CONFIG_NVME_RDMA=m
CONFIG_NVME_FC=m
@ -679,7 +686,7 @@ CONFIG_MISC_RTSX_PCI=m
CONFIG_MISC_RTSX_USB=m
CONFIG_UACCE=m
CONFIG_PVPANIC=y
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
@ -689,6 +696,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_SAS_ATA=y
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
@ -879,6 +887,7 @@ CONFIG_R8169=m
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
CONFIG_STMMAC_ETH=y
CONFIG_DWMAC_LOONGSON=m
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
@ -1470,11 +1479,11 @@ CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_VKMS=m
CONFIG_DRM_UDL=m
CONFIG_DRM_AST_LOONGSON=y
CONFIG_DRM_MGAG200=m
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
CONFIG_DRM_AST_LOONGSON=y
CONFIG_DRM_BOCHS=m
CONFIG_DRM_CIRRUS_QEMU=m
CONFIG_FB=y
@ -1912,6 +1921,7 @@ CONFIG_COMEDI_NI_PCIDIO=m
CONFIG_COMEDI_NI_PCIMIO=m
CONFIG_STAGING=y
CONFIG_COMMON_CLK_LOONGSON2=y
CONFIG_LOONGARCH_IOMMU=m
CONFIG_LOONGSON2_GUTS=y
CONFIG_LOONGSON2_PM=y
CONFIG_PM_DEVFREQ=y
@ -2141,6 +2151,7 @@ CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ARC4=m
@ -2156,6 +2167,7 @@ CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
@ -2177,7 +2189,6 @@ CONFIG_SIGNED_PE_FILE_VERIFICATION=y
CONFIG_SECONDARY_TRUSTED_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_SYSTEM_REVOCATION_LIST=y
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=m
CONFIG_DMA_CMA=y
@ -2193,13 +2204,16 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_HARDLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_LIST=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
CONFIG_FUNCTION_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_STRICT_DEVMEM is not set
CONFIG_UNWINDER_ORC=y
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_LOONGARCH_IOMMU=m
CONFIG_CMDLINE_EXTEND=y
CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr"

View File

@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += orc_hash.h
generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h

View File

@ -44,6 +44,7 @@
do { \
instrumentation_begin(); \
__BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
annotate_reachable(); \
instrumentation_end(); \
} while (0)

View File

@ -6,6 +6,8 @@
#include <asm/ptrace.h>
#include <linux/kprobes.h>
extern void *exception_table[];
void show_registers(struct pt_regs *regs);
asmlinkage void cache_parity_error(void);

View File

@ -63,7 +63,7 @@ ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
{
regs_set_return_value(&fregs->regs, ip);
instruction_pointer_set(&fregs->regs, ip);
}
#define ftrace_regs_get_argument(fregs, n) \

View File

@ -6,6 +6,7 @@
#define _ASM_MODULE_H
#include <asm/inst.h>
#include <asm/orc_types.h>
#include <asm-generic/module.h>
#define RELA_STACK_DEPTH 16
@ -21,6 +22,12 @@ struct mod_arch_specific {
struct mod_section plt;
struct mod_section plt_idx;
#ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
#endif
/* For CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
};

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_HEADER_H
#define _ORC_HEADER_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/orc_hash.h>
/*
* The header is currently a 20-byte hash of the ORC entry definition; see
* scripts/orc_hash.sh.
*/
#define ORC_HEADER \
__used __section(".orc_header") __aligned(4) \
static const u8 orc_header[] = { ORC_HASH }
#endif /* _ORC_HEADER_H */
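
For illustration, a hedged sketch of what ORC_HEADER expands to after preprocessing. The byte values below are placeholders, not real output of scripts/orc_hash.sh; the idea is that the same hash ends up in vmlinux and in every module, presumably so consumers of the ORC data can detect a mismatched struct orc_entry layout.

/* Hypothetical expansion of ORC_HEADER; the 20 bytes are illustrative only. */
__used __section(".orc_header") __aligned(4)
static const u8 orc_header[] = {
	0x1c, 0xa2, 0x5b, 0x0e, 0x47, 0x33, 0x91, 0xd8, 0x06, 0x7f,
	0xb4, 0x20, 0xe5, 0x9a, 0x01, 0x6d, 0xc8, 0x52, 0x3e, 0x88,
};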

View File

@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_LOOKUP_H
#define _ORC_LOOKUP_H
/*
* This is a lookup table for speeding up access to the .orc_unwind table.
* Given an input address offset, the corresponding lookup table entry
* specifies a subset of the .orc_unwind table to search.
*
* Each block represents the end of the previous range and the start of the
* next range. An extra block is added to give the last range an end.
*
* The block size should be a power of 2 to avoid a costly 'div' instruction.
*
* A block size of 256 was chosen because it roughly doubles unwinder
* performance while only adding ~5% to the ORC data footprint.
*/
#define LOOKUP_BLOCK_ORDER 8
#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER)
#ifndef LINKER_SCRIPT
extern unsigned int orc_lookup[];
extern unsigned int orc_lookup_end[];
#define LOOKUP_START_IP (unsigned long)_stext
#define LOOKUP_STOP_IP (unsigned long)_etext
#endif /* LINKER_SCRIPT */
#endif /* _ORC_LOOKUP_H */
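
To make the arithmetic concrete, here is a small standalone C sketch (userspace code with invented addresses, not part of the patch) of the fast-path index computation that orc_find() performs later in this commit with these constants:

/* Standalone sketch of the lookup-table index computation. */
#include <stdio.h>

#define LOOKUP_BLOCK_ORDER	8
#define LOOKUP_BLOCK_SIZE	(1 << LOOKUP_BLOCK_ORDER)	/* 256 */

int main(void)
{
	unsigned long lookup_start_ip = 0x9000000000200000UL;	/* stands in for _stext */
	unsigned long ip              = 0x9000000000200730UL;	/* some kernel PC */

	/* Power-of-two block size: the divide compiles to a shift, not a div. */
	unsigned int idx = (ip - lookup_start_ip) / LOOKUP_BLOCK_SIZE;

	/* orc_lookup[idx] and orc_lookup[idx + 1] would then bound the binary
	 * search over the .orc_unwind table. */
	printf("block index: %u\n", idx);	/* prints 7 */
	return 0;
}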

View File

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_TYPES_H
#define _ORC_TYPES_H
#include <linux/types.h>
/*
* The ORC_REG_* registers are base registers which are used to find other
* registers on the stack.
*
* ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
* address of the previous frame: the caller's SP before it called the current
* function.
*
* ORC_REG_UNDEFINED means the corresponding register's value didn't change in
* the current frame.
*
* The most commonly used base registers are SP and FP -- which the previous SP
* is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is
* usually based on.
*
* The rest of the base registers are needed for special cases like entry code
* and GCC realigned stacks.
*/
#define ORC_REG_UNDEFINED 0
#define ORC_REG_PREV_SP 1
#define ORC_REG_SP 2
#define ORC_REG_FP 3
#define ORC_REG_MAX 4
#define ORC_TYPE_UNDEFINED 0
#define ORC_TYPE_END_OF_STACK 1
#define ORC_TYPE_CALL 2
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
* CFI, simplified for ease of access by the in-kernel unwinder. It tells the
* unwinder how to find the previous SP and FP (and sometimes entry regs) on
* the stack for a given code address. Each instance of the struct corresponds
* to one or more code locations.
*/
struct orc_entry {
s16 sp_offset;
s16 fp_offset;
s16 ra_offset;
unsigned int sp_reg:4;
unsigned int fp_reg:4;
unsigned int ra_reg:4;
unsigned int type:3;
unsigned int signal:1;
};
#endif /* __ASSEMBLY__ */
#endif /* _ORC_TYPES_H */
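
As a worked illustration of how one of these entries drives a single unwind step in the common CALL case, mirroring the sp_reg/ra_reg handling in unwind_next_frame() later in this commit (the offsets here are invented):

/* Hedged sketch, not kernel code: apply one CALL-type entry to a known SP. */
static unsigned long unwind_one_step(unsigned long sp)
{
	const struct orc_entry e = {
		.sp_reg = ORC_REG_SP,      .sp_offset = 32,  /* prev SP = SP + 32 */
		.ra_reg = ORC_REG_PREV_SP, .ra_offset = -8,  /* RA at prev SP - 8 */
		.fp_reg = ORC_REG_UNDEFINED,
		.type   = ORC_TYPE_CALL,
	};
	unsigned long prev_sp = sp + e.sp_offset;	/* recover caller's SP */
	unsigned long ra = *(unsigned long *)(prev_sp + e.ra_offset);

	return ra - 4;	/* back up one instruction to the call site, as the
			 * unwinder does with LOONGARCH_INSN_SIZE */
}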

View File

@ -13,6 +13,7 @@
#include <asm/asm-offsets.h>
#include <asm/loongarch.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
/* Make the addition of cfi info a little easier. */
.macro cfi_rel_offset reg offset=0 docfi=0
@ -173,6 +174,7 @@
li.w t0, CSR_CRMD_WE
csrxchg t0, t0, LOONGARCH_CSR_CRMD
#endif
UNWIND_HINT_REGS
.endm
.macro SAVE_ALL docfi=0
@ -230,6 +232,7 @@
.macro RESTORE_SP_AND_RET docfi=0
cfi_ld sp, PT_R3, \docfi
UNWIND_HINT_FUNC
ertn
.endm

View File

@ -86,6 +86,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
#define TIF_PATCH_PENDING 21 /* pending live patching update */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@ -105,6 +106,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
#define _TIF_USEDLBT (1<<TIF_USEDLBT)
#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */

View File

@ -16,6 +16,7 @@
enum unwinder_type {
UNWINDER_GUESS,
UNWINDER_PROLOGUE,
UNWINDER_ORC,
};
struct unwind_state {
@ -24,7 +25,7 @@ struct unwind_state {
struct task_struct *task;
bool first, error, reset;
int graph_idx;
unsigned long sp, pc, ra;
unsigned long sp, fp, pc, ra;
};
bool default_next_frame(struct unwind_state *state);
@ -61,14 +62,17 @@ static __always_inline void __unwind_start(struct unwind_state *state,
state->sp = regs->regs[3];
state->pc = regs->csr_era;
state->ra = regs->regs[1];
state->fp = regs->regs[22];
} else if (task && task != current) {
state->sp = thread_saved_fp(task);
state->pc = thread_saved_ra(task);
state->ra = 0;
state->fp = 0;
} else {
state->sp = (unsigned long)__builtin_frame_address(0);
state->pc = (unsigned long)__builtin_return_address(0);
state->ra = 0;
state->fp = 0;
}
state->task = task;
get_stack_info(state->sp, state->task, &state->stack_info);
@ -77,6 +81,18 @@ static __always_inline void __unwind_start(struct unwind_state *state,
static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state)
{
return unwind_done(state) ? 0 : state->pc;
if (unwind_done(state))
return 0;
return __kernel_text_address(state->pc) ? state->pc : 0;
}
#ifdef CONFIG_UNWINDER_ORC
void unwind_init(void);
void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size);
#else
static inline void unwind_init(void) {}
static inline void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {}
#endif
#endif /* _ASM_UNWIND_H */
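
A hedged caller-side sketch of the API declared above, patterned on the arch_stack_walk() loop elsewhere in this commit; print_ip_sym() is assumed to come from <linux/kallsyms.h>:

/* Minimal sketch: walk and print the current task's kernel stack. */
static void dump_current_stack(void)
{
	struct unwind_state state;
	unsigned long pc;

	for (unwind_start(&state, current, NULL);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		pc = unwind_get_return_address(&state);
		if (!pc)
			break;
		print_ip_sym(KERN_INFO, pc);
	}
}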

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_UNWIND_HINTS_H
#define _ASM_LOONGARCH_UNWIND_HINTS_H
#include <linux/objtool.h>
#include <asm/orc_types.h>
#ifdef __ASSEMBLY__
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
.endm
.macro UNWIND_HINT_END_OF_STACK
UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK
.endm
.macro UNWIND_HINT_REGS
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_REGS
.endm
.macro UNWIND_HINT_FUNC
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */

View File

@ -3,6 +3,8 @@
# Makefile for the Linux/LoongArch kernel.
#
OBJECT_FILES_NON_STANDARD_head.o := y
extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
@ -22,6 +24,7 @@ obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_FUNCTION_TRACER
@ -64,6 +67,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o

View File

@ -14,11 +14,13 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
.text
.cfi_sections .debug_frame
.align 5
SYM_CODE_START(handle_syscall)
UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
@ -57,6 +59,7 @@ SYM_CODE_START(handle_syscall)
cfi_st fp, PT_R22
SAVE_STATIC
UNWIND_HINT_REGS
#ifdef CONFIG_KGDB
li.w t1, CSR_CRMD_WE
@ -75,6 +78,7 @@ SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, sp
bl syscall_exit_to_user_mode
@ -84,6 +88,7 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
SYM_CODE_START(ret_from_kernel_thread)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, s1
jirl ra, s0, 0

View File

@ -15,6 +15,7 @@
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
#define FPU_REG_WIDTH 8
#define LSX_REG_WIDTH 16
@ -526,3 +527,13 @@ SYM_FUNC_END(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
jr ra
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD _restore_fp
#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD _restore_lsx
#endif
#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD _restore_lasx
#endif
#endif

View File

@ -32,6 +32,7 @@ SYM_FUNC_START(__arch_cpu_idle)
SYM_FUNC_END(__arch_cpu_idle)
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
@ -49,6 +50,7 @@ SYM_CODE_START(handle_vint)
SYM_CODE_END(handle_vint)
SYM_CODE_START(except_vec_cex)
UNWIND_HINT_UNDEFINED
b cache_parity_error
SYM_CODE_END(except_vec_cex)
@ -67,6 +69,7 @@ SYM_CODE_END(except_vec_cex)
.macro BUILD_HANDLER exception handler prep
.align 5
SYM_CODE_START(handle_\exception)
UNWIND_HINT_UNDEFINED
666:
BACKUP_T0T1
SAVE_ALL
@ -77,7 +80,9 @@ SYM_CODE_END(except_vec_cex)
668:
RESTORE_ALL_AND_RET
SYM_CODE_END(handle_\exception)
.pushsection ".data", "aw", %progbits
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
.popsection
.endm
BUILD_HANDLER ade ade badv
@ -94,6 +99,7 @@ SYM_CODE_END(except_vec_cex)
BUILD_HANDLER reserved reserved none /* others */
SYM_CODE_START(handle_sys)
UNWIND_HINT_UNDEFINED
la_abs t0, handle_syscall
jr t0
SYM_CODE_END(handle_sys)

View File

@ -11,6 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
#define SCR_REG_WIDTH 8
@ -153,3 +154,5 @@ SYM_FUNC_END(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
jr ra
STACK_FRAME_NON_STANDARD _restore_ftop_context

View File

@ -73,6 +73,7 @@ SYM_FUNC_START(ftrace_stub)
SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_common)
UNWIND_HINT_UNDEFINED
PTR_ADDI a0, ra, -8 /* arg0: ip */
move a1, t0 /* arg1: parent_ip */
la.pcrel t1, function_trace_op
@ -113,12 +114,14 @@ ftrace_common_return:
SYM_CODE_END(ftrace_common)
SYM_CODE_START(ftrace_caller)
UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=0
b ftrace_common
SYM_CODE_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_CODE_START(ftrace_regs_caller)
UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
@ -126,6 +129,7 @@ SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
UNWIND_HINT_UNDEFINED
PTR_L a0, sp, PT_ERA
PTR_ADDI a0, a0, -8 /* arg0: self_addr */
PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
@ -134,6 +138,7 @@ SYM_CODE_START(ftrace_graph_caller)
SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
/* Save return value regs */
PTR_ADDI sp, sp, -FGRET_REGS_SIZE
PTR_S a0, sp, FGRET_REGS_A0
@ -155,6 +160,7 @@ SYM_CODE_END(return_to_handler)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp)
UNWIND_HINT_UNDEFINED
jr t0
SYM_CODE_END(ftrace_stub_direct_tramp)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

View File

@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/inst.h>
#include <asm/unwind.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
@ -515,15 +516,28 @@ static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *s, *alt = NULL, *orc = NULL, *orc_ip = NULL, *ftrace = NULL;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
alt = s;
if (!strcmp(".orc_unwind", secstrs + s->sh_name))
orc = s;
if (!strcmp(".orc_unwind_ip", secstrs + s->sh_name))
orc_ip = s;
if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
module_init_ftrace_plt(hdr, s, mod);
ftrace = s;
}
if (alt)
apply_alternatives((void *)alt->sh_addr, (void *)alt->sh_addr + alt->sh_size);
if (orc && orc_ip)
unwind_module_init(mod, (void *)orc_ip->sh_addr, orc_ip->sh_size, (void *)orc->sh_addr, orc->sh_size);
if (ftrace)
module_init_ftrace_plt(hdr, ftrace, mod);
return 0;
}

View File

@ -15,6 +15,7 @@
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
UNWIND_HINT_UNDEFINED
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
@ -90,6 +91,7 @@ SYM_CODE_END(relocate_new_kernel)
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
UNWIND_HINT_UNDEFINED
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
@ -106,6 +108,5 @@ SYM_CODE_END(kexec_smp_wait)
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)
.section ".data"
SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)

View File

@ -76,6 +76,7 @@
.endm
SYM_CODE_START(arch_rethook_trampoline)
UNWIND_HINT_UNDEFINED
addi.d sp, sp, -PT_SIZE
save_all_base_regs

View File

@ -48,6 +48,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/unwind.h>
#include "legacy_boot.h"
#define SMBIOS_BIOSSIZE_OFFSET 0x09
@ -649,6 +650,7 @@ static void __init prefill_possible_map(void)
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
unwind_init();
init_environ();
efi_init();

View File

@ -29,6 +29,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
regs->regs[22] = 0;
}
for (unwind_start(&state, task, regs);
@ -39,6 +40,46 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
}
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
unsigned long addr;
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
struct unwind_state state;
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
regs->regs[22] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
/*
* A NULL or invalid return address probably means there's some
* generated code which __kernel_text_address() doesn't know about.
*/
if (!addr)
return -EINVAL;
if (!consume_entry(cookie, addr))
return -EINVAL;
}
/* Check for stack corruption */
if (unwind_error(&state))
return -EINVAL;
return 0;
}
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{

View File

@ -53,6 +53,32 @@
#include "access-helper.h"
void *exception_table[EXCCODE_INT_START] = {
[0 ... EXCCODE_INT_START - 1] = handle_reserved,
[EXCCODE_TLBI] = handle_tlb_load,
[EXCCODE_TLBL] = handle_tlb_load,
[EXCCODE_TLBS] = handle_tlb_store,
[EXCCODE_TLBM] = handle_tlb_modify,
[EXCCODE_TLBNR] = handle_tlb_protect,
[EXCCODE_TLBNX] = handle_tlb_protect,
[EXCCODE_TLBPE] = handle_tlb_protect,
[EXCCODE_ADE] = handle_ade,
[EXCCODE_ALE] = handle_ale,
[EXCCODE_BCE] = handle_bce,
[EXCCODE_SYS] = handle_sys,
[EXCCODE_BP] = handle_bp,
[EXCCODE_INE] = handle_ri,
[EXCCODE_IPE] = handle_ri,
[EXCCODE_FPDIS] = handle_fpu,
[EXCCODE_LSXDIS] = handle_lsx,
[EXCCODE_LASXDIS] = handle_lasx,
[EXCCODE_FPE] = handle_fpe,
[EXCCODE_WATCH] = handle_watch,
[EXCCODE_BTDIS] = handle_lbt,
};
EXPORT_SYMBOL_GPL(exception_table);
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
@ -1150,19 +1176,9 @@ void __init trap_init(void)
for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
set_handler(i * VECSIZE, handle_vint, VECSIZE);
set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
/* Set exception vector handler */
for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
set_handler(i * VECSIZE, exception_table[i], VECSIZE);
cache_error_setup();

View File

@ -0,0 +1,528 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>
#include <asm/orc_lookup.h>
#include <asm/orc_types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/unwind.h>
ORC_HEADER;
#define orc_warn(fmt, ...) \
printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];
static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
.sp_reg = ORC_REG_FP,
.sp_offset = 16,
.fp_reg = ORC_REG_PREV_SP,
.fp_offset = -16,
.ra_reg = ORC_REG_PREV_SP,
.ra_offset = -8,
.type = ORC_TYPE_CALL
};
/*
* If we crash with IP==0, the last successfully executed instruction
* was probably an indirect function call with a NULL function pointer,
* and we don't have unwind information for NULL.
* This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
* pointer into its parent and then continue normally from there.
*/
static struct orc_entry orc_null_entry = {
.sp_reg = ORC_REG_SP,
.sp_offset = sizeof(long),
.fp_reg = ORC_REG_UNDEFINED,
.type = ORC_TYPE_CALL
};
static inline unsigned long orc_ip(const int *ip)
{
return (unsigned long)ip + *ip;
}
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
unsigned int num_entries, unsigned long ip)
{
int *first = ip_table;
int *mid = first, *found = first;
int *last = ip_table + num_entries - 1;
if (!num_entries)
return NULL;
/*
* Do a binary range search to find the rightmost duplicate of a given
* starting address. Some entries are section terminators which are
* "weak" entries for ensuring there are no gaps. They should be
* ignored when they conflict with a real entry.
*/
while (first <= last) {
mid = first + ((last - first) / 2);
if (orc_ip(mid) <= ip) {
found = mid;
first = mid + 1;
} else
last = mid - 1;
}
return u_table + (found - ip_table);
}
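
A small worked example of why the loop keeps moving right on ties, with invented table contents:

/* Illustrative: with ip_table start addresses {0x100, 0x200, 0x200, 0x300}
 * and ip == 0x250, every mid with orc_ip(mid) <= ip updates 'found', so the
 * search lands on the second 0x200 entry, the rightmost duplicate. A real
 * entry placed after a "weak" terminator at the same address therefore wins. */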
#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
struct module *mod;
mod = __module_address(ip);
if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
return NULL;
return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
return NULL;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);
/*
* Ftrace dynamic trampolines do not have orc entries of their own.
* But they are copies of the ftrace entries that are static and
* defined in ftrace_*.S, which do have orc entries.
*
* If the unwinder comes across a ftrace trampoline, then find the
* ftrace function that was used to create it, and use that ftrace
* function's orc entry, as the placement of the return code in
* the stack will be identical.
*/
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
unsigned long tramp_addr, offset;
ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;
/* Set tramp_addr to the start of the code copied by the trampoline */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
tramp_addr = (unsigned long)ftrace_regs_caller;
else
tramp_addr = (unsigned long)ftrace_caller;
/* Now point tramp_addr at the same offset within the static code that ip is at within the trampoline */
offset = ip - ops->trampoline;
tramp_addr += offset;
/* Prevent unlikely recursion */
if (ip == tramp_addr)
return NULL;
return orc_find(tramp_addr);
}
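
A worked example of the address arithmetic above, with invented addresses:

/* Illustrative: a dynamic trampoline at ops->trampoline == 0xa000 copied
 * ftrace_caller, which lives at 0x9000. For ip == 0xa018, offset == 0x18 and
 * tramp_addr == 0x9018: the same instruction within the static ftrace_caller,
 * whose ORC entry also describes the copy's stack layout. */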
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
return NULL;
}
#endif
static struct orc_entry *orc_find(unsigned long ip)
{
static struct orc_entry *orc;
if (ip == 0)
return &orc_null_entry;
/* For non-init vmlinux addresses, use the fast lookup table: */
if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
unsigned int idx, start, stop;
idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
if (unlikely((idx >= lookup_num_blocks-1))) {
orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
idx, lookup_num_blocks, (void *)ip);
return NULL;
}
start = orc_lookup[idx];
stop = orc_lookup[idx + 1] + 1;
if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
(__start_orc_unwind + stop > __stop_orc_unwind))) {
orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
idx, lookup_num_blocks, start, stop, (void *)ip);
return NULL;
}
return __orc_find(__start_orc_unwind_ip + start,
__start_orc_unwind + start, stop - start, ip);
}
/* vmlinux .init slow lookup: */
if (is_kernel_inittext(ip))
return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
/* Module lookup: */
orc = orc_module_find(ip);
if (orc)
return orc;
return orc_ftrace_find(ip);
}
#ifdef CONFIG_MODULES
static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;
static void orc_sort_swap(void *_a, void *_b, int size)
{
int delta = _b - _a;
int *a = _a, *b = _b, tmp;
struct orc_entry *orc_a, *orc_b;
/* Swap the .orc_unwind_ip entries: */
tmp = *a;
*a = *b + delta;
*b = tmp - delta;
/* Swap the corresponding .orc_unwind entries: */
orc_a = cur_orc_table + (a - cur_orc_ip_table);
orc_b = cur_orc_table + (b - cur_orc_ip_table);
swap(*orc_a, *orc_b);
}
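
The +/- delta is needed because the ip table stores PC-relative offsets; a short worked example with invented addresses:

/* orc_ip(slot) == slot_address + *slot, so a value moved 'delta' bytes must
 * be adjusted to keep pointing at the same instruction. Example: slots at
 * 0x100 and 0x104 (delta == 4) targeting 0x200 and 0x300 hold 0x100 and
 * 0x1fc. After the swap, slot 0x100 holds 0x1fc + 4 == 0x200 (target 0x300)
 * and slot 0x104 holds 0x100 - 4 == 0xfc (target 0x200). */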
static int orc_sort_cmp(const void *_a, const void *_b)
{
const int *a = _a, *b = _b;
unsigned long a_val = orc_ip(a);
unsigned long b_val = orc_ip(b);
struct orc_entry *orc_a;
if (a_val > b_val)
return 1;
if (a_val < b_val)
return -1;
/*
* The "weak" section terminator entries need to always be first
* to ensure the lookup code skips them in favor of real entries.
* These terminator entries exist to handle any gaps created by
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = cur_orc_table + (a - cur_orc_ip_table);
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
void *_orc, size_t orc_size)
{
int *orc_ip = _orc_ip;
struct orc_entry *orc = _orc;
unsigned int num_entries = orc_ip_size / sizeof(int);
WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(*orc) != 0 ||
num_entries != orc_size / sizeof(*orc));
/*
* The 'cur_orc_*' globals allow the orc_sort_swap() callback to
* associate an .orc_unwind_ip table entry with its corresponding
* .orc_unwind entry so they can both be swapped.
*/
mutex_lock(&sort_mutex);
cur_orc_ip_table = orc_ip;
cur_orc_table = orc;
sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
mutex_unlock(&sort_mutex);
mod->arch.orc_unwind_ip = orc_ip;
mod->arch.orc_unwind = orc;
mod->arch.num_orcs = num_entries;
}
#endif
void __init unwind_init(void)
{
int i;
size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
size_t num_entries = orc_ip_size / sizeof(int);
struct orc_entry *orc;
if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(struct orc_entry) != 0 ||
num_entries != orc_size / sizeof(struct orc_entry)) {
orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
return;
}
/*
* Note, the orc_unwind and orc_unwind_ip tables were already
* sorted at build time via the 'sorttable' tool.
* It's ready for binary search straight away, no need to sort it.
*/
/* Initialize the fast lookup table: */
lookup_num_blocks = orc_lookup_end - orc_lookup;
for (i = 0; i < lookup_num_blocks-1; i++) {
orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
if (!orc) {
orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
return;
}
orc_lookup[i] = orc - __start_orc_unwind;
}
/* Initialize the ending block: */
orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP);
if (!orc) {
orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
return;
}
orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;
orc_init = true;
}
static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len)
{
unsigned long begin = info->begin;
unsigned long end = info->end;
return (info->type != STACK_TYPE_UNKNOWN &&
addr >= begin && addr < end && addr + len > begin && addr + len <= end);
}
static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len)
{
struct stack_info *info = &state->stack_info;
if (on_stack(info, addr, len))
return true;
return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len);
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
return __unwind_get_return_address(state);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
__unwind_start(state, task, regs);
state->type = UNWINDER_ORC;
if (!unwind_done(state) && !__kernel_text_address(state->pc))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);
static bool is_entry_func(unsigned long addr)
{
extern u32 kernel_entry;
extern u32 kernel_entry_end;
return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end;
}
static inline unsigned long bt_address(unsigned long ra)
{
extern unsigned long eentry;
if (__kernel_text_address(ra))
return ra;
if (__module_text_address(ra))
return ra;
if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
unsigned long func;
unsigned long type = (ra - eentry) / VECSIZE;
unsigned long offset = (ra - eentry) % VECSIZE;
switch (type) {
case 0 ... EXCCODE_INT_START - 1:
func = (unsigned long)exception_table[type];
break;
case EXCCODE_INT_START ... EXCCODE_INT_END:
func = (unsigned long)handle_vint;
break;
default:
func = (unsigned long)handle_reserved;
break;
}
return func + offset;
}
return ra;
}
bool unwind_next_frame(struct unwind_state *state)
{
unsigned long *p, pc;
struct pt_regs *regs;
struct orc_entry *orc;
struct stack_info *info = &state->stack_info;
if (unwind_done(state))
return false;
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
if (is_entry_func(state->pc))
goto end;
orc = orc_find(state->pc);
if (!orc) {
/*
* As a fallback, try to assume this code uses a frame pointer.
* This is useful for generated code, like BPF, which ORC
* doesn't know about. This is just a guess, so the rest of
* the unwind is no longer considered reliable.
*/
orc = &orc_fp_entry;
state->error = true;
} else {
if (orc->type == ORC_TYPE_UNDEFINED)
goto err;
if (orc->type == ORC_TYPE_END_OF_STACK)
goto end;
}
switch (orc->sp_reg) {
case ORC_REG_SP:
if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
orc->type = ORC_TYPE_REGS;
else
state->sp = state->sp + orc->sp_offset;
break;
case ORC_REG_FP:
state->sp = state->fp;
break;
default:
orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc);
goto err;
}
switch (orc->fp_reg) {
case ORC_REG_PREV_SP:
p = (unsigned long *)(state->sp + orc->fp_offset);
if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
goto err;
state->fp = *p;
break;
case ORC_REG_UNDEFINED:
/* Nothing. */
break;
default:
orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc);
goto err;
}
switch (orc->type) {
case ORC_TYPE_CALL:
if (orc->ra_reg == ORC_REG_PREV_SP) {
p = (unsigned long *)(state->sp + orc->ra_offset);
if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
goto err;
pc = unwind_graph_addr(state, *p, state->sp);
pc -= LOONGARCH_INSN_SIZE;
} else if (orc->ra_reg == ORC_REG_UNDEFINED) {
if (!state->ra || state->ra == state->pc)
goto err;
pc = unwind_graph_addr(state, state->ra, state->sp);
pc -= LOONGARCH_INSN_SIZE;
state->ra = 0;
} else {
orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc);
goto err;
}
break;
case ORC_TYPE_REGS:
if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
regs = (struct pt_regs *)info->next_sp;
else
regs = (struct pt_regs *)state->sp;
if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs)))
goto err;
if ((info->end == (unsigned long)regs + sizeof(*regs)) &&
!regs->regs[3] && !regs->regs[1])
goto end;
if (user_mode(regs))
goto end;
pc = regs->csr_era;
if (!__kernel_text_address(pc))
goto err;
state->sp = regs->regs[3];
state->ra = regs->regs[1];
state->fp = regs->regs[22];
get_stack_info(state->sp, state->task, info);
break;
default:
orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc);
goto err;
}
state->pc = bt_address(pc);
if (!state->pc) {
pr_err("cannot find unwind pc at %pK\n", (void *)pc);
goto err;
}
if (!__kernel_text_address(state->pc))
goto err;
preempt_enable();
return true;
err:
state->error = true;
end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

View File

@ -2,6 +2,7 @@
#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/orc_lookup.h>
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
@ -123,6 +124,8 @@ SECTIONS
}
#endif
ORC_UNWIND_TABLE
.sdata : {
*(.sdata)
}

View File

@ -8,7 +8,7 @@
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/unwind_hints.h>
#define HGPR_OFFSET(x) (PT_R0 + 8*x)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
@ -112,6 +112,7 @@
.text
.cfi_sections .debug_frame
SYM_CODE_START(kvm_exc_entry)
UNWIND_HINT_UNDEFINED
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
addi.d a2, a2, KVM_VCPU_ARCH
@ -273,3 +274,13 @@ SYM_FUNC_END(kvm_restore_lasx)
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
#endif
#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
#endif

View File

@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
/*
@ -204,3 +205,5 @@ SYM_FUNC_START(__clear_user_fast)
_asm_extable 28b, .Lsmall_fixup
_asm_extable 29b, .Lexit
SYM_FUNC_END(__clear_user_fast)
STACK_FRAME_NON_STANDARD __clear_user_fast

View File

@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
/*
@ -278,3 +279,5 @@ SYM_FUNC_START(__copy_user_fast)
_asm_extable 58b, .Lexit
_asm_extable 59b, .Lexit
SYM_FUNC_END(__copy_user_fast)
STACK_FRAME_NON_STANDARD __copy_user_fast

View File

@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
.section .noinstr.text, "ax"
@ -197,3 +198,5 @@ SYM_FUNC_START(__memcpy_fast)
jr ra
SYM_FUNC_END(__memcpy_fast)
_ASM_NOKPROBE(__memcpy_fast)
STACK_FRAME_NON_STANDARD __memcpy_small

View File

@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
@ -166,3 +167,5 @@ SYM_FUNC_START(__memset_fast)
jr ra
SYM_FUNC_END(__memset_fast)
_ASM_NOKPROBE(__memset_fast)
STACK_FRAME_NON_STANDARD __memset_fast

View File

@ -9,8 +9,9 @@
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@ -266,24 +267,20 @@ static void setup_tlb_handler(int cpu)
setup_ptwalker();
local_flush_tlb_all();
if (cpu_has_ptw) {
exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw;
exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw;
exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw;
exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw;
}
/* The tlb handlers are generated only once */
if (cpu == 0) {
memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
if (!cpu_has_ptw) {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
} else {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
}
set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++)
set_handler(i * VECSIZE, exception_table[i], VECSIZE);
} else {
int vec_sz __maybe_unused;
void *addr __maybe_unused;

View File

@ -18,6 +18,7 @@
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
UNWIND_HINT_UNDEFINED
SAVE_ALL
csrrd a2, LOONGARCH_CSR_BADV
move a0, sp
@ -32,6 +33,7 @@
tlb_do_page_fault 1
SYM_CODE_START(handle_tlb_protect)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
move a0, sp
@ -44,6 +46,7 @@ SYM_CODE_START(handle_tlb_protect)
SYM_CODE_END(handle_tlb_protect)
SYM_CODE_START(handle_tlb_load)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@ -190,6 +193,7 @@ nopage_tlb_load:
SYM_CODE_END(handle_tlb_load)
SYM_CODE_START(handle_tlb_load_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_0
@ -197,6 +201,7 @@ SYM_CODE_START(handle_tlb_load_ptw)
SYM_CODE_END(handle_tlb_load_ptw)
SYM_CODE_START(handle_tlb_store)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@ -346,6 +351,7 @@ nopage_tlb_store:
SYM_CODE_END(handle_tlb_store)
SYM_CODE_START(handle_tlb_store_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@ -353,6 +359,7 @@ SYM_CODE_START(handle_tlb_store_ptw)
SYM_CODE_END(handle_tlb_store_ptw)
SYM_CODE_START(handle_tlb_modify)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@ -500,6 +507,7 @@ nopage_tlb_modify:
SYM_CODE_END(handle_tlb_modify)
SYM_CODE_START(handle_tlb_modify_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@ -507,6 +515,7 @@ SYM_CODE_START(handle_tlb_modify_ptw)
SYM_CODE_END(handle_tlb_modify_ptw)
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3

View File

@ -4,6 +4,7 @@
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
OBJECT_FILES_NON_STANDARD := y
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile

View File

@ -5249,13 +5249,22 @@ static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64
struct kvm_vpsp vpsp = {
.kvm = kvm,
.write_guest = kvm_write_guest,
.read_guest = kvm_read_guest
.read_guest = kvm_read_guest,
.gfn_to_pfn = gfn_to_pfn,
};
switch (nr) {
case KVM_HC_PSP_OP:
ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3);
break;
if (sev_guest(kvm)) {
vpsp.vm_handle = to_kvm_svm(kvm)->sev_info.handle;
vpsp.is_csv_guest = 1;
}
switch (nr) {
case KVM_HC_PSP_COPY_FORWARD_OP:
ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2);
break;
case KVM_HC_PSP_FORWARD_OP:
ret = kvm_pv_psp_forward_op(&vpsp, a0, a1, a2);
break;
default:
ret = -KVM_ENOSYS;
break;

View File

@ -9879,7 +9879,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
}
if (static_call(kvm_x86_get_cpl)(vcpu) != 0 &&
!(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) {
!(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION
|| nr == KVM_HC_PSP_OP_OBSOLETE
|| nr == KVM_HC_PSP_COPY_FORWARD_OP
|| nr == KVM_HC_PSP_FORWARD_OP))) {
ret = -KVM_EPERM;
goto out;
}
@ -9916,7 +9919,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
kvm_sched_yield(vcpu, a0);
ret = 0;
break;
case KVM_HC_PSP_OP:
case KVM_HC_PSP_OP_OBSOLETE:
case KVM_HC_PSP_COPY_FORWARD_OP:
case KVM_HC_PSP_FORWARD_OP:
ret = -KVM_ENOSYS;
if (kvm_arch_hypercall)
ret = kvm_arch_hypercall(vcpu->kvm, nr, a0, a1, a2, a3);

View File

@ -14,6 +14,7 @@
#include <linux/psp.h>
#include <linux/psp-hygon.h>
#include <uapi/linux/psp-hygon.h>
#include <linux/bitfield.h>
#include <asm/csv.h>
@ -760,12 +761,12 @@ static int vpsp_dequeue_cmd(int prio, int index,
* Queue a command from the virtual machine so that it can be
* executed in ringbuffer mode
*/
static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags)
static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags)
{
struct csv_cmdptr_entry cmdptr = { };
int index = -1;
cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid);
cmdptr.cmd_buf_ptr = phy_addr;
cmdptr.cmd_id = cmd;
cmdptr.cmd_flags = flags;
@ -1065,12 +1066,91 @@ end:
return rb_supported;
}
int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret);
static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
if (*hygon_psp_hooks.psp_dead)
return -EBUSY;
sev = psp->sev_data;
/* Get the physical address of the command buffer */
phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0;
phys_msb = phy_addr ? upper_32_bits(phy_addr) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout);
iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
sev->int_rcvd = 0;
reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout);
if (ret) {
if (psp_ret)
*psp_ret = 0;
dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
*hygon_psp_hooks.psp_dead = true;
return ret;
}
*hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout;
if (psp_ret)
*psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
ret = -EIO;
}
return ret;
}
int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret)
{
int rc;
int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
if (is_vendor_hygon() && mutex_enabled) {
if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
PSP_MUTEX_TIMEOUT) != 1) {
return -EBUSY;
}
} else {
mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
}
rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret);
if (is_vendor_hygon() && mutex_enabled)
psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
else
mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
return rc;
}
/*
* Retry fetching the result by command index; this
* interface is used in ringbuffer mode
*/
int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data,
int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr,
struct vpsp_ret *psp_ret)
{
int ret = 0;
@ -1093,8 +1173,7 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data,
/* dequeue command from queue*/
vpsp_dequeue_cmd(prio, index, &cmd);
ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data,
(int *)psp_ret);
ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret);
psp_ret->status = VPSP_FINISH;
vpsp_psp_mutex_unlock();
if (unlikely(ret)) {
@ -1137,7 +1216,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result);
* vpsp_try_get_result interface will be used to obtain the result
* later again
*/
int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret)
{
int ret = 0;
int rb_supported;
@ -1152,10 +1231,10 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
(struct vpsp_cmd *)&cmd);
if (rb_supported) {
/* fill command in ringbuffer's queue and get index */
index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0);
index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0);
if (unlikely(index < 0)) {
/* do mailbox command if queuing failed*/
ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret);
ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret);
if (unlikely(ret)) {
if (ret == -EIO) {
ret = 0;
@ -1171,14 +1250,14 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
}
/* try to get result from the ringbuffer command */
ret = vpsp_try_get_result(vid, prio, index, data, psp_ret);
ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret);
if (unlikely(ret)) {
pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret);
goto end;
}
} else {
/* mailbox mode */
ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret);
ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret);
if (unlikely(ret)) {
if (ret == -EIO) {
ret = 0;

View File

@ -30,6 +30,8 @@ enum HYGON_PSP_OPCODE {
HYGON_PSP_MUTEX_ENABLE = 1,
HYGON_PSP_MUTEX_DISABLE,
HYGON_VPSP_CTRL_OPT,
HYGON_PSP_OP_PIN_USER_PAGE,
HYGON_PSP_OP_UNPIN_USER_PAGE,
HYGON_PSP_OPCODE_MAX_NR,
};
@ -38,16 +40,26 @@ enum VPSP_DEV_CTRL_OPCODE {
VPSP_OP_VID_DEL,
VPSP_OP_SET_DEFAULT_VID_PERMISSION,
VPSP_OP_GET_DEFAULT_VID_PERMISSION,
VPSP_OP_SET_GPA,
};
struct vpsp_dev_ctrl {
unsigned char op;
/**
* To stay compatible with old user-mode callers,
* struct vpsp_dev_ctrl must be kept at 132 bytes.
*/
unsigned char resv[3];
union {
unsigned int vid;
// Set or check the permissions for the default VID
unsigned int def_vid_perm;
struct {
u64 gpa_start;
u64 gpa_end;
} gpa;
unsigned char reserved[128];
} data;
} __packed data;
};
uint64_t atomic64_exchange(volatile uint64_t *dst, uint64_t val)
@ -169,19 +181,15 @@ DEFINE_RWLOCK(vpsp_rwlock);
#define VPSP_VID_MAX_ENTRIES 2048
#define VPSP_VID_NUM_MAX 64
struct vpsp_vid_entry {
uint32_t vid;
pid_t pid;
};
static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES];
static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES];
static uint32_t g_vpsp_vid_num;
static int compare_vid_entries(const void *a, const void *b)
{
return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid;
return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid;
}
static void swap_vid_entries(void *a, void *b, int size)
{
struct vpsp_vid_entry entry;
struct vpsp_context entry;
memcpy(&entry, a, size);
memcpy(a, b, size);
@ -206,43 +214,41 @@ int vpsp_get_default_vid_permission(void)
EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission);
/**
* When the virtual machine executes the 'tkm' command,
* it needs to retrieve the corresponding 'vid'
* by performing a binary search using 'kvm->userspace_pid'.
* get a vpsp context from pid
*/
int vpsp_get_vid(uint32_t *vid, pid_t pid)
int vpsp_get_context(struct vpsp_context **ctx, pid_t pid)
{
struct vpsp_vid_entry new_entry = {.pid = pid};
struct vpsp_vid_entry *existing_entry = NULL;
struct vpsp_context new_entry = {.pid = pid};
struct vpsp_context *existing_entry = NULL;
read_lock(&vpsp_rwlock);
existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num,
sizeof(struct vpsp_vid_entry), compare_vid_entries);
existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num,
sizeof(struct vpsp_context), compare_vid_entries);
read_unlock(&vpsp_rwlock);
if (!existing_entry)
return -ENOENT;
if (vid) {
*vid = existing_entry->vid;
pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid);
}
if (ctx)
*ctx = existing_entry;
return 0;
}
EXPORT_SYMBOL_GPL(vpsp_get_vid);
EXPORT_SYMBOL_GPL(vpsp_get_context);
/**
* Upon qemu startup, this section checks whether
* the '-device psp,vid' parameter is specified.
* If set, it utilizes the 'vpsp_add_vid' function
* to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'.
* to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'.
* The insertion is done in ascending order of 'pid'.
*/
static int vpsp_add_vid(uint32_t vid)
{
pid_t cur_pid = task_pid_nr(current);
struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid};
struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid};
if (vpsp_get_vid(NULL, cur_pid) == 0)
if (vpsp_get_context(NULL, cur_pid) == 0)
return -EEXIST;
if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES)
return -ENOMEM;
@ -250,8 +256,8 @@ static int vpsp_add_vid(uint32_t vid)
return -EINVAL;
write_lock(&vpsp_rwlock);
memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry));
sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry),
memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context));
sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context),
compare_vid_entries, swap_vid_entries);
pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num);
write_unlock(&vpsp_rwlock);
@ -270,12 +276,12 @@ static int vpsp_del_vid(void)
write_lock(&vpsp_rwlock);
for (i = 0; i < g_vpsp_vid_num; ++i) {
if (g_vpsp_vid_array[i].pid == cur_pid) {
if (g_vpsp_context_array[i].pid == cur_pid) {
--g_vpsp_vid_num;
pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n",
g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num);
memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1],
sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i));
g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num);
memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1],
sizeof(struct vpsp_context) * (g_vpsp_vid_num - i));
ret = 0;
goto end;
}
@ -286,6 +292,85 @@ end:
return ret;
}
static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end)
{
pid_t cur_pid = task_pid_nr(current);
struct vpsp_context *ctx = NULL;
vpsp_get_context(&ctx, cur_pid);
if (!ctx) {
pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid);
return -ENOENT;
}
ctx->gpa_start = gpa_start;
ctx->gpa_end = gpa_end;
pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n",
gpa_start, gpa_end, cur_pid);
return 0;
}
/**
* Try to pin a page
*
* @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE
*/
static int psp_pin_user_page(u64 vaddr)
{
struct page *page;
long npinned = 0;
int ref_count = 0;
// vaddr must be aligned to PAGE_SIZE
if (vaddr & (PAGE_SIZE - 1)) {
pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE);
return -EFAULT;
}
npinned = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, &page);
if (npinned != 1) {
pr_err("PSP: pin_user_pages_fast fail\n");
return -ENOMEM;
}
ref_count = page_ref_count(page);
pr_debug("pin user page with address %llx, page ref_count %d\n", vaddr, ref_count);
return 0;
}
/**
* Try to unpin a page
*
* @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE
*/
static int psp_unpin_user_page(u64 vaddr)
{
struct page *page;
long npinned = 0;
int ref_count = 0;
// vaddr must be aligned to PAGE_SIZE
if (vaddr & (PAGE_SIZE - 1)) {
pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE);
return -EFAULT;
}
// get_user_pages_fast() takes one extra reference on the page
npinned = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page);
if (npinned != 1) {
pr_err("PSP: get_user_pages_fast failed\n");
return -ENOMEM;
}
// drop the reference just taken plus the one left from pin time
put_page(page);
put_page(page);
ref_count = page_ref_count(page);
pr_debug("unpin user page with address %llx, page ref_count %d\n", vaddr, ref_count);
return 0;
}
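/*
* Illustrative userspace usage of the pin/unpin ioctls above (a sketch;
* the device fd, the packing of the opcode into the ioctl number and the
* error handling are assumptions, not part of this patch):
*
* long psz = sysconf(_SC_PAGESIZE);
* void *buf = aligned_alloc(psz, psz);
* ioctl(psp_fd, HYGON_PSP_OP_PIN_USER_PAGE, (unsigned long)buf);
* ... the PSP may now access the page ...
* ioctl(psp_fd, HYGON_PSP_OP_UNPIN_USER_PAGE, (unsigned long)buf);
*/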
static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl)
{
int ret = 0;
@ -308,6 +393,10 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl)
ctrl->data.def_vid_perm = vpsp_get_default_vid_permission();
break;
case VPSP_OP_SET_GPA:
ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end);
break;
default:
ret = -EINVAL;
break;
@ -364,6 +453,14 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg)
return -EFAULT;
break;
case HYGON_PSP_OP_PIN_USER_PAGE:
ret = psp_pin_user_page((u64)arg);
break;
case HYGON_PSP_OP_UNPIN_USER_PAGE:
ret = psp_unpin_user_page((u64)arg);
break;
default:
printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode);
return -EINVAL;
@ -507,100 +604,6 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret)
return ret;
}
int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
phys_addr_t phys_addr;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed)
return -ENODEV;
if (*hygon_psp_hooks.psp_dead)
return -EBUSY;
sev = psp->sev_data;
if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
return -EINVAL;
/* Get the physical address of the command buffer */
phys_addr = PUT_PSP_VID(__psp_pa(data), vid);
phys_lsb = data ? lower_32_bits(phys_addr) : 0;
phys_msb = data ? upper_32_bits(phys_addr) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
hygon_psp_hooks.sev_cmd_buffer_len(cmd), false);
iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
sev->int_rcvd = 0;
reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout);
if (ret) {
if (psp_ret)
*psp_ret = 0;
dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
*hygon_psp_hooks.psp_dead = true;
return ret;
}
*hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout;
if (psp_ret)
*psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
ret = -EIO;
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
hygon_psp_hooks.sev_cmd_buffer_len(cmd), false);
return ret;
}
int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret)
{
int rc;
int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
if (!hygon_psp_hooks.sev_dev_hooks_installed)
return -ENODEV;
if (mutex_enabled) {
if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex,
PSP_MUTEX_TIMEOUT) != 1) {
return -EBUSY;
}
} else {
mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
}
rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret);
if (is_vendor_hygon() && mutex_enabled)
psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex);
else
mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
return rc;
}
int psp_do_cmd(int cmd, void *data, int *psp_ret)
{
int rc;

View File

@ -13,399 +13,103 @@
#include <linux/psp-sev.h>
#include <linux/psp.h>
#include <linux/psp-hygon.h>
#include <asm/cpuid.h>
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "vpsp: " fmt
#define VTKM_VM_BIND 0x904
/*
* The file mainly implements the base execution
* logic of virtual PSP in kernel mode, which mainly includes:
* (1) Obtain the VM command and preprocess the pointer
* mapping table information in the command buffer
* (2) The command that has been converted will interact
* with the channel of the psp through the driver and
* try to obtain the execution result
* (3) The executed command data is recovered according to
* the multilevel pointer of the mapping table, and then returned to the VM
* This file implements the core execution logic of the virtual PSP in kernel mode,
* which includes:
* (1) Preprocessing the guest data in the host kernel
* (2) Forwarding the converted command to the PSP through the driver channel
* and trying to obtain the execution result
* (3) Recovering the executed command data and returning it to the VM
*
* The primary implementation logic of virtual PSP in kernel mode
* call trace:
* guest command(vmmcall)
* |
* | |-> kvm_pv_psp_cmd_pre_op
* | |
* | | -> guest_addr_map_table_op
* | |
* | | -> guest_multiple_level_gpa_replace
* guest command(vmmcall, KVM_HC_PSP_COPY_FORWARD_OP)
* |
* kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver
* |
* |
* |-> kvm_pv_psp_cmd_post_op
* kvm_pv_psp_copy_op----> | -> kvm_pv_psp_cmd_pre_op
* |
* | -> guest_addr_map_table_op
* |
* | -> guest_multiple_level_gpa_restore
* | -> vpsp_try_do_cmd/vpsp_try_get_result
* | |<=> psp device driver
* |
* |
* |-> kvm_pv_psp_cmd_post_op
*
* guest command(vmmcall, KVM_HC_PSP_FORWARD_OP)
* |
* kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result
* |<=> psp device driver
*/
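/*
* For illustration only: a guest could reach the two paths above with
* hypercalls roughly like the following (the argument layout is an
* assumption derived from the handler signatures, not part of this patch):
*
* kvm_hypercall3(KVM_HC_PSP_COPY_FORWARD_OP, cmd, data_gpa, psp_ret_gpa);
* kvm_hypercall3(KVM_HC_PSP_FORWARD_OP, cmd, data_gpa, psp_ret);
*/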
#define TKM_CMD_ID_MIN 0x120
#define TKM_CMD_ID_MAX 0x12f
struct psp_cmdresp_head {
uint32_t buf_size;
uint32_t cmdresp_size;
uint32_t cmdresp_code;
} __packed;
/**
* struct map_tbl - multilevel pointer address mapping table
*
* @parent_pa: parent address block's physics address
* @offset: offset in parent address block
* @size: submemory size
* @align: submemory align size, hva need to keep size alignment in kernel
* @hva: submemory copy block in kernel virtual address
*/
struct map_tbl {
uint64_t parent_pa;
uint32_t offset;
uint32_t size;
uint32_t align;
uint64_t hva;
} __packed;
struct addr_map_tbls {
uint32_t tbl_nums;
struct map_tbl tbl[];
} __packed;
/* gpa and hva conversion maintenance table for internal use */
struct gpa2hva_t {
void *hva;
gpa_t gpa;
};
struct gpa2hva_tbls {
uint32_t max_nums;
uint32_t tbl_nums;
struct gpa2hva_t tbl[];
};
/* save command data for restoring later */
struct vpsp_hbuf_wrapper {
void *data;
uint32_t data_size;
struct addr_map_tbls *map_tbls;
struct gpa2hva_tbls *g2h_tbls;
};
/* Virtual PSP host memory information maintenance, used in ringbuffer mode */
struct vpsp_hbuf_wrapper
g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0};
void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls)
static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size)
{
int i;
pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums);
for (i = 0; i < tbls->tbl_nums; i++) {
pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx",
i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset,
tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva);
}
pr_info("\n");
}
void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls)
{
int i;
pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums,
tbls->max_nums);
for (i = 0; i < tbls->tbl_nums; i++)
pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i,
(uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa);
pr_info("\n");
}
static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa)
{
uint32_t fill_idx = tbls->tbl_nums;
if (fill_idx >= tbls->max_nums)
if (!vpsp_ctx || !addr)
return -EFAULT;
tbls->tbl[fill_idx].hva = hva;
tbls->tbl[fill_idx].gpa = gpa;
tbls->tbl_nums = fill_idx + 1;
if (addr >= vpsp_ctx->gpa_start && (addr + size) <= vpsp_ctx->gpa_end)
return 0;
return -EFAULT;
}
static int check_psp_mem_range(struct vpsp_context *vpsp_ctx,
void *data, uint32_t size)
{
if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) !=
((uintptr_t)data & ~PSP_2MB_MASK)) {
pr_err("data %llx, size %d crossing 2MB\n", (u64)data, size);
return -EFAULT;
}
if (vpsp_ctx)
return check_gpa_range(vpsp_ctx, (gpa_t)data, size);
return 0;
}
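/*
* Example: data = 0x1ff000 with size = 0x2000 covers [0x1ff000, 0x200fff],
* whose first and last bytes sit in different 2MB frames (0x0 vs 0x200000),
* so the check above rejects it; the same buffer at 0x1fd000 would pass.
*/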
static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva)
{
int i;
for (i = 0; i < g2h->tbl_nums; i++) {
if (g2h->tbl[i].hva == hva)
g2h->tbl[i].hva = NULL;
}
}
static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa)
{
int i;
for (i = 0; i < g2h->tbl_nums; i++) {
if (g2h->tbl[i].gpa == gpa)
return (void *)g2h->tbl[i].hva;
}
return NULL;
}
static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva)
{
int i;
for (i = 0; i < g2h->tbl_nums; i++) {
if (g2h->tbl[i].hva == hva)
return g2h->tbl[i].gpa;
}
return 0;
}
/*
* The virtual machine multilevel pointer command buffer handles the
* execution entity, synchronizes the data in the original gpa to the
* newly allocated hva(host virtual address) and updates the mapping
* relationship in the parent memory
*/
static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp,
struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
{
int ret = 0;
uint32_t sub_block_size;
uint64_t sub_paddr;
void *parent_kva = NULL;
/* kmalloc memory for child block */
sub_block_size = max(tbl->size, tbl->align);
tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL);
if (!tbl->hva)
return -ENOMEM;
/* get child gpa from parent gpa */
if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset,
&sub_paddr, sizeof(sub_paddr)))) {
pr_err("[%s]: kvm_read_guest for parent gpa failed\n",
__func__);
ret = -EFAULT;
goto e_free;
}
/* copy child block data from gpa to hva */
if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva,
tbl->size))) {
pr_err("[%s]: kvm_read_guest for sub_data failed\n",
__func__);
ret = -EFAULT;
goto e_free;
}
/* get hva from gpa */
parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa);
if (unlikely(!parent_kva)) {
pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
__func__);
ret = -EFAULT;
goto e_free;
}
/* replace pa of hva from gpa */
*(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva);
/* fill in gpa and hva to map table for restoring later */
if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) {
pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n",
__func__);
ret = -EFAULT;
goto e_free;
}
return ret;
e_free:
kfree((const void *)tbl->hva);
return ret;
}
/* The virtual machine multi-level pointer command memory handles the
* execution entity, synchronizes the data in the hva(host virtual
* address) back to the memory corresponding to the gpa, and restores
* the mapping relationship in the original parent memory
*/
static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp,
struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
{
int ret = 0;
gpa_t sub_gpa;
void *parent_hva = NULL;
/* get gpa from hva */
sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva);
if (unlikely(!sub_gpa)) {
pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n",
__func__);
ret = -EFAULT;
goto end;
}
/* copy child block data from hva to gpa */
if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva,
tbl->size))) {
pr_err("[%s]: kvm_write_guest for sub_gpa failed\n",
__func__);
ret = -EFAULT;
goto end;
}
/* get parent hva from parent gpa */
parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa);
if (unlikely(!parent_hva)) {
pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
__func__);
ret = -EFAULT;
goto end;
}
/* restore gpa from pa of hva in parent block */
*(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa;
/* free child block memory */
clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva);
kfree((const void *)tbl->hva);
tbl->hva = 0;
end:
return ret;
}
/*
* The virtual machine multilevel pointer command memory processing
* executes upper-layer abstract interfaces, including replacing and
* restoring two sub-processing functions
*/
static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h,
struct addr_map_tbls *map_tbls, int op)
{
int ret = 0;
int i;
uint64_t *sub_paddr_ptr;
if (op) {
for (i = map_tbls->tbl_nums - 1; i >= 0; i--) {
/* check if the gpa of root points to itself */
if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
+ map_tbls->tbl[i].offset);
/* if the child paddr is equal to the parent paddr */
if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) {
*sub_paddr_ptr = g2h->tbl[0].gpa;
continue;
}
}
/* restore new pa of kva with the gpa from guest */
if (unlikely(guest_multiple_level_gpa_restore(vpsp,
&map_tbls->tbl[i], g2h))) {
pr_err("[%s]: guest_multiple_level_gpa_restore failed\n",
__func__);
ret = -EFAULT;
goto end;
}
}
} else {
for (i = 0; i < map_tbls->tbl_nums; i++) {
/* check if the gpa of root points to itself */
if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
+ map_tbls->tbl[i].offset);
/* if the child paddr is equal to the parent paddr */
if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) {
*sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva);
map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva;
continue;
}
}
/* check if parent_pa is valid */
if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) {
pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n",
__func__, i, map_tbls->tbl[i].parent_pa);
ret = -EFAULT;
goto end;
}
/* replace the gpa from guest with the new pa of kva */
if (unlikely(guest_multiple_level_gpa_replace(vpsp,
&map_tbls->tbl[i], g2h))) {
pr_err("[%s]: guest_multiple_level_gpa_replace failed\n",
__func__);
ret = -EFAULT;
goto end;
}
}
}
end:
return ret;
}
static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls
*map_tbl, void *data)
{
int i;
if (g2h) {
for (i = 0; i < g2h->tbl_nums; i++) {
if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) {
kfree(g2h->tbl[i].hva);
g2h->tbl[i].hva = NULL;
}
}
kfree(g2h);
}
kfree(map_tbl);
kfree(data);
}
/*
* Obtain the VM command and preprocess the pointer mapping table
* information in the command buffer, the processed data will be
* used to interact with the psp device
/**
* Copy the guest data to the host kernel buffer
* and record the host buffer address in 'hbuf'.
* This 'hbuf' is used to restore context information
* during asynchronous processing.
*/
static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf)
struct vpsp_hbuf_wrapper *hbuf)
{
int ret = 0;
void *data = NULL;
struct psp_cmdresp_head psp_head;
uint32_t data_size;
struct addr_map_tbls map_head, *map_tbls = NULL;
uint32_t map_tbl_size;
struct gpa2hva_tbls *g2h = NULL;
uint32_t g2h_tbl_size;
if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head,
sizeof(struct psp_cmdresp_head))))
return -EFAULT;
data_size = psp_head.buf_size;
if (check_psp_mem_range(NULL, (void *)data_gpa, data_size))
return -EFAULT;
data = kzalloc(data_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@ -415,87 +119,18 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
goto end;
}
if (table_gpa) {
/* parse address map table from guest */
if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head,
sizeof(struct addr_map_tbls)))) {
pr_err("[%s]: kvm_read_guest for map_head failed\n",
__func__);
ret = -EFAULT;
goto end;
}
map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums
* sizeof(struct map_tbl);
map_tbls = kzalloc(map_tbl_size, GFP_KERNEL);
if (!map_tbls) {
ret = -ENOMEM;
goto end;
}
if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls,
map_tbl_size))) {
pr_err("[%s]: kvm_read_guest for map_tbls failed\n",
__func__);
ret = -EFAULT;
goto end;
}
/* init for gpa2hva table*/
g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums
+ 1) * sizeof(struct gpa2hva_t);
g2h = kzalloc(g2h_tbl_size, GFP_KERNEL);
if (!g2h) {
ret = -ENOMEM;
goto end;
}
g2h->max_nums = map_head.tbl_nums + 1;
/* fill the root parent address */
if (gpa2hva_tbl_fill(g2h, data, data_gpa)) {
pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n",
__func__);
ret = -EFAULT;
goto end;
}
if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) {
pr_err("[%s]: guest_addr_map_table_op for replacing failed\n",
__func__);
ret = -EFAULT;
goto end;
}
}
hbuf->data = data;
hbuf->data_size = data_size;
hbuf->map_tbls = map_tbls;
hbuf->g2h_tbls = g2h;
end:
return ret;
}
/*
* The executed command data is recovered according to the multilevel
* pointer of the mapping table when the command has finished
* interacting with the psp device
*/
static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
struct vpsp_hbuf_wrapper *hbuf)
struct vpsp_hbuf_wrapper *hbuf)
{
int ret = 0;
if (hbuf->map_tbls) {
if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls,
hbuf->map_tbls, 1)) {
pr_err("[%s]: guest_addr_map_table_op for restoring failed\n",
__func__);
ret = -EFAULT;
goto end;
}
}
/* restore cmdresp's buffer from context */
if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data,
hbuf->data_size))) {
@ -504,12 +139,9 @@ static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
ret = -EFAULT;
goto end;
}
end:
/* release memory and clear hbuf */
kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data);
kfree(hbuf->data);
memset(hbuf, 0, sizeof(*hbuf));
return ret;
}
@ -520,38 +152,325 @@ static int cmd_type_is_tkm(int cmd)
return 0;
}
/*
* The primary implementation interface of virtual PSP in kernel mode
static int cmd_type_is_allowed(int cmd)
{
if (cmd >= TKM_PSP_CMDID_OFFSET && cmd <= TKM_CMD_ID_MAX)
return 1;
return 0;
}
struct psp_cmdresp_vtkm_vm_bind {
struct psp_cmdresp_head head;
uint16_t vid;
uint32_t vm_handle;
uint8_t reserved[46];
} __packed;
static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint32_t *pret)
{
int ret = 0;
struct psp_cmdresp_vtkm_vm_bind *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->head.buf_size = sizeof(*data);
data->head.cmdresp_size = sizeof(*data);
data->head.cmdresp_code = VTKM_VM_BIND;
data->vid = vid;
data->vm_handle = vm_handle;
ret = psp_do_cmd(cmd_id, data, pret);
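/*
* psp_do_cmd() is expected to return -EIO when the PSP itself reports
* a non-zero status (cf. __psp_do_cmd_locked() above); that status is
* already visible through *pret, so it is not treated as a transport
* failure here.
*/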
if (ret == -EIO)
ret = 0;
kfree(data);
return ret;
}
static unsigned long vpsp_get_me_mask(void)
{
unsigned int eax, ebx, ecx, edx;
unsigned long me_mask;
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV or SME is supported */
if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return 0;
me_mask = 1UL << (ebx & 0x3f);
return me_mask;
}
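/*
* Example: on a part whose CPUID Fn8000_001F[EBX] bits 5:0 report 47,
* the function above returns 1UL << 47, the encryption bit that gets
* OR'ed into physical addresses handed to the PSP.
*/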
static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa)
{
phys_addr_t hpa = 0;
unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT);
unsigned long me_mask = sme_get_me_mask();
struct page *page;
if (me_mask == 0 && vpsp->is_csv_guest)
me_mask = vpsp_get_me_mask();
if (!is_error_pfn(pfn))
hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | me_mask;
else {
pr_err("[%s] pfn: %lx is invalid, gpa %lx",
__func__, pfn, data_gpa);
return 0;
}
/*
* gfn_to_pfn() takes a reference on the page,
* which must be released again here.
*/
page = pfn_to_page(pfn);
if (PageCompound(page))
page = compound_head(page);
put_page(page);
pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa);
return hpa;
}
static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
uint64_t data, uint32_t cmd)
{
int ret;
struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
struct psp_cmdresp_head psp_head;
if (!cmd_type_is_allowed(vcmd->cmd_id)) {
pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
return -EINVAL;
}
if (vpsp->is_csv_guest) {
/*
* If the gpa address range has been configured,
* a valid vid must also have been set.
*/
if (!vpsp_ctx || !vpsp_ctx->gpa_start || !vpsp_ctx->gpa_end) {
pr_err("[%s]: No set gpa range or vid in csv guest\n", __func__);
return -EPERM;
}
ret = check_psp_mem_range(vpsp_ctx, (void *)data, 0);
if (ret)
return -EFAULT;
} else {
if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
&& !vpsp_get_default_vid_permission()) {
pr_err("[%s]: not allowed tkm command without vid\n", __func__);
return -EPERM;
}
// 'data' is a gpa address
if (unlikely(vpsp->read_guest(vpsp->kvm, data, &psp_head,
sizeof(struct psp_cmdresp_head))))
return -EFAULT;
ret = check_psp_mem_range(vpsp_ctx, (void *)data, psp_head.buf_size);
if (ret)
return -EFAULT;
}
return 0;
}
static int
check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp,
struct vpsp_context *vpsp_ctx,
uint64_t data, uint32_t cmd)
{
int ret = 0;
struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
if (!cmd_type_is_allowed(vcmd->cmd_id)) {
pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
return -EINVAL;
}
if (vpsp->is_csv_guest) {
pr_err("[%s]: unsupported run on csv guest\n", __func__);
ret = -EPERM;
} else {
if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
&& !vpsp_get_default_vid_permission()) {
pr_err("[%s]: not allowed tkm command without vid\n", __func__);
ret = -EPERM;
}
}
return ret;
}
static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
uint32_t cmd, uint32_t *psp_ret)
{
int ret;
struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
if (vpsp_ctx && !vpsp_ctx->vm_is_bound && vpsp->is_csv_guest) {
ret = kvm_bind_vtkm(vpsp->vm_handle, vcmd->cmd_id,
vpsp_ctx->vid, psp_ret);
if (ret || *psp_ret) {
pr_err("[%s] kvm bind vtkm failed with ret: %d, pspret: %d\n",
__func__, ret, *psp_ret);
return ret;
}
vpsp_ctx->vm_is_bound = 1;
}
return 0;
}
/**
* @brief Directly convert the gpa address into hpa and forward it to PSP.
* This is a variant of kvm_pv_psp_copy_forward_op, mainly used for csv VMs.
*
* @param vpsp points to kvm related data
* @param cmd psp cmd id, bit 31 indicates queue priority
* @param data_gpa guest physical address of input data
* @param psp_ret asynchronous context information
*
* Since csv guest memory cannot be read or written directly, the
* asynchronous context information is exchanged through psp_ret and the
* return value.
*/
int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
gpa_t table_gpa)
int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
gpa_t data_gpa, uint32_t psp_ret)
{
int ret;
uint64_t data_hpa;
uint32_t index = 0, vid = 0;
struct vpsp_ret psp_async = {0};
struct vpsp_context *vpsp_ctx = NULL;
struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
uint8_t prio = CSV_COMMAND_PRIORITY_LOW;
phys_addr_t hpa;
vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid);
ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd);
if (unlikely(ret)) {
pr_err("directly operation not allowed\n");
goto end;
}
ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async);
if (unlikely(ret || *(uint32_t *)&psp_async)) {
pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n",
ret, *(uint32_t *)&psp_async);
goto end;
}
if (vpsp_ctx)
vid = vpsp_ctx->vid;
*((uint32_t *)&psp_async) = psp_ret;
hpa = gpa_to_hpa(vpsp, data_gpa);
if (unlikely(!hpa)) {
ret = -EFAULT;
goto end;
}
data_hpa = PUT_PSP_VID(hpa, vid);
switch (psp_async.status) {
case VPSP_INIT:
/* try to send command to the device for execution */
ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async);
if (unlikely(ret)) {
pr_err("[%s]: vpsp_do_cmd failed\n", __func__);
goto end;
}
break;
case VPSP_RUNNING:
prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
CSV_COMMAND_PRIORITY_LOW;
index = psp_async.index;
/* try to get the execution result from ringbuffer */
ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async);
if (unlikely(ret)) {
pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
goto end;
}
break;
default:
pr_err("[%s]: invalid command status\n", __func__);
break;
}
end:
/*
* The psp_async.pret field is reused so that both system
* errors and PSP errors can be reported.
*/
psp_async.format = VPSP_RET_PSP_FORMAT;
if (ret) {
psp_async.format = VPSP_RET_SYS_FORMAT;
if (ret > 0)
ret = -ret;
psp_async.pret = (uint16_t)ret;
}
return *((int *)&psp_async);
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_forward_op);
/**
* @brief Copy data at gpa to host memory and send it to the psp for processing.
*
* @param vpsp points to kvm related data
* @param cmd psp cmd id, bit 31 indicates queue priority
* @param data_gpa guest physical address of input data
* @param psp_ret_gpa guest physical address of psp_ret
*/
int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa)
{
int ret = 0;
struct vpsp_ret psp_ret = {0};
struct vpsp_hbuf_wrapper hbuf = {0};
struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
struct vpsp_context *vpsp_ctx = NULL;
phys_addr_t data_paddr = 0;
uint8_t prio = CSV_COMMAND_PRIORITY_LOW;
uint32_t index = 0;
uint32_t vid = 0;
// only tkm cmd need vid
if (cmd_type_is_tkm(vcmd->cmd_id)) {
// check the permission to use the default vid when no vid is set
ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid);
if (ret && !vpsp_get_default_vid_permission()) {
pr_err("[%s]: not allowed tkm command without vid\n", __func__);
return -EFAULT;
}
vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid);
ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd);
if (unlikely(ret)) {
pr_err("copy operation not allowed\n");
return -EPERM;
}
if (vpsp_ctx)
vid = vpsp_ctx->vid;
if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret,
sizeof(psp_ret))))
return -EFAULT;
switch (psp_ret.status) {
case VPSP_INIT:
/* multilevel pointer replace*/
ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf);
/* copy data from guest */
ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf);
if (unlikely(ret)) {
psp_ret.status = VPSP_FINISH;
pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n",
@ -560,25 +479,22 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
goto end;
}
data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid);
/* try to send command to the device for execution */
ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data,
(struct vpsp_ret *)&psp_ret);
ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret);
if (unlikely(ret)) {
pr_err("[%s]: vpsp_do_cmd failed\n", __func__);
pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__);
ret = -EFAULT;
goto end;
}
switch (psp_ret.status) {
case VPSP_RUNNING:
/* backup host memory message for restoring later*/
if (psp_ret.status == VPSP_RUNNING) {
prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
CSV_COMMAND_PRIORITY_LOW;
g_hbuf_wrap[prio][psp_ret.index] = hbuf;
break;
case VPSP_FINISH:
/* restore multilevel pointer data */
} else if (psp_ret.status == VPSP_FINISH) {
ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf);
if (unlikely(ret)) {
pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
@ -586,11 +502,6 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
ret = -EFAULT;
goto end;
}
break;
default:
ret = -EFAULT;
break;
}
break;
@ -598,35 +509,31 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
CSV_COMMAND_PRIORITY_LOW;
index = psp_ret.index;
data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid);
/* try to get the execution result from ringbuffer */
ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data,
(struct vpsp_ret *)&psp_ret);
ret = vpsp_try_get_result(prio, index, data_paddr,
(struct vpsp_ret *)&psp_ret);
if (unlikely(ret)) {
pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
ret = -EFAULT;
goto end;
}
switch (psp_ret.status) {
case VPSP_RUNNING:
break;
case VPSP_FINISH:
/* restore multilevel pointer data */
if (psp_ret.status == VPSP_RUNNING) {
ret = 0;
goto end;
} else if (psp_ret.status == VPSP_FINISH) {
/* copy data to guest */
ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa,
&g_hbuf_wrap[prio][index]);
if (unlikely(ret)) {
pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
__func__);
ret = -EFAULT;
goto end;
}
break;
default:
ret = -EFAULT;
break;
goto end;
}
ret = -EFAULT;
break;
default:
@ -638,4 +545,5 @@ end:
/* return psp_ret to guest */
vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
return ret;
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_op);
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op);

View File

@ -116,6 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#define __stringify_label(n) #n
#define __annotate_reachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
".long " __stringify_label(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
@ -125,9 +133,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
#else /* !CONFIG_OBJTOOL */
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

View File

@ -433,31 +433,54 @@ struct vpsp_cmd {
*
* @pret: the return code from device
* @resv: reserved bits
* @format: indicates whether pret is a psp status code (0) or a unix error code (1)
* @index: used to distinguish the position of command in the ringbuffer
* @status: indicates the current status of the related command
*/
struct vpsp_ret {
u32 pret : 16;
u32 resv : 2;
u32 resv : 1;
u32 format : 1;
u32 index : 12;
u32 status : 2;
};
#define VPSP_RET_SYS_FORMAT 1
#define VPSP_RET_PSP_FORMAT 0
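/*
* Bit layout of the value returned to the guest, per the widths above
* (assuming little-endian bitfield order): bits 15:0 pret, bit 16
* reserved, bit 17 format, bits 29:18 index, bits 31:30 status.
* A guest-side decode sketch (illustrative, not part of this patch):
*
* struct vpsp_ret r = *(struct vpsp_ret *)&ret;
* if (r.format == VPSP_RET_SYS_FORMAT)
* err = (int16_t)r.pret; // negative unix error code
*/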
struct kvm_vpsp {
struct kvm *kvm;
int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len);
int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
kvm_pfn_t (*gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
u32 vm_handle;
u8 is_csv_guest;
};
#define PSP_2MB_MASK (2*1024*1024 - 1)
#define PSP_HUGEPAGE_2MB (2*1024*1024)
#define PSP_HUGEPAGE_NUM_MAX 128
#define TKM_CMD_ID_MIN 0x120
#define TKM_CMD_ID_MAX 0x12f
#define TKM_PSP_CMDID TKM_CMD_ID_MIN
#define TKM_PSP_CMDID_OFFSET 0x128
#define PSP_VID_MASK 0xff
#define PSP_VID_SHIFT 56
#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT))
#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK)
#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT))
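/*
* Example: PUT_PSP_VID(0x12345000, 3) yields 0x0300000012345000, i.e. the
* vid travels in the otherwise unused top byte of the physical address;
* GET_PSP_VID() recovers 3 and CLEAR_PSP_VID() gives back 0x12345000.
*/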
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
struct vpsp_context {
u32 vid;
pid_t pid;
u64 gpa_start;
u64 gpa_end;
int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret);
// `vm_is_bound` indicates whether the binding operation has been performed
u32 vm_is_bound;
u32 vm_handle; // only for csv
};
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
int psp_do_cmd(int cmd, void *data, int *psp_ret);
@ -472,20 +495,20 @@ int csv_check_stat_queue_status(int *psp_ret);
*/
int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);
int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index,
void *data, struct vpsp_ret *psp_ret);
int vpsp_try_get_result(uint8_t prio, uint32_t index,
phys_addr_t phy_addr, struct vpsp_ret *psp_ret);
int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret);
int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret);
int vpsp_get_vid(uint32_t *vid, pid_t pid);
int vpsp_get_context(struct vpsp_context **ctx, pid_t pid);
int vpsp_get_default_vid_permission(void);
int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
gpa_t table_gpa);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa);
static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; }
int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
gpa_t data_gpa, uint32_t psp_ret);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
@ -498,22 +521,31 @@ static inline int
csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }
static inline int
vpsp_try_get_result(uint32_t vid, uint8_t prio,
uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
vpsp_try_get_result(uint8_t prio,
uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; }
static inline int
vpsp_try_do_cmd(uint32_t vid, int cmd,
void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
static inline int
vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; }
vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr,
struct vpsp_ret *psp_ret) { return -ENODEV; }
static inline int
vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; }
static inline int
vpsp_get_default_vid_permission(void) { return -ENODEV; }
static inline int
kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; }
kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
gpa_t psp_ret_gpa) { return -ENODEV; }
static inline int
kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; }
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data);

View File

@ -31,7 +31,9 @@
#define KVM_HC_SCHED_YIELD 11
#define KVM_HC_MAP_GPA_RANGE 12
#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */
#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */
#define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */
#define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */
#define KVM_HC_PSP_FORWARD_OP 103 /* Specific to Hygon platform */
/*
* hypercalls use architecture specific

View File

@ -31,9 +31,12 @@ HOSTLDLIBS_sign-file = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null |
ifdef CONFIG_UNWINDER_ORC
ifeq ($(ARCH),x86_64)
ARCH := x86
SRCARCH := x86
endif
HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include
ifeq ($(ARCH),loongarch)
SRCARCH := loongarch
endif
HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/$(SRCARCH)/include
HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED
endif

View File

@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_INST_H
#define _ASM_INST_H
#include <linux/bitops.h>
#define LOONGARCH_INSN_NOP 0x03400000
enum reg0i15_op {
break_op = 0x54,
};
enum reg0i26_op {
b_op = 0x14,
bl_op = 0x15,
};
enum reg1i21_op {
beqz_op = 0x10,
bnez_op = 0x11,
bceqz_op = 0x12, /* bits[9:8] = 0x00 */
bcnez_op = 0x12, /* bits[9:8] = 0x01 */
};
enum reg2_op {
ertn_op = 0x1920e,
};
enum reg2i12_op {
addid_op = 0x0b,
andi_op = 0x0d,
ldd_op = 0xa3,
std_op = 0xa7,
};
enum reg2i14_op {
ldptrd_op = 0x26,
stptrd_op = 0x27,
};
enum reg2i16_op {
jirl_op = 0x13,
beq_op = 0x16,
bne_op = 0x17,
blt_op = 0x18,
bge_op = 0x19,
bltu_op = 0x1a,
bgeu_op = 0x1b,
};
struct reg0i15_format {
unsigned int immediate : 15;
unsigned int opcode : 17;
};
struct reg0i26_format {
unsigned int immediate_h : 10;
unsigned int immediate_l : 16;
unsigned int opcode : 6;
};
struct reg1i21_format {
unsigned int immediate_h : 5;
unsigned int rj : 5;
unsigned int immediate_l : 16;
unsigned int opcode : 6;
};
struct reg2_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int opcode : 22;
};
struct reg2i12_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int immediate : 12;
unsigned int opcode : 10;
};
struct reg2i14_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int immediate : 14;
unsigned int opcode : 8;
};
struct reg2i16_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int immediate : 16;
unsigned int opcode : 6;
};
union loongarch_instruction {
unsigned int word;
struct reg0i15_format reg0i15_format;
struct reg0i26_format reg0i26_format;
struct reg1i21_format reg1i21_format;
struct reg2_format reg2_format;
struct reg2i12_format reg2i12_format;
struct reg2i14_format reg2i14_format;
struct reg2i16_format reg2i16_format;
};
#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
enum loongarch_gpr {
LOONGARCH_GPR_ZERO = 0,
LOONGARCH_GPR_RA = 1,
LOONGARCH_GPR_TP = 2,
LOONGARCH_GPR_SP = 3,
LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */
LOONGARCH_GPR_A1, /* Reused as V1 for return value */
LOONGARCH_GPR_A2,
LOONGARCH_GPR_A3,
LOONGARCH_GPR_A4,
LOONGARCH_GPR_A5,
LOONGARCH_GPR_A6,
LOONGARCH_GPR_A7,
LOONGARCH_GPR_T0 = 12,
LOONGARCH_GPR_T1,
LOONGARCH_GPR_T2,
LOONGARCH_GPR_T3,
LOONGARCH_GPR_T4,
LOONGARCH_GPR_T5,
LOONGARCH_GPR_T6,
LOONGARCH_GPR_T7,
LOONGARCH_GPR_T8,
LOONGARCH_GPR_FP = 22,
LOONGARCH_GPR_S0 = 23,
LOONGARCH_GPR_S1,
LOONGARCH_GPR_S2,
LOONGARCH_GPR_S3,
LOONGARCH_GPR_S4,
LOONGARCH_GPR_S5,
LOONGARCH_GPR_S6,
LOONGARCH_GPR_S7,
LOONGARCH_GPR_S8,
LOONGARCH_GPR_MAX
};
#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
enum loongarch_gpr rj, \
enum loongarch_gpr rd, \
int offset) \
{ \
insn->reg2i16_format.opcode = OP; \
insn->reg2i16_format.immediate = offset; \
insn->reg2i16_format.rj = rj; \
insn->reg2i16_format.rd = rd; \
}
DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
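/*
* Example: emit_jirl(&insn, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0)
* encodes "jirl $zero, $ra, 0", the canonical LoongArch return; this is
* how objtool's arch_ret_insn() builds its RET template.
*/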
#endif /* _ASM_INST_H */

View File

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_TYPES_H
#define _ORC_TYPES_H
#include <linux/types.h>
/*
* The ORC_REG_* registers are base registers which are used to find other
* registers on the stack.
*
* ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
* address of the previous frame: the caller's SP before it called the current
* function.
*
* ORC_REG_UNDEFINED means the corresponding register's value didn't change in
* the current frame.
*
* The most commonly used base registers are SP and FP -- which the previous SP
* is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is
* usually based on.
*
* The rest of the base registers are needed for special cases like entry code
* and GCC realigned stacks.
*/
#define ORC_REG_UNDEFINED 0
#define ORC_REG_PREV_SP 1
#define ORC_REG_SP 2
#define ORC_REG_FP 3
#define ORC_REG_MAX 4
#define ORC_TYPE_UNDEFINED 0
#define ORC_TYPE_END_OF_STACK 1
#define ORC_TYPE_CALL 2
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
* CFI, simplified for ease of access by the in-kernel unwinder. It tells the
* unwinder how to find the previous SP and FP (and sometimes entry regs) on
* the stack for a given code address. Each instance of the struct corresponds
* to one or more code locations.
*/
struct orc_entry {
s16 sp_offset;
s16 fp_offset;
s16 ra_offset;
unsigned int sp_reg:4;
unsigned int fp_reg:4;
unsigned int ra_reg:4;
unsigned int type:3;
unsigned int signal:1;
};
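/*
* Worked example for a hypothetical LoongArch prologue:
* addi.d $sp, $sp, -16
* st.d $ra, $sp, 8
* st.d $fp, $sp, 0
* The entry covering the function body would be roughly sp_reg=ORC_REG_SP,
* sp_offset=16 (CFA = sp + 16), ra_reg=ORC_REG_PREV_SP, ra_offset=-8,
* fp_reg=ORC_REG_PREV_SP, fp_offset=-16, type=ORC_TYPE_CALL.
*/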
#endif /* __ASSEMBLY__ */
#endif /* _ORC_TYPES_H */

View File

@ -87,4 +87,15 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
return (word << shift) | (word >> ((-shift) & 31));
}
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
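/*
* Example: sign_extend64(0xfff, 11) treats bit 11 as the sign bit of a
* 12-bit immediate and returns -1, while sign_extend64(0x7ff, 11)
* returns 0x7ff unchanged.
*/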
#endif

View File

@ -57,6 +57,10 @@ ifeq ($(SRCARCH),x86)
BUILD_ORC := y
endif
ifeq ($(SRCARCH),loongarch)
BUILD_ORC := y
endif
export BUILD_ORC
export srctree OUTPUT CFLAGS SRCARCH AWK
include $(srctree)/tools/build/Makefile.include

View File

@ -0,0 +1,3 @@
objtool-y += decode.o
objtool-y += special.o
objtool-y += orc.o

View File

@ -0,0 +1,365 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <objtool/check.h>
#include <objtool/warn.h>
#include <asm/inst.h>
#include <asm/orc_types.h>
#include <linux/objtool_types.h>
#ifndef EM_LOONGARCH
#define EM_LOONGARCH 258
#endif
int arch_ftrace_match(char *name)
{
return !strcmp(name, "_mcount");
}
unsigned long arch_jump_destination(struct instruction *insn)
{
return insn->offset + (insn->immediate << 2);
}
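/*
* The branch immediates decoded in this file count 32-bit instructions,
* so they are scaled by 4 (<< 2) above to get a byte offset from the
* branch: e.g. an instruction at offset 0x100 with immediate 0x10
* targets 0x140.
*/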
unsigned long arch_dest_reloc_offset(int addend)
{
return addend;
}
bool arch_pc_relative_reloc(struct reloc *reloc)
{
return false;
}
bool arch_callee_saved_reg(unsigned char reg)
{
switch (reg) {
case CFI_RA:
case CFI_FP:
case CFI_S0 ... CFI_S8:
return true;
default:
return false;
}
}
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
switch (sp_reg) {
case ORC_REG_UNDEFINED:
*base = CFI_UNDEFINED;
break;
case ORC_REG_SP:
*base = CFI_SP;
break;
case ORC_REG_FP:
*base = CFI_FP;
break;
default:
return -1;
}
return 0;
}
static bool is_loongarch(const struct elf *elf)
{
if (elf->ehdr.e_machine == EM_LOONGARCH)
return true;
WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
return false;
}
#define ADD_OP(op) \
if (!(op = calloc(1, sizeof(*op)))) \
return -1; \
else for (*ops_list = op, ops_list = &op->next; op; op = NULL)
static bool decode_insn_reg0i26_format(union loongarch_instruction inst,
struct instruction *insn)
{
switch (inst.reg0i26_format.opcode) {
case b_op:
insn->type = INSN_JUMP_UNCONDITIONAL;
insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 |
inst.reg0i26_format.immediate_l, 25);
break;
case bl_op:
insn->type = INSN_CALL;
insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 |
inst.reg0i26_format.immediate_l, 25);
break;
default:
return false;
}
return true;
}
static bool decode_insn_reg1i21_format(union loongarch_instruction inst,
struct instruction *insn)
{
switch (inst.reg1i21_format.opcode) {
case beqz_op:
case bnez_op:
case bceqz_op:
insn->type = INSN_JUMP_CONDITIONAL;
insn->immediate = sign_extend64(inst.reg1i21_format.immediate_h << 16 |
inst.reg1i21_format.immediate_l, 20);
break;
default:
return false;
}
return true;
}
static bool decode_insn_reg2i12_format(union loongarch_instruction inst,
struct instruction *insn,
struct stack_op **ops_list,
struct stack_op *op)
{
switch (inst.reg2i12_format.opcode) {
case addid_op:
if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) {
/* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
op->src.reg = inst.reg2i12_format.rj;
op->src.offset = insn->immediate;
op->dest.type = OP_DEST_REG;
op->dest.reg = inst.reg2i12_format.rd;
}
}
if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) {
/* addi.d sp,fp,si12 */
struct symbol *func = find_func_containing(insn->sec, insn->offset);
if (!func)
return false;
func->frame_pointer = true;
}
break;
case ldd_op:
if (inst.reg2i12_format.rj == CFI_SP) {
/* ld.d rd,sp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_REG_INDIRECT;
op->src.reg = CFI_SP;
op->src.offset = insn->immediate;
op->dest.type = OP_DEST_REG;
op->dest.reg = inst.reg2i12_format.rd;
}
}
break;
case std_op:
if (inst.reg2i12_format.rj == CFI_SP) {
/* st.d rd,sp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = inst.reg2i12_format.rd;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_SP;
op->dest.offset = insn->immediate;
}
}
break;
case andi_op:
if (inst.reg2i12_format.rd == 0 &&
inst.reg2i12_format.rj == 0 &&
inst.reg2i12_format.immediate == 0)
/* andi r0,r0,0 */
insn->type = INSN_NOP;
break;
default:
return false;
}
return true;
}
static bool decode_insn_reg2i14_format(union loongarch_instruction inst,
struct instruction *insn,
struct stack_op **ops_list,
struct stack_op *op)
{
switch (inst.reg2i14_format.opcode) {
case ldptrd_op:
if (inst.reg2i14_format.rj == CFI_SP) {
/* ldptr.d rd,sp,si14 */
insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13);
ADD_OP(op) {
op->src.type = OP_SRC_REG_INDIRECT;
op->src.reg = CFI_SP;
op->src.offset = insn->immediate;
op->dest.type = OP_DEST_REG;
op->dest.reg = inst.reg2i14_format.rd;
}
}
break;
case stptrd_op:
if (inst.reg2i14_format.rj == CFI_SP) {
/* stptr.d ra,sp,0 */
if (inst.reg2i14_format.rd == LOONGARCH_GPR_RA &&
inst.reg2i14_format.immediate == 0)
break;
/* stptr.d rd,sp,si14 */
insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13);
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = inst.reg2i14_format.rd;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_SP;
op->dest.offset = insn->immediate;
}
}
break;
default:
return false;
}
return true;
}
static bool decode_insn_reg2i16_format(union loongarch_instruction inst,
struct instruction *insn)
{
switch (inst.reg2i16_format.opcode) {
case jirl_op:
if (inst.reg2i16_format.rd == 0 &&
inst.reg2i16_format.rj == CFI_RA &&
inst.reg2i16_format.immediate == 0) {
/* jirl r0,ra,0 */
insn->type = INSN_RETURN;
} else if (inst.reg2i16_format.rd == CFI_RA) {
/* jirl ra,rj,offs16 */
insn->type = INSN_CALL_DYNAMIC;
} else if (inst.reg2i16_format.rd == CFI_A0 &&
inst.reg2i16_format.immediate == 0) {
/*
* jirl a0,t0,0
* this is a special case in loongarch_suspend_enter,
* just treat it as a call instruction.
*/
insn->type = INSN_CALL_DYNAMIC;
} else if (inst.reg2i16_format.rd == 0 &&
inst.reg2i16_format.immediate == 0) {
/* jirl r0,rj,0 */
insn->type = INSN_JUMP_DYNAMIC;
} else if (inst.reg2i16_format.rd == 0 &&
inst.reg2i16_format.immediate != 0) {
/*
* jirl r0,t0,12
* this is a rare case in JUMP_VIRT_ADDR,
* just ignore it because it is harmless for tracing.
*/
break;
} else {
/* jirl rd,rj,offs16 */
insn->type = INSN_JUMP_UNCONDITIONAL;
insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15);
}
break;
case beq_op:
case bne_op:
case blt_op:
case bge_op:
case bltu_op:
case bgeu_op:
insn->type = INSN_JUMP_CONDITIONAL;
insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15);
break;
default:
return false;
}
return true;
}
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
struct instruction *insn)
{
struct stack_op **ops_list = &insn->stack_ops;
const struct elf *elf = file->elf;
struct stack_op *op = NULL;
union loongarch_instruction inst;
if (!is_loongarch(elf))
return -1;
if (maxlen < LOONGARCH_INSN_SIZE)
return 0;
insn->len = LOONGARCH_INSN_SIZE;
insn->type = INSN_OTHER;
insn->immediate = 0;
inst = *(union loongarch_instruction *)(sec->data->d_buf + offset);
if (decode_insn_reg0i26_format(inst, insn))
return 0;
if (decode_insn_reg1i21_format(inst, insn))
return 0;
if (decode_insn_reg2i12_format(inst, insn, ops_list, op))
return 0;
if (decode_insn_reg2i14_format(inst, insn, ops_list, op))
return 0;
if (decode_insn_reg2i16_format(inst, insn))
return 0;
if (inst.word == 0)
insn->type = INSN_NOP;
else if (inst.reg0i15_format.opcode == break_op) {
/* break */
insn->type = INSN_BUG;
} else if (inst.reg2_format.opcode == ertn_op) {
/* ertn */
insn->type = INSN_RETURN;
}
return 0;
}
const char *arch_nop_insn(int len)
{
static u32 nop;
if (len != LOONGARCH_INSN_SIZE)
WARN("invalid NOP size: %d\n", len);
nop = LOONGARCH_INSN_NOP;
return (const char *)&nop;
}
const char *arch_ret_insn(int len)
{
static u32 ret;
if (len != LOONGARCH_INSN_SIZE)
WARN("invalid RET size: %d\n", len);
emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
return (const char *)&ret;
}
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
int i;
for (i = 0; i < CFI_NUM_REGS; i++) {
state->regs[i].base = CFI_UNDEFINED;
state->regs[i].offset = 0;
}
/* initial CFA (call frame address) */
state->cfa.base = CFI_SP;
state->cfa.offset = 0;
}

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_CFI_REGS_H
#define _OBJTOOL_ARCH_CFI_REGS_H
#define CFI_RA 1
#define CFI_SP 3
#define CFI_A0 4
#define CFI_FP 22
#define CFI_S0 23
#define CFI_S1 24
#define CFI_S2 25
#define CFI_S3 26
#define CFI_S4 27
#define CFI_S5 28
#define CFI_S6 29
#define CFI_S7 30
#define CFI_S8 31
#define CFI_NUM_REGS 32
#define CFI_BP CFI_FP
#endif /* _OBJTOOL_ARCH_CFI_REGS_H */

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_ELF_H
#define _OBJTOOL_ARCH_ELF_H
/*
* See the following link for more info about ELF Relocation types:
* https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html#_relocations
*/
#ifndef R_LARCH_NONE
#define R_LARCH_NONE 0
#endif
#ifndef R_LARCH_32
#define R_LARCH_32 1
#endif
#ifndef R_LARCH_64
#define R_LARCH_64 2
#endif
#ifndef R_LARCH_32_PCREL
#define R_LARCH_32_PCREL 99
#endif
#define R_NONE R_LARCH_NONE
#define R_ABS32 R_LARCH_32
#define R_ABS64 R_LARCH_64
#define R_DATA32 R_LARCH_32_PCREL
#define R_DATA64 R_LARCH_32_PCREL
#define R_TEXT32 R_LARCH_32_PCREL
#define R_TEXT64 R_LARCH_32_PCREL
#endif /* _OBJTOOL_ARCH_ELF_H */

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_SPECIAL_H
#define _OBJTOOL_ARCH_SPECIAL_H
/*
* See more info about struct exception_table_entry
* in arch/loongarch/include/asm/extable.h
*/
#define EX_ENTRY_SIZE 12
#define EX_ORIG_OFFSET 0
#define EX_NEW_OFFSET 4
/*
* See more info about struct jump_entry
* in include/linux/jump_label.h
*/
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
#define JUMP_KEY_OFFSET 8
/*
* See more info about struct alt_instr
* in arch/loongarch/include/asm/alternative.h
*/
#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0
#define ALT_NEW_OFFSET 4
#define ALT_FEATURE_OFFSET 8
#define ALT_ORIG_LEN_OFFSET 10
#define ALT_NEW_LEN_OFFSET 11
#endif /* _OBJTOOL_ARCH_SPECIAL_H */

View File

@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/objtool_types.h>
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
{
struct cfi_reg *fp = &cfi->regs[CFI_FP];
struct cfi_reg *ra = &cfi->regs[CFI_RA];
memset(orc, 0, sizeof(*orc));
if (!cfi) {
/*
* This is usually either unreachable nops/traps (which don't
* trigger unreachable instruction warnings), or
* STACK_FRAME_NON_STANDARD functions.
*/
orc->type = ORC_TYPE_UNDEFINED;
return 0;
}
switch (cfi->type) {
case UNWIND_HINT_TYPE_UNDEFINED:
orc->type = ORC_TYPE_UNDEFINED;
return 0;
case UNWIND_HINT_TYPE_END_OF_STACK:
orc->type = ORC_TYPE_END_OF_STACK;
return 0;
case UNWIND_HINT_TYPE_CALL:
orc->type = ORC_TYPE_CALL;
break;
case UNWIND_HINT_TYPE_REGS:
orc->type = ORC_TYPE_REGS;
break;
case UNWIND_HINT_TYPE_REGS_PARTIAL:
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
orc->signal = cfi->signal;
switch (cfi->cfa.base) {
case CFI_SP:
orc->sp_reg = ORC_REG_SP;
break;
case CFI_FP:
orc->sp_reg = ORC_REG_FP;
break;
default:
WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
switch (fp->base) {
case CFI_UNDEFINED:
orc->fp_reg = ORC_REG_UNDEFINED;
orc->fp_offset = 0;
break;
case CFI_CFA:
orc->fp_reg = ORC_REG_PREV_SP;
orc->fp_offset = fp->offset;
break;
case CFI_FP:
orc->fp_reg = ORC_REG_FP;
break;
default:
WARN_INSN(insn, "unknown FP base reg %d", fp->base);
return -1;
}
switch (ra->base) {
case CFI_UNDEFINED:
orc->ra_reg = ORC_REG_UNDEFINED;
orc->ra_offset = 0;
break;
case CFI_CFA:
orc->ra_reg = ORC_REG_PREV_SP;
orc->ra_offset = ra->offset;
break;
case CFI_FP:
orc->ra_reg = ORC_REG_FP;
break;
default:
WARN_INSN(insn, "unknown RA base reg %d", ra->base);
return -1;
}
orc->sp_offset = cfi->cfa.offset;
return 0;
}
int write_orc_entry(struct elf *elf, struct section *orc_sec,
struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o)
{
struct orc_entry *orc;
/* populate ORC data */
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
/* populate reloc for ip */
if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
insn_sec, insn_off))
return -1;
return 0;
}
static const char *reg_name(unsigned int reg)
{
switch (reg) {
case ORC_REG_SP:
return "sp";
case ORC_REG_FP:
return "fp";
case ORC_REG_PREV_SP:
return "prevsp";
default:
return "?";
}
}
static const char *orc_type_name(unsigned int type)
{
switch (type) {
case UNWIND_HINT_TYPE_CALL:
return "call";
case UNWIND_HINT_TYPE_REGS:
return "regs";
case UNWIND_HINT_TYPE_REGS_PARTIAL:
return "regs (partial)";
default:
return "?";
}
}
static void print_reg(unsigned int reg, int offset)
{
if (reg == ORC_REG_UNDEFINED)
printf(" (und) ");
else
printf("%s + %3d", reg_name(reg), offset);
}
void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i)
{
printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:");
print_reg(orc[i].sp_reg, orc[i].sp_offset);
printf(" fp:");
print_reg(orc[i].fp_reg, orc[i].fp_offset);
printf(" ra:");
print_reg(orc[i].ra_reg, orc[i].ra_offset);
printf(" signal:%d\n", orc[i].signal);
}

View File

@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <objtool/special.h>
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
struct reloc *reloc)
{
return false;
}
struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn)
{
return NULL;
}

View File

@ -1,5 +1,6 @@
objtool-y += special.o
objtool-y += decode.o
objtool-y += orc.o
inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt

View File

@ -0,0 +1,188 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/objtool_types.h>
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
{
struct cfi_reg *bp = &cfi->regs[CFI_BP];
memset(orc, 0, sizeof(*orc));
if (!cfi) {
/*
* This is usually either unreachable nops/traps (which don't
* trigger unreachable instruction warnings), or
* STACK_FRAME_NON_STANDARD functions.
*/
orc->type = ORC_TYPE_UNDEFINED;
return 0;
}
switch (cfi->type) {
case UNWIND_HINT_TYPE_UNDEFINED:
orc->type = ORC_TYPE_UNDEFINED;
return 0;
case UNWIND_HINT_TYPE_END_OF_STACK:
orc->type = ORC_TYPE_END_OF_STACK;
return 0;
case UNWIND_HINT_TYPE_CALL:
orc->type = ORC_TYPE_CALL;
break;
case UNWIND_HINT_TYPE_REGS:
orc->type = ORC_TYPE_REGS;
break;
case UNWIND_HINT_TYPE_REGS_PARTIAL:
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
orc->signal = cfi->signal;
switch (cfi->cfa.base) {
case CFI_SP:
orc->sp_reg = ORC_REG_SP;
break;
case CFI_SP_INDIRECT:
orc->sp_reg = ORC_REG_SP_INDIRECT;
break;
case CFI_BP:
orc->sp_reg = ORC_REG_BP;
break;
case CFI_BP_INDIRECT:
orc->sp_reg = ORC_REG_BP_INDIRECT;
break;
case CFI_R10:
orc->sp_reg = ORC_REG_R10;
break;
case CFI_R13:
orc->sp_reg = ORC_REG_R13;
break;
case CFI_DI:
orc->sp_reg = ORC_REG_DI;
break;
case CFI_DX:
orc->sp_reg = ORC_REG_DX;
break;
default:
WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
switch (bp->base) {
case CFI_UNDEFINED:
orc->bp_reg = ORC_REG_UNDEFINED;
break;
case CFI_CFA:
orc->bp_reg = ORC_REG_PREV_SP;
break;
case CFI_BP:
orc->bp_reg = ORC_REG_BP;
break;
default:
WARN_INSN(insn, "unknown BP base reg %d", bp->base);
return -1;
}
orc->sp_offset = cfi->cfa.offset;
orc->bp_offset = bp->offset;
return 0;
}
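The x86 equivalent of the earlier LoongArch example: for code built with frame pointers, after "push %rbp; mov %rsp, %rbp" the CFA is rbp + 16 and the caller's rbp is saved at CFA - 16, so this function would fill in roughly the following (illustrative values; layout per the x86 asm/orc_types.h):

#include <asm/orc_types.h>

static const struct orc_entry example_orc = {
	.type      = ORC_TYPE_CALL,
	.sp_reg    = ORC_REG_BP,	/* CFA tracked via rbp */
	.sp_offset = 16,
	.bp_reg    = ORC_REG_PREV_SP,	/* saved rbp at CFA - 16 */
	.bp_offset = -16,
};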
int write_orc_entry(struct elf *elf, struct section *orc_sec,
struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o)
{
struct orc_entry *orc;
/* populate ORC data */
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
/* populate reloc for ip */
if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
insn_sec, insn_off))
return -1;
return 0;
}
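Unlike the LoongArch variant above, the x86 writer byte-swaps the 16-bit offset fields, since objtool may run on a host whose endianness differs from the target ELF (LoongArch kernels are little-endian only, so no swap is needed there). What bswap_if_needed() amounts to for these fields, as a standalone sketch (the real helper lives in objtool/endianness.h and inspects the ELF header):

#include <stdint.h>
#include <byteswap.h>

static inline int16_t swap_s16_if(int16_t v, int host_target_differ)
{
	return host_target_differ ? (int16_t)bswap_16((uint16_t)v) : v;
}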
static const char *reg_name(unsigned int reg)
{
switch (reg) {
case ORC_REG_PREV_SP:
return "prevsp";
case ORC_REG_DX:
return "dx";
case ORC_REG_DI:
return "di";
case ORC_REG_BP:
return "bp";
case ORC_REG_SP:
return "sp";
case ORC_REG_R10:
return "r10";
case ORC_REG_R13:
return "r13";
case ORC_REG_BP_INDIRECT:
return "bp(ind)";
case ORC_REG_SP_INDIRECT:
return "sp(ind)";
default:
return "?";
}
}
static const char *orc_type_name(unsigned int type)
{
switch (type) {
case ORC_TYPE_UNDEFINED:
return "(und)";
case ORC_TYPE_END_OF_STACK:
return "end";
case ORC_TYPE_CALL:
return "call";
case ORC_TYPE_REGS:
return "regs";
case ORC_TYPE_REGS_PARTIAL:
return "regs (partial)";
default:
return "?";
}
}
static void print_reg(unsigned int reg, int offset)
{
if (reg == ORC_REG_BP_INDIRECT)
printf("(bp%+d)", offset);
else if (reg == ORC_REG_SP_INDIRECT)
printf("(sp)%+d", offset);
else if (reg == ORC_REG_UNDEFINED)
printf("(und)");
else
printf("%s%+d", reg_name(reg), offset);
}
void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i)
{
printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:");
print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset));
printf(" bp:");
print_reg(orc[i].bp_reg, bswap_if_needed(dummy_elf, orc[i].bp_offset));
printf(" signal:%d\n", orc[i].signal);
}


@ -20,6 +20,7 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
#include <linux/string.h>
struct alternative {
struct alternative *next;
@ -584,7 +585,7 @@ static int add_dead_ends(struct objtool_file *file)
struct section *rsec;
struct reloc *reloc;
struct instruction *insn;
s64 addend;
unsigned long offset;
/*
* Check for manually annotated dead ends.
@ -594,27 +595,28 @@ static int add_dead_ends(struct objtool_file *file)
goto reachable;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
if (reloc->sym->type == STT_SECTION) {
offset = reloc_addend(reloc);
} else if (reloc->sym->local_label) {
offset = reloc->sym->offset;
} else {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
addend = reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, addend);
insn = find_insn(file, reloc->sym->sec, offset);
if (insn)
insn = prev_insn_same_sec(file, insn);
else if (addend == reloc->sym->sec->sh.sh_size) {
else if (offset == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
reloc->sym->sec->name, offset);
return -1;
}
} else {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
reloc->sym->sec->name, offset);
return -1;
}
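The pattern above (repeated for the reachable case below) handles relocations that reference local ".L" labels directly instead of a section symbol, as LoongArch toolchains can emit: the target offset then comes from the symbol itself rather than the reloc addend. The rule in isolation, with simplified types (field names assumed, mirroring objtool's struct reloc/symbol):

#include <elf.h>
#include <stdbool.h>

/* returns the offset of the reloc target within its section, or -1 */
static long reloc_target_offset(unsigned int sym_type, bool local_label,
				long addend, long sym_offset)
{
	if (sym_type == STT_SECTION)
		return addend;		/* classic case: section symbol + addend */
	if (local_label)
		return sym_offset;	/* reloc against a .L local label */
	return -1;			/* unexpected symbol type */
}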
@ -633,27 +635,28 @@ reachable:
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
if (reloc->sym->type == STT_SECTION) {
offset = reloc_addend(reloc);
} else if (reloc->sym->local_label) {
offset = reloc->sym->offset;
} else {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
addend = reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, addend);
insn = find_insn(file, reloc->sym->sec, offset);
if (insn)
insn = prev_insn_same_sec(file, insn);
else if (addend == reloc->sym->sec->sh.sh_size) {
else if (offset == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find reachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
reloc->sym->sec->name, offset);
return -1;
}
} else {
WARN("can't find reachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
reloc->sym->sec->name, offset);
return -1;
}
@ -2208,6 +2211,7 @@ static int read_unwind_hints(struct objtool_file *file)
struct unwind_hint *hint;
struct instruction *insn;
struct reloc *reloc;
unsigned long offset;
int i;
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
@ -2235,7 +2239,16 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (reloc->sym->type == STT_SECTION) {
offset = reloc_addend(reloc);
} else if (reloc->sym->local_label) {
offset = reloc->sym->offset;
} else {
WARN("unexpected relocation symbol type in %s", sec->rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
WARN("can't find insn for unwind_hints[%d]", i);
return -1;
@ -2506,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file)
struct symbol *func;
for_each_sym(file, func) {
if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
func->local_label = true;
if (func->bind != STB_GLOBAL)
continue;
@ -2959,10 +2975,27 @@ static int update_cfi_state(struct instruction *insn,
break;
}
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
insn->sym->frame_pointer) {
/* addi.d fp,sp,imm on LoongArch */
if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
cfa->base = CFI_BP;
cfa->offset = 0;
}
break;
}
/* lea disp(%rbp), %rsp */
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
/* addi.d sp,fp,imm on LoongArch */
if (cfa->base == CFI_BP && cfa->offset == 0) {
if (insn->sym->frame_pointer) {
cfa->base = CFI_SP;
cfa->offset = -op->src.offset;
}
} else {
/* lea disp(%rbp), %rsp */
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
}
break;
}
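These two new branches teach objtool the LoongArch frame-pointer idiom: "addi.d fp, sp, imm" moves the CFA from sp + imm to fp + 0, and the matching "addi.d sp, fp, imm" in the epilogue moves it back, both gated on insn->sym->frame_pointer; the existing x86 "lea disp(%rbp), %rsp" path is kept as the else branch. A minimal model of the two transitions (simplified types; objtool's real state lives in struct cfi_state):

#include <stdbool.h>

enum base_reg { BASE_SP, BASE_FP };
struct cfa_state { enum base_reg base; int offset; };

/* addi.d fp, sp, imm: if the CFA is currently sp + imm, it becomes fp + 0 */
static void cfa_to_fp(struct cfa_state *cfa, int imm, bool frame_pointer)
{
	if (frame_pointer && cfa->base == BASE_SP && cfa->offset == imm) {
		cfa->base = BASE_FP;
		cfa->offset = 0;
	}
}

/* addi.d sp, fp, imm (imm <= 0): if the CFA is fp + 0, it becomes sp + (-imm) */
static void cfa_back_to_sp(struct cfa_state *cfa, int imm, bool frame_pointer)
{
	if (frame_pointer && cfa->base == BASE_FP && cfa->offset == 0) {
		cfa->base = BASE_SP;
		cfa->offset = -imm;
	}
}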


@ -67,6 +67,8 @@ struct symbol {
u8 profiling_func : 1;
u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
u8 frame_pointer : 1;
struct list_head pv_target;
struct reloc *relocs;
};


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ORC_H
#define _OBJTOOL_ORC_H
#include <objtool/check.h>
int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn);
void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i);
int write_orc_entry(struct elf *elf, struct section *orc_sec,
struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o);
#endif /* _OBJTOOL_ORC_H */


@ -6,65 +6,10 @@
#include <unistd.h>
#include <asm/orc_types.h>
#include <objtool/objtool.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
static const char *reg_name(unsigned int reg)
{
switch (reg) {
case ORC_REG_PREV_SP:
return "prevsp";
case ORC_REG_DX:
return "dx";
case ORC_REG_DI:
return "di";
case ORC_REG_BP:
return "bp";
case ORC_REG_SP:
return "sp";
case ORC_REG_R10:
return "r10";
case ORC_REG_R13:
return "r13";
case ORC_REG_BP_INDIRECT:
return "bp(ind)";
case ORC_REG_SP_INDIRECT:
return "sp(ind)";
default:
return "?";
}
}
static const char *orc_type_name(unsigned int type)
{
switch (type) {
case ORC_TYPE_UNDEFINED:
return "(und)";
case ORC_TYPE_END_OF_STACK:
return "end";
case ORC_TYPE_CALL:
return "call";
case ORC_TYPE_REGS:
return "regs";
case ORC_TYPE_REGS_PARTIAL:
return "regs (partial)";
default:
return "?";
}
}
static void print_reg(unsigned int reg, int offset)
{
if (reg == ORC_REG_BP_INDIRECT)
printf("(bp%+d)", offset);
else if (reg == ORC_REG_SP_INDIRECT)
printf("(sp)%+d", offset);
else if (reg == ORC_REG_UNDEFINED)
printf("(und)");
else
printf("%s%+d", reg_name(reg), offset);
}
int orc_dump(const char *_objname)
{
int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
@ -205,17 +150,7 @@ int orc_dump(const char *_objname)
printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
}
printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:");
print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset));
printf(" bp:");
print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
printf(" signal:%d\n", orc[i].signal);
orc_print_dump(&dummy_elf, orc, i);
}
elf_end(elf);


@ -10,121 +10,10 @@
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
struct instruction *insn)
{
struct cfi_reg *bp = &cfi->regs[CFI_BP];
memset(orc, 0, sizeof(*orc));
if (!cfi) {
/*
* This is usually either unreachable nops/traps (which don't
* trigger unreachable instruction warnings), or
* STACK_FRAME_NON_STANDARD functions.
*/
orc->type = ORC_TYPE_UNDEFINED;
return 0;
}
switch (cfi->type) {
case UNWIND_HINT_TYPE_UNDEFINED:
orc->type = ORC_TYPE_UNDEFINED;
return 0;
case UNWIND_HINT_TYPE_END_OF_STACK:
orc->type = ORC_TYPE_END_OF_STACK;
return 0;
case UNWIND_HINT_TYPE_CALL:
orc->type = ORC_TYPE_CALL;
break;
case UNWIND_HINT_TYPE_REGS:
orc->type = ORC_TYPE_REGS;
break;
case UNWIND_HINT_TYPE_REGS_PARTIAL:
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
orc->signal = cfi->signal;
switch (cfi->cfa.base) {
case CFI_SP:
orc->sp_reg = ORC_REG_SP;
break;
case CFI_SP_INDIRECT:
orc->sp_reg = ORC_REG_SP_INDIRECT;
break;
case CFI_BP:
orc->sp_reg = ORC_REG_BP;
break;
case CFI_BP_INDIRECT:
orc->sp_reg = ORC_REG_BP_INDIRECT;
break;
case CFI_R10:
orc->sp_reg = ORC_REG_R10;
break;
case CFI_R13:
orc->sp_reg = ORC_REG_R13;
break;
case CFI_DI:
orc->sp_reg = ORC_REG_DI;
break;
case CFI_DX:
orc->sp_reg = ORC_REG_DX;
break;
default:
WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
switch (bp->base) {
case CFI_UNDEFINED:
orc->bp_reg = ORC_REG_UNDEFINED;
break;
case CFI_CFA:
orc->bp_reg = ORC_REG_PREV_SP;
break;
case CFI_BP:
orc->bp_reg = ORC_REG_BP;
break;
default:
WARN_INSN(insn, "unknown BP base reg %d", bp->base);
return -1;
}
orc->sp_offset = cfi->cfa.offset;
orc->bp_offset = bp->offset;
return 0;
}
static int write_orc_entry(struct elf *elf, struct section *orc_sec,
struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o)
{
struct orc_entry *orc;
/* populate ORC data */
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
/* populate reloc for ip */
if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
insn_sec, insn_off))
return -1;
return 0;
}
struct orc_list_entry {
struct list_head list;
struct orc_entry orc;