arm64 fixes for -rc6
- Fix kernel text addresses for relocatable images booting using EFI and
  with KASLR disabled so that they match the vmlinux ELF binary.
- Fix unloading and unbinding of PMU driver modules.
- Fix generic mmiowb() when writeX() is called from preemptible context
  (reported by the riscv folks).
- Fix ptrace hardware single-step interactions with signal handlers,
  system calls and reverse debugging.
- Fix reporting of 64-bit x0 register for 32-bit tasks via 'perf_regs'.
- Add comments describing syscall entry/exit tracing ABI.
-----BEGIN PGP SIGNATURE-----
iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAl8RgvsQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNKNcB/9wsRJDxQDsCbV83xn5LrpR2qCs6G1UkVWT
7peEQ21Brh60DamHlr9FdwPrIO/C62tQItU/hjCyk5oXZP3soW4J5vAXujP8wPrL
bPe5933HuYkgRnnInCcrACmOnIacO9HGns8OoOKtSdZ6HCaKarL9V4hOfzWVSn7L
RicX+xdn89lzZ+AD2MXYq1Q6mLcpKWx9wa0PSiYL+rGjsUqhwHvJcsYcSMp95/Ay
ZSK27jmxjjTXNW56hE/svz4dzkBvL+8ezwodhjZtz2co8PdGhH2Azbq3QtHeICy+
JB7lSx8A1sYIF3ASAhDYglCOCNlTb1dDN5LYfRwMWZ8cQfnRVdeV
=o4Ve
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into master

Pull arm64 fixes from Will Deacon:
 "A batch of arm64 fixes. Although the diffstat is a bit larger than we'd
  usually have at this stage, a decent amount of it is the addition of
  comments describing our syscall tracing behaviour, and also a sweep
  across all the modular arm64 PMU drivers to make them robust against
  unloading and unbinding. There are a couple of minor things kicking
  around at the moment (CPU errata and module PLTs for very large
  modules), but I'm not expecting any significant changes now for us in
  5.8.

  - Fix kernel text addresses for relocatable images booting using EFI
    and with KASLR disabled so that they match the vmlinux ELF binary.

  - Fix unloading and unbinding of PMU driver modules.

  - Fix generic mmiowb() when writeX() is called from preemptible
    context (reported by the riscv folks).

  - Fix ptrace hardware single-step interactions with signal handlers,
    system calls and reverse debugging.

  - Fix reporting of 64-bit x0 register for 32-bit tasks via 'perf_regs'.

  - Add comments describing syscall entry/exit tracing ABI"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  drivers/perf: Prevent forced unbinding of PMU drivers
  asm-generic/mmiowb: Allow mmiowb_set_pending() when preemptible()
  arm64: Use test_tsk_thread_flag() for checking TIF_SINGLESTEP
  arm64: ptrace: Use NO_SYSCALL instead of -1 in syscall_trace_enter()
  arm64: syscall: Expand the comment about ptrace and syscall(-1)
  arm64: ptrace: Add a comment describing our syscall entry/exit trap ABI
  arm64: compat: Ensure upper 32 bits of x0 are zero on syscall return
  arm64: ptrace: Override SPSR.SS when single-stepping is enabled
  arm64: ptrace: Consistently use pseudo-singlestep exceptions
  drivers/perf: Fix kernel panic when rmmod PMU modules during perf sampling
  efi/libstub/arm64: Retain 2MB kernel Image alignment if !KASLR
commit a570f41989
@@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el);
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+				 struct task_struct *task);
 
 void kernel_enable_single_step(struct pt_regs *regs);
 void kernel_disable_single_step(void);
@@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task,
 					     struct pt_regs *regs)
 {
 	unsigned long error = regs->regs[0];
+
+	if (is_compat_thread(task_thread_info(task)))
+		error = sign_extend64(error, 31);
+
 	return IS_ERR_VALUE(error) ? error : 0;
 }
 

@@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ? error : val;
+	if (error)
+		val = error;
+
+	if (is_compat_thread(task_thread_info(task)))
+		val = lower_32_bits(val);
+
+	regs->regs[0] = val;
 }
 
 #define SYSCALL_MAX_ARGS 6
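For context on why syscall_get_error() and syscall_set_return_value() need the compat special-casing above, here is a small user-space sketch (not kernel code; sign_extend64() and lower_32_bits() are re-implemented locally to mirror the kernel helpers) showing how a negative errno from a 32-bit task looks when read out of the full 64-bit x0:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring the kernel helpers used in the hunk above. */
static inline uint64_t lower_32_bits(uint64_t v)
{
	return v & 0xffffffffULL;
}

static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* x0 of a 32-bit task that returned -ENOSYS (-38): only bits [31:0] matter. */
	uint64_t x0 = 0x00000000ffffffdaULL;

	/* A naive 64-bit check sees a huge positive number, not an error. */
	printf("as 64-bit value: %lld\n", (long long)x0);

	/* What syscall_get_error() now does for compat threads. */
	long long err = sign_extend64(x0, 31);
	printf("sign-extended:   %lld\n", err);	/* -38 */

	/* What syscall_set_return_value() now does before writing x0 back. */
	printf("truncated:       0x%llx\n", (unsigned long long)lower_32_bits(err));
	return 0;
}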
@@ -93,6 +93,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_FSCHECK		(1 << TIF_FSCHECK)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_SVE		(1 << TIF_SVE)
 
@@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init);
 /*
  * Single step API and exception handling.
  */
-static void set_regs_spsr_ss(struct pt_regs *regs)
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
 	regs->pstate |= DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(set_regs_spsr_ss);
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
 
-static void clear_regs_spsr_ss(struct pt_regs *regs)
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
 	regs->pstate &= ~DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(clear_regs_spsr_ss);
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+
+#define set_regs_spsr_ss(r)	set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r)	clear_user_regs_spsr_ss(&(r)->user_regs)
 
 static DEFINE_SPINLOCK(debug_hook_lock);
 static LIST_HEAD(user_step_hook);

@@ -391,17 +394,26 @@ void user_rewind_single_step(struct task_struct *task)
 	 * If single step is active for this thread, then set SPSR.SS
 	 * to 1 to avoid returning to the active-pending state.
 	 */
-	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
 		set_regs_spsr_ss(task_pt_regs(task));
 }
 NOKPROBE_SYMBOL(user_rewind_single_step);
 
 void user_fastforward_single_step(struct task_struct *task)
 {
-	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
 		clear_regs_spsr_ss(task_pt_regs(task));
 }
 
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+				 struct task_struct *task)
+{
+	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+		set_user_regs_spsr_ss(regs);
+	else
+		clear_user_regs_spsr_ss(regs);
+}
+
 /* Kernel API */
 void kernel_enable_single_step(struct pt_regs *regs)
 {
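The user_*_single_step() helpers above back the ptrace single-step interface. As a reminder of the user-facing side that these fixes are meant to keep honest, a minimal tracer sketch (not from the kernel tree; error handling trimmed) looks roughly like this:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* let the parent take control */
		getpid();		/* a system call to step over */
		_exit(0);
	}

	waitpid(pid, &status, 0);	/* initial SIGSTOP */

	/*
	 * Each PTRACE_SINGLESTEP should stop with SIGTRAP on the next
	 * instruction; with this series that also holds when the stepped
	 * instruction is an SVC (a pseudo-step trap is reported after the
	 * syscall) or when the step lands in a signal handler.
	 */
	for (int i = 0; i < 10; i++) {
		ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
		if (waitpid(pid, &status, 0) < 0 || !WIFSTOPPED(status))
			break;
		printf("stop %d: signal %d\n", i, WSTOPSIG(status));
	}

	kill(pid, SIGKILL);
	return 0;
}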
@@ -1811,19 +1811,42 @@ static void tracehook_report_syscall(struct pt_regs *regs,
 	unsigned long saved_reg;
 
 	/*
-	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
-	 * used to denote syscall entry/exit:
+	 * We have some ABI weirdness here in the way that we handle syscall
+	 * exit stops because we indicate whether or not the stop has been
+	 * signalled from syscall entry or syscall exit by clobbering a general
+	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
+	 * and restoring its old value after the stop. This means that:
+	 *
+	 * - Any writes by the tracer to this register during the stop are
+	 *   ignored/discarded.
+	 *
+	 * - The actual value of the register is not available during the stop,
+	 *   so the tracer cannot save it and restore it later.
+	 *
+	 * - Syscall stops behave differently to seccomp and pseudo-step traps
+	 *   (the latter do not nobble any registers).
 	 */
 	regno = (is_compat_task() ? 12 : 7);
 	saved_reg = regs->regs[regno];
 	regs->regs[regno] = dir;
 
-	if (dir == PTRACE_SYSCALL_EXIT)
+	if (dir == PTRACE_SYSCALL_ENTER) {
+		if (tracehook_report_syscall_entry(regs))
+			forget_syscall(regs);
+		regs->regs[regno] = saved_reg;
+	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
 		tracehook_report_syscall_exit(regs, 0);
-	else if (tracehook_report_syscall_entry(regs))
-		forget_syscall(regs);
-
-	regs->regs[regno] = saved_reg;
+		regs->regs[regno] = saved_reg;
+	} else {
+		regs->regs[regno] = saved_reg;
+
+		/*
+		 * Signal a pseudo-step exception since we are stepping but
+		 * tracer modifications to the registers may have rewound the
+		 * state machine.
+		 */
+		tracehook_report_syscall_exit(regs, 1);
+	}
 }
 
 int syscall_trace_enter(struct pt_regs *regs)

@@ -1833,12 +1856,12 @@ int syscall_trace_enter(struct pt_regs *regs)
 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
 		if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
-			return -1;
+			return NO_SYSCALL;
 	}
 
 	/* Do the secure computing after ptrace; failures should be fast. */
 	if (secure_computing() == -1)
-		return -1;
+		return NO_SYSCALL;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, regs->syscallno);

@@ -1851,12 +1874,14 @@ int syscall_trace_enter(struct pt_regs *regs)
 
 void syscall_trace_exit(struct pt_regs *regs)
 {
+	unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
 	audit_syscall_exit(regs);
 
-	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+	if (flags & _TIF_SYSCALL_TRACEPOINT)
 		trace_sys_exit(regs, regs_return_value(regs));
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE))
+	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 
 	rseq_syscall(regs);

@@ -1934,8 +1959,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
  */
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
 {
-	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
-		regs->pstate &= ~DBG_SPSR_SS;
+	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+	user_regs_reset_single_step(regs, task);
 
 	if (is_compat_thread(task_thread_info(task)))
 		return valid_compat_regs(regs);
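To make the x7 clobbering described in the new comment concrete, here is a hedged user-space fragment (assumes a native arm64 build and an existing PTRACE_SYSCALL stop; error handling omitted) showing how a tracer tells entry from exit: x7 reads as 0 at syscall-entry stops and 1 at syscall-exit stops, and any value the tracer writes to x7 during the stop is discarded when the tracee resumes.

#include <elf.h>		/* NT_PRSTATUS */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_pt_regs (arm64) */

/* Call while 'pid' is stopped in a ptrace syscall stop. */
static void classify_syscall_stop(pid_t pid)
{
	struct user_pt_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

	if (regs.regs[7] == 0)
		printf("syscall-entry stop, nr %llu\n",
		       (unsigned long long)regs.regs[8]);	/* x8 holds the syscall number */
	else
		printf("syscall-exit stop, x0 = %llu\n",
		       (unsigned long long)regs.regs[0]);
}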
@@ -800,7 +800,6 @@ static void setup_restart_syscall(struct pt_regs *regs)
  */
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
 	sigset_t *oldset = sigmask_to_save();
 	int usig = ksig->sig;
 	int ret;

@@ -824,14 +823,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	 */
 	ret |= !valid_user_regs(&regs->user_regs, current);
 
-	/*
-	 * Fast forward the stepping logic so we step into the signal
-	 * handler.
-	 */
-	if (!ret)
-		user_fastforward_single_step(tsk);
-
-	signal_setup_done(ret, ksig, 0);
+	/* Step into the signal handler if we are stepping */
+	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
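Passing the TIF_SINGLESTEP state into signal_setup_done() is enough to get a step report on the first instruction of the handler because of what the generic helper does with its third argument. Paraphrased from kernel/signal.c (treat as a sketch rather than the exact source), it boils down to:

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);	/* ends with:
							 *   if (stepping)
							 *           ptrace_notify(SIGTRAP);
							 */
}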
@@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 		ret = do_ni_syscall(regs, scno);
 	}
 
+	if (is_compat_task())
+		ret = lower_32_bits(ret);
+
 	regs->regs[0] = ret;
 }
 

@@ -121,7 +124,21 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	user_exit();
 
 	if (has_syscall_work(flags)) {
-		/* set default errno for user-issued syscall(-1) */
+		/*
+		 * The de-facto standard way to skip a system call using ptrace
+		 * is to set the system call to -1 (NO_SYSCALL) and set x0 to a
+		 * suitable error code for consumption by userspace. However,
+		 * this cannot be distinguished from a user-issued syscall(-1)
+		 * and so we must set x0 to -ENOSYS here in case the tracer doesn't
+		 * issue the skip and we fall into trace_exit with x0 preserved.
+		 *
+		 * This is slightly odd because it also means that if a tracer
+		 * sets the system call number to -1 but does not initialise x0,
+		 * then x0 will be preserved for all system calls apart from a
+		 * user-issued syscall(-1). However, requesting a skip and not
+		 * setting the return value is unlikely to do anything sensible
+		 * anyway.
+		 */
 		if (scno == NO_SYSCALL)
 			regs->regs[0] = -ENOSYS;
 		scno = syscall_trace_enter(regs);

@@ -139,7 +156,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
 		flags = current_thread_info()->flags;
-		if (!has_syscall_work(flags)) {
+		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
 			/*
 			 * We're off to userspace, where interrupts are
 			 * always enabled after we restore the flags from
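The expanded comment above describes the tracer-side convention for skipping a system call. A hedged user-space sketch of that convention on arm64 (error handling omitted; assumes the tracee is already in a syscall-entry stop) uses the NT_ARM_SYSTEM_CALL regset to set the syscall number to -1 and NT_PRSTATUS to plant the fake return value in x0:

#include <elf.h>		/* NT_PRSTATUS, NT_ARM_SYSTEM_CALL */
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_pt_regs (arm64) */

/* Skip the syscall 'pid' is about to enter and fake a -EPERM failure. */
static void skip_current_syscall(pid_t pid)
{
	int scno = -1;					/* NO_SYSCALL */
	struct user_pt_regs regs;
	struct iovec iov;

	iov.iov_base = &scno;
	iov.iov_len  = sizeof(scno);
	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);

	iov.iov_base = &regs;
	iov.iov_len  = sizeof(regs);
	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
	regs.regs[0] = (unsigned long)-EPERM;		/* value userspace will see */
	ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}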
@@ -35,13 +35,16 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN
- * (which accounts for the alignment of statically allocated objects such as
- * the swapper stack.)
+ * Although relocatable kernels can fix up the misalignment with respect to
+ * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
+ * sync with those recorded in the vmlinux when kaslr is disabled but the
+ * image required relocation anyway. Therefore retain 2M alignment unless
+ * KASLR is in use.
  */
-static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN
-								  : MIN_KIMG_ALIGN;
+static u64 min_kimg_align(void)
+{
+	return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+}
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
 				 unsigned long *image_size,

@@ -74,21 +77,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 
 	kernel_size = _edata - _text;
 	kernel_memsize = kernel_size + (_end - _edata);
-	*reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align;
+	*reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align();
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
 		/*
 		 * If KASLR is enabled, and we have some randomness available,
 		 * locate the kernel at a randomized offset in physical memory.
 		 */
-		status = efi_random_alloc(*reserve_size, min_kimg_align,
+		status = efi_random_alloc(*reserve_size, min_kimg_align(),
 					  reserve_addr, phys_seed);
 	} else {
 		status = EFI_OUT_OF_RESOURCES;
 	}
 
 	if (status != EFI_SUCCESS) {
-		if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) {
+		if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) {
 			/*
 			 * Just execute from wherever we were loaded by the
 			 * UEFI PE/COFF loader if the alignment is suitable.

@@ -99,7 +102,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 	}
 
 	status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-					    ULONG_MAX, min_kimg_align);
+					    ULONG_MAX, min_kimg_align());
 
 	if (status != EFI_SUCCESS) {
 		efi_err("Failed to relocate kernel\n");

@@ -108,7 +111,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 		}
 	}
 
-	*image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align;
+	*image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align();
 	memcpy((void *)*image_addr, _text, kernel_size);
 
 	return EFI_SUCCESS;
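To put numbers on the alignment argument (a worked example under the default configuration, where TEXT_OFFSET is 0x80000 and MIN_KIMG_ALIGN is SZ_2M; both can differ in custom builds): with KASLR disabled, min_kimg_align() returns 2 MiB, so reserve_addr is 2 MiB aligned and *image_addr = reserve_addr + (0x80000 % 0x200000) = reserve_addr + 0x80000. The Image then sits at the same offset within a 2 MiB window that the vmlinux link assumed, and the runtime text addresses line up with the ELF symbols. With only EFI_KIMG_ALIGN (typically 64 KiB) the image could start at any 64 KiB boundary, and the relocated text would be subtly offset from the addresses recorded in vmlinux, which is exactly the mismatch this change avoids.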
@@ -19,7 +19,7 @@
 #include "efistub.h"
 
 bool efi_nochunk;
-bool efi_nokaslr;
+bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
 bool efi_noinitrd;
 int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
 bool efi_novamap;
@@ -1718,6 +1718,7 @@ static struct platform_driver cci_pmu_driver = {
 	.driver = {
 		   .name = DRIVER_NAME,
 		   .of_match_table = arm_cci_pmu_matches,
+		   .suppress_bind_attrs = true,
 		  },
 	.probe = cci_pmu_probe,
 	.remove = cci_pmu_remove,

@@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = {
 	.driver = {
 		.name = "arm-ccn",
 		.of_match_table = arm_ccn_match,
+		.suppress_bind_attrs = true,
 	},
 	.probe = arm_ccn_probe,
 	.remove = arm_ccn_remove,

@@ -757,6 +757,7 @@ static struct platform_driver dsu_pmu_driver = {
 	.driver = {
 		.name	= DRVNAME,
 		.of_match_table = of_match_ptr(dsu_pmu_of_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = dsu_pmu_device_probe,
 	.remove = dsu_pmu_device_remove,

@@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, smmu_pmu);
 
 	smmu_pmu->pmu = (struct pmu) {
+		.module		= THIS_MODULE,
 		.task_ctx_nr    = perf_invalid_context,
 		.pmu_enable	= smmu_pmu_enable,
 		.pmu_disable	= smmu_pmu_disable,

@@ -859,6 +860,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
 static struct platform_driver smmu_pmu_driver = {
 	.driver = {
 		.name = "arm-smmu-v3-pmcg",
+		.suppress_bind_attrs = true,
 	},
 	.probe = smmu_pmu_probe,
 	.remove = smmu_pmu_remove,

@@ -1226,6 +1226,7 @@ static struct platform_driver arm_spe_pmu_driver = {
 	.driver	= {
 		.name		= DRVNAME,
 		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe	= arm_spe_pmu_device_probe,
 	.remove	= arm_spe_pmu_device_remove,

@@ -512,6 +512,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
 {
 	*pmu = (struct ddr_pmu) {
 		.pmu = (struct pmu) {
+			.module	      = THIS_MODULE,
 			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
 			.task_ctx_nr = perf_invalid_context,
 			.attr_groups = attr_groups,

@@ -706,6 +707,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
 	.driver         = {
 		.name   = "imx-ddr-pmu",
 		.of_match_table = imx_ddr_pmu_dt_ids,
+		.suppress_bind_attrs = true,
 	},
 	.probe          = ddr_perf_probe,
 	.remove         = ddr_perf_remove,

@@ -378,6 +378,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
 		      ddrc_pmu->sccl_id, ddrc_pmu->index_id);
 	ddrc_pmu->pmu = (struct pmu) {
 		.name		= name,
+		.module		= THIS_MODULE,
 		.task_ctx_nr	= perf_invalid_context,
 		.event_init	= hisi_uncore_pmu_event_init,
 		.pmu_enable	= hisi_uncore_pmu_enable,

@@ -418,6 +419,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
 	.driver = {
 		.name = "hisi_ddrc_pmu",
 		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = hisi_ddrc_pmu_probe,
 	.remove = hisi_ddrc_pmu_remove,

@@ -390,6 +390,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
 		      hha_pmu->sccl_id, hha_pmu->index_id);
 	hha_pmu->pmu = (struct pmu) {
 		.name		= name,
+		.module		= THIS_MODULE,
 		.task_ctx_nr	= perf_invalid_context,
 		.event_init	= hisi_uncore_pmu_event_init,
 		.pmu_enable	= hisi_uncore_pmu_enable,

@@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
 	.driver = {
 		.name = "hisi_hha_pmu",
 		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = hisi_hha_pmu_probe,
 	.remove = hisi_hha_pmu_remove,

@@ -380,6 +380,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
 		      l3c_pmu->sccl_id, l3c_pmu->index_id);
 	l3c_pmu->pmu = (struct pmu) {
 		.name		= name,
+		.module		= THIS_MODULE,
 		.task_ctx_nr	= perf_invalid_context,
 		.event_init	= hisi_uncore_pmu_event_init,
 		.pmu_enable	= hisi_uncore_pmu_enable,

@@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
 	.driver = {
 		.name = "hisi_l3c_pmu",
 		.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = hisi_l3c_pmu_probe,
 	.remove = hisi_l3c_pmu_remove,

@@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = {
 	.driver = {
 		.name = "qcom-l2cache-pmu",
 		.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = l2_cache_pmu_probe,
 	.remove = l2_cache_pmu_remove,

@@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = {
 	.driver = {
 		.name = "qcom-l3cache-pmu",
 		.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = qcom_l3_cache_pmu_probe,
 };

@@ -1017,6 +1017,7 @@ static struct platform_driver tx2_uncore_driver = {
 	.driver = {
 		.name		= "tx2-uncore-pmu",
 		.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = tx2_uncore_probe,
 	.remove = tx2_uncore_remove,

@@ -1975,6 +1975,7 @@ static struct platform_driver xgene_pmu_driver = {
 		.name		= "xgene-pmu",
 		.of_match_table = xgene_pmu_of_match,
 		.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 };
 
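Taken together, the pattern applied across all of these PMU drivers is the same two lines. A minimal, hypothetical sketch (a made-up "foo" PMU rather than any of the drivers above; callbacks are stubs) shows where each field goes and what it buys:

#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

static int foo_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return -EOPNOTSUPP;	/* stub: this sketch counts nothing */
}

static int  foo_pmu_add(struct perf_event *event, int flags)	{ return 0; }
static void foo_pmu_del(struct perf_event *event, int flags)	{ }
static void foo_pmu_start(struct perf_event *event, int flags)	{ }
static void foo_pmu_stop(struct perf_event *event, int flags)	{ }
static void foo_pmu_read(struct perf_event *event)		{ }

static struct pmu foo_pmu = {
	/*
	 * The perf core takes a reference on this module for every open
	 * event, so rmmod is refused instead of leaving dangling callbacks
	 * behind (the panic fixed by the rmmod patch in this series).
	 */
	.module		= THIS_MODULE,
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= foo_pmu_event_init,
	.add		= foo_pmu_add,
	.del		= foo_pmu_del,
	.start		= foo_pmu_start,
	.stop		= foo_pmu_stop,
	.read		= foo_pmu_read,
};

static int foo_pmu_probe(struct platform_device *pdev)
{
	return perf_pmu_register(&foo_pmu, "foo_pmu", -1);
}

static int foo_pmu_remove(struct platform_device *pdev)
{
	perf_pmu_unregister(&foo_pmu);
	return 0;
}

static struct platform_driver foo_pmu_driver = {
	.driver = {
		.name			= "foo-pmu",
		/*
		 * No "unbind" via sysfs: the device cannot be yanked out from
		 * under active perf sessions by writing to the driver's
		 * unbind attribute.
		 */
		.suppress_bind_attrs	= true,
	},
	.probe	= foo_pmu_probe,
	.remove	= foo_pmu_remove,
};
module_platform_driver(foo_pmu_driver);

MODULE_LICENSE("GPL");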
@@ -27,7 +27,7 @@
 #include <asm/smp.h>
 
 DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state()	this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state()	raw_cpu_ptr(&__mmiowb_state)
 #else
 #define __mmiowb_state()	arch_mmiowb_state()
 #endif	/* arch_mmiowb_state */

@@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
 static inline void mmiowb_set_pending(void)
 {
 	struct mmiowb_state *ms = __mmiowb_state();
-	ms->mmiowb_pending = ms->nesting_count;
+
+	if (likely(ms->nesting_count))
+		ms->mmiowb_pending = ms->nesting_count;
 }
 
 static inline void mmiowb_spin_lock(void)
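The functional change here is that mmiowb_set_pending() no longer insists on being called with preemption disabled. One way to see why that matters is a hypothetical driver fragment (not from the tree; the device and register offset are made up): the writeX() accessors call mmiowb_set_pending() unconditionally, including from plain process context such as probe, where nothing holds a spinlock.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical device: one 32-bit "enable" register at offset 0. */
static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Preemptible context, no spinlock held. writel() ends up in
	 * mmiowb_set_pending(); with the old this_cpu_ptr()-based helper this
	 * could trigger a DEBUG_PREEMPT-style splat about per-CPU access from
	 * preemptible context on architectures using the generic mmiowb
	 * tracking (as reported on riscv). With raw_cpu_ptr() plus the
	 * nesting_count test above it is harmless: nothing is recorded unless
	 * a lock is actually held.
	 */
	writel(0x1, base);

	return 0;
}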