KVM: arm64: Introduce pkvm_dump_backtrace()
Dumps the pKVM hypervisor backtrace from EL1 by reading the unwound
addresses from the shared stacktrace buffer.

The nVHE hyp backtrace is dumped on hyp_panic(), before panicking the host.

[ 111.623091] kvm [367]: nVHE call trace:
[ 111.623215] kvm [367]: [<ffff8000090a6570>] __kvm_nvhe_hyp_panic+0xac/0xf8
[ 111.623448] kvm [367]: [<ffff8000090a65cc>] __kvm_nvhe_hyp_panic_bad_stack+0x10/0x10
[ 111.623642] kvm [367]: [<ffff8000090a61e4>] __kvm_nvhe_recursive_death+0x24/0x34
   . . .
[ 111.640366] kvm [367]: [<ffff8000090a61e4>] __kvm_nvhe_recursive_death+0x24/0x34
[ 111.640467] kvm [367]: [<ffff8000090a61e4>] __kvm_nvhe_recursive_death+0x24/0x34
[ 111.640574] kvm [367]: [<ffff8000090a5de4>] __kvm_nvhe___kvm_vcpu_run+0x30/0x40c
[ 111.640676] kvm [367]: [<ffff8000090a8b64>] __kvm_nvhe_handle___kvm_vcpu_run+0x30/0x48
[ 111.640778] kvm [367]: [<ffff8000090a88b8>] __kvm_nvhe_handle_trap+0xc4/0x128
[ 111.640880] kvm [367]: [<ffff8000090a7864>] __kvm_nvhe___host_exit+0x64/0x64
[ 111.640996] kvm [367]: ---[ end nVHE call trace ]---

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220726073750.3219117-18-kaleshsingh@google.com
parent 75e9459e48
commit 3a7e1b55aa
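Note (context, not part of this patch): the diff below only adds the host-side consumer. The hypervisor side, added elsewhere in this series, records the unwound addresses into the shared per-CPU pkvm_stacktrace buffer and keeps it null-terminated, which is the convention the consumer loop relies on. A minimal sketch of what that producer looks like; the callback name and bounds check here are illustrative assumptions, not the exact upstream code:

/*
 * Illustrative sketch only (hyp-side producer; helper name is assumed):
 * record each unwound address into the shared per-CPU buffer and keep
 * the buffer null-terminated for the host-side dumper.
 */
static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long *stacktrace = (unsigned long *)this_cpu_ptr(pkvm_stacktrace);
	int *idx = (int *)arg;

	/* Keep one slot free for the null terminator the host looks for. */
	if (*idx >= (int)(NVHE_STACKTRACE_SIZE / sizeof(long)) - 1)
		return false;

	stacktrace[(*idx)++] = where;
	stacktrace[*idx] = 0UL;		/* terminator after every entry */

	return true;
}

Writing the terminator after every entry keeps the buffer consistent even if the unwinder stops early, so the host never walks past the last valid address.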
@@ -371,6 +371,39 @@ static void hyp_dump_backtrace(unsigned long hyp_offset)
 	kvm_nvhe_dump_backtrace_end();
 }
 
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
+			 pkvm_stacktrace);
+
+/*
+ * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * Dumping of the pKVM HYP backtrace is done by reading the
+ * stack addresses from the shared stacktrace buffer, since the
+ * host cannot directly access hypervisor memory in protected
+ * mode.
+ */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	unsigned long *stacktrace
+		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
+	int i, size = NVHE_STACKTRACE_SIZE / sizeof(long);
+
+	kvm_nvhe_dump_backtrace_start();
+	/* The saved stacktrace is terminated by a null entry */
+	for (i = 0; i < size && stacktrace[i]; i++)
+		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
+	kvm_nvhe_dump_backtrace_end();
+}
+#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
+}
+#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
 /*
  * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
  *
@@ -379,7 +412,7 @@ static void hyp_dump_backtrace(unsigned long hyp_offset)
 static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
 {
 	if (is_protected_kvm_enabled())
-		return;
+		pkvm_dump_backtrace(hyp_offset);
 	else
 		hyp_dump_backtrace(hyp_offset);
 }
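For completeness, a rough sketch of where this dispatch is driven from: the host's hyp panic handling computes the hypervisor text offset and dumps the trace before panicking, as described in the commit message. The function name and signature below are simplified assumptions, not the exact upstream handler:

/*
 * Simplified, illustrative sketch of the host-side hyp panic path (the
 * real handler also derives hyp_offset from the faulting ELR and prints
 * the register state).
 */
static void __noreturn host_handle_hyp_panic(unsigned long hyp_offset)
{
	/* Picks pkvm_dump_backtrace() or hyp_dump_backtrace() internally. */
	kvm_nvhe_dump_backtrace(hyp_offset);

	panic("HYP panic\n");
}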