; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections | FileCheck %s --check-prefix=X64
; [x86/SLH] Teach the x86 speculative load hardening pass to harden against
; v1.2 BCBS attacks directly.
;
; Attacks using Spectre v1.2 (a subset of BCBS) are described in the paper here:
; https://people.csail.mit.edu/vlk/spectre11.pdf
;
; The core idea is to speculatively store over the address in a vtable, jump
; table, or other target of indirect control flow that will subsequently be
; loaded. Speculative execution after such a store can forward the stored value
; to subsequent loads, and if the result is called or jumped to, speculative
; execution will be steered to this potentially attacker-controlled address.
;
; Until now, this could be mitigated by enabling retpolines. However, that is a
; relatively expensive technique for mitigating this particular flavor,
; especially because in most cases SLH will already have mitigated it. To fully
; mitigate this with SLH, we need to do two core things:
; 1) Unfold loads from calls and jumps, allowing the loads to be post-load
;    hardened.
; 2) Force hardening of incoming registers even if we didn't end up needing to
;    harden the load itself.
;
; We need both of these because hardening calls and jumps against this variant
; differs in an important way from hardening against leaks of secret data.
; Because the "bad" data here isn't a secret but is instead speculatively
; stored by the attacker, it may be loaded from any address, regardless of
; whether it is read-only memory, mapped memory, or a "hardened" address. The
; only 100% effective way to harden these instructions is to harden their
; operand itself. But to the extent possible, we'd like to take advantage of
; all the other hardening going on; we just need a fallback in case none of it
; happened to cover the particular input to the control-transfer instruction.
;
; Users of SLH currently pay a 2% to 6% performance overhead for retpolines,
; but this mechanism is expected to be substantially cheaper. However, it does
; not mitigate everything retpolines do -- most notably, variant #2 is not in
; *any way* mitigated by this technique. So users of SLH may still want to
; enable retpolines, and the implementation is carefully designed to gracefully
; leverage retpolines to avoid the need for further hardening here when they
; are enabled.
;
; Differential Revision: https://reviews.llvm.org/D49663
; llvm-svn: 337878
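;
; For readers of the checks below, here is a hand-annotated sketch (assumed for
; illustration, not generated output) of the hardened indirect-call sequence
; that the X64 checks in this file expect; register choices mirror
; @test_indirect_call:
;
;   movq %rsp, %rax      # copy %rsp, whose high bits carry the speculation
;                        # predicate state that SLH maintains
;   movq $-1, %rcx       # all-ones constant used to poison the state at
;                        # conditional branches (clobbered by the load below in
;                        # this straight-line sequence)
;   sarq $63, %rax       # broadcast the top bit: 0 on the architectural path,
;                        # all-ones once misspeculation has been detected
;   movq (%rdi), %rcx    # the call-target load, unfolded out of the call so it
;                        # can be hardened after the load
;   orq %rax, %rcx       # harden the loaded target: under misspeculation it
;                        # becomes a non-canonical all-ones pointer rather than
;                        # a potentially attacker-stored address
;   shlq $47, %rax       # merge the predicate state back into the high bits of
;   orq %rax, %rsp       # %rsp so it is preserved across the call
;   callq *%rcx          # the hardened indirect call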
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections -mattr=+retpoline | FileCheck %s --check-prefix=X64-RETPOLINE
;
; FIXME: Add support for 32-bit.
@global_fnptr = external global i32 ()*

@global_blockaddrs = constant [4 x i8*] [
  i8* blockaddress(@test_indirectbr_global, %bb0),
  i8* blockaddress(@test_indirectbr_global, %bb1),
  i8* blockaddress(@test_indirectbr_global, %bb2),
  i8* blockaddress(@test_indirectbr_global, %bb3)
]
define i32 @test_indirect_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_call:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rcx
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirect_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rax
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rcx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = call i32 %fp()
  ret i32 %v
}
define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_tail_call:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = tail call i32 %fp()
  ret i32 %v
}
define i32 @test_indirect_call_global() nounwind {
; X64-LABEL: test_indirect_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq {{.*}}(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rcx
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirect_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rax
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq {{.*}}(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rcx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = call i32 %fp()
  ret i32 %v
}
define i32 @test_indirect_tail_call_global() nounwind {
; X64-LABEL: test_indirect_tail_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq {{.*}}(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq {{.*}}(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = tail call i32 %fp()
  ret i32 %v
}
define i32 @test_indirectbr(i8** %ptr) nounwind {
; X64-LABEL: test_indirectbr:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .LBB4_1: # %bb0
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_4: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_5: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: .LBB4_2: # %bb0
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirectbr:
; X64-RETPOLINE: # %bb.0: # %entry
entry:
  %a = load i8*, i8** %ptr
  indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42
}
define i32 @test_indirectbr_global(i32 %idx) nounwind {
; X64-LABEL: test_indirectbr_global:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: movslq %edi, %rax
; X64-NEXT: movq global_blockaddrs(,%rax,8), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .Ltmp0: # Block address taken
; X64-NEXT: .LBB5_1: # %bb0
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp1: # Block address taken
; X64-NEXT: .LBB5_4: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp2: # Block address taken
; X64-NEXT: .LBB5_5: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp3: # Block address taken
; X64-NEXT: .LBB5_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: .LBB5_2: # %bb0
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirectbr_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: movslq %edi, %rdx
; X64-RETPOLINE-NEXT: movq global_blockaddrs(,%rdx,8), %rdx
; X64-RETPOLINE-NEXT: orq %rcx, %rdx
; X64-RETPOLINE-NEXT: cmpq $2, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_5
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $3, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_6
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $4, %rdx
; X64-RETPOLINE-NEXT: jne .LBB6_3
; X64-RETPOLINE-NEXT: .Ltmp0: # Block address taken
; X64-RETPOLINE-NEXT: # %bb.7: # %bb3
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $42, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp1: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_5: # %bb1
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $7, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp2: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_6: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp3: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_3: # %bb0
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $2, %eax
; X64-RETPOLINE-NEXT: .LBB6_4: # %bb0
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
  %ptr = getelementptr [4 x i8*], [4 x i8*]* @global_blockaddrs, i32 0, i32 %idx
  %a = load i8*, i8** %ptr
  indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42
}
; This function's switch is crafted to trigger jump-table lowering in the x86
; backend so that we can test how the exact jump table lowering behaves.
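;
; A hand-annotated sketch (for illustration, not generated output) of the
; hardened jump-table dispatch that the X64 checks below expect; register roles
; mirror the checks, with the predicate state in %rcx and the all-ones poison
; value in %rax:
;
;   cmpl $3, %edi                  # bounds check on the switch index
;   ja .LBB6_2                     # out of range: jump to the default case
;   cmovaq %rax, %rcx              # on the fall-through, poison the state if
;                                  # this branch was actually mispredicted
;   movl %edi, %eax                # zero-extend the index
;   movq .LJTI6_0(,%rax,8), %rax   # load the target out of the jump table
;   orq %rcx, %rax                 # harden the loaded entry with the state
;   jmpq *%rax                     # hardened indirect branch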
define i32 @test_switch_jumptable(i32 %idx) nounwind {
; X64-LABEL: test_switch_jumptable:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: cmpl $3, %edi
; X64-NEXT: ja .LBB6_2
; X64-NEXT: # %bb.1: # %entry
; X64-NEXT: cmovaq %rax, %rcx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movq .LJTI6_0(,%rax,8), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .LBB6_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_2: # %bb0
; X64-NEXT: cmovbeq %rax, %rcx
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_5: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_6: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_7: # %bb5
; X64-NEXT: movl $11, %eax
; X64-NEXT: .LBB6_4: # %bb1
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_switch_jumptable:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jg .LBB7_4
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmovgq %rax, %rcx
; X64-RETPOLINE-NEXT: testl %edi, %edi
; X64-RETPOLINE-NEXT: je .LBB7_8
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.3: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_4: # %entry
; X64-RETPOLINE-NEXT: cmovleq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $2, %edi
; X64-RETPOLINE-NEXT: je .LBB7_9
; X64-RETPOLINE-NEXT: # %bb.5: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $3, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.10: # %bb5
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $11, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_6:
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $2, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_8: # %bb1
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $7, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_9: # %bb3
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $42, %eax
; X64-RETPOLINE-NEXT: .LBB7_7: # %bb0
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
  switch i32 %idx, label %bb0 [
    i32 0, label %bb1
    i32 1, label %bb2
    i32 2, label %bb3
    i32 3, label %bb5
  ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42

bb5:
  ret i32 11
}