; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections | FileCheck %s --check-prefix=X64
; FIXME: Fix machine verifier issues and remove -verify-machineinstrs=0. PR39451.
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -relocation-model pic -data-sections -verify-machineinstrs=0 | FileCheck %s --check-prefix=X64-PIC
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections -mattr=+retpoline | FileCheck %s --check-prefix=X64-RETPOLINE
;
; FIXME: Add support for 32-bit.

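; A function pointer global; @test_indirect_call_global below loads its value
; and makes an indirect call through it.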
@global_fnptr = external global i32 ()*
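; A constant table of block addresses into @test_indirectbr_global, used to
; exercise hardening of indirectbr through a global jump table.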
@global_blockaddrs = constant [4 x i8*] [
  i8* blockaddress(@test_indirectbr_global, %bb0),
  i8* blockaddress(@test_indirectbr_global, %bb1),
  i8* blockaddress(@test_indirectbr_global, %bb2),
  i8* blockaddress(@test_indirectbr_global, %bb3)
]
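; An indirect call through a function pointer loaded from memory. The loaded
; pointer is merged with the speculation-poison mask (orq %rax, %rcx) before
; the call, so a misspeculated path cannot redirect the call to an
; attacker-chosen target; the stack pointer carries the state across the call.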
define i32 @test_indirect_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_call:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rbx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: .Lslh_ret_addr0:
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: cmpq $.Lslh_ret_addr0, %rdx
; X64-NEXT: cmovneq %rbx, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X64-PIC-LABEL: test_indirect_call:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: pushq %rbx
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: movq $-1, %rbx
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: movq (%rdi), %rcx
; X64-PIC-NEXT: orq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: callq *%rcx
; X64-PIC-NEXT: .Lslh_ret_addr0:
; X64-PIC-NEXT: movq %rsp, %rcx
; X64-PIC-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-PIC-NEXT: sarq $63, %rcx
; X64-PIC-NEXT: leaq .Lslh_ret_addr0(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rbx, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: popq %rbx
; X64-PIC-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirect_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rbx
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rbx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: .Lslh_ret_addr0:
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: cmpq $.Lslh_ret_addr0, %rdx
; X64-RETPOLINE-NEXT: cmovneq %rbx, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rbx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = call i32 %fp()
  ret i32 %v
}
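; The same indirect call, but as a tail call: the loaded pointer is hardened
; with the poison mask before the indirect jump.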
define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_tail_call:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-PIC-LABEL: test_indirect_tail_call:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: movq $-1, %rcx
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: movq (%rdi), %rcx
; X64-PIC-NEXT: orq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = tail call i32 %fp()
  ret i32 %v
}
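; An indirect call through a function pointer stored in a global. In the PIC
; case the pointer is first loaded via the GOT and then hardened.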
define i32 @test_indirect_call_global() nounwind {
; X64-LABEL: test_indirect_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rbx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq global_fnptr(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: .Lslh_ret_addr1:
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: cmpq $.Lslh_ret_addr1, %rdx
; X64-NEXT: cmovneq %rbx, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: retq
;
; X64-PIC-LABEL: test_indirect_call_global:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: pushq %rbx
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: movq $-1, %rbx
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: movq global_fnptr@GOTPCREL(%rip), %rcx
; X64-PIC-NEXT: movq (%rcx), %rcx
; X64-PIC-NEXT: orq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: callq *%rcx
; X64-PIC-NEXT: .Lslh_ret_addr1:
; X64-PIC-NEXT: movq %rsp, %rcx
; X64-PIC-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-PIC-NEXT: sarq $63, %rcx
; X64-PIC-NEXT: leaq .Lslh_ret_addr1(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rbx, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: popq %rbx
; X64-PIC-NEXT: retq
;
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-LABEL: test_indirect_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rbx
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rbx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq global_fnptr(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: .Lslh_ret_addr1:
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: cmpq $.Lslh_ret_addr1, %rdx
; X64-RETPOLINE-NEXT: cmovneq %rbx, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rbx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = call i32 %fp()
  ret i32 %v
}

define i32 @test_indirect_tail_call_global() nounwind {
; X64-LABEL: test_indirect_tail_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq global_fnptr(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-PIC-LABEL: test_indirect_tail_call_global:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: movq $-1, %rcx
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: movq global_fnptr@GOTPCREL(%rip), %rcx
; X64-PIC-NEXT: movq (%rcx), %rcx
; X64-PIC-NEXT: orq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq global_fnptr(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = tail call i32 %fp()
  ret i32 %v
}

define i32 @test_indirectbr(i8** %ptr) nounwind {
; X64-LABEL: test_indirectbr:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: movq (%rdi), %rdx
; X64-NEXT: orq %rcx, %rdx
; X64-NEXT: jmpq *%rdx
; X64-NEXT: .LBB4_1: # Block address taken
; X64-NEXT: # %bb0
; X64-NEXT: cmpq $.LBB4_1, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $2, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB4_3: # Block address taken
; X64-NEXT: # %bb2
; X64-NEXT: cmpq $.LBB4_3, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $13, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB4_4: # Block address taken
; X64-NEXT: # %bb3
; X64-NEXT: cmpq $.LBB4_4, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $42, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB4_2: # Block address taken
; X64-NEXT: # %bb1
; X64-NEXT: cmpq $.LBB4_2, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $7, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-PIC-LABEL: test_indirectbr:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %rcx
; X64-PIC-NEXT: movq $-1, %rax
; X64-PIC-NEXT: sarq $63, %rcx
; X64-PIC-NEXT: movq (%rdi), %rdx
; X64-PIC-NEXT: orq %rcx, %rdx
; X64-PIC-NEXT: jmpq *%rdx
; X64-PIC-NEXT: .LBB4_1: # Block address taken
; X64-PIC-NEXT: # %bb0
; X64-PIC-NEXT: leaq .LBB4_1(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $2, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB4_3: # Block address taken
; X64-PIC-NEXT: # %bb2
; X64-PIC-NEXT: leaq .LBB4_3(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $13, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB4_4: # Block address taken
; X64-PIC-NEXT: # %bb3
; X64-PIC-NEXT: leaq .LBB4_4(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $42, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB4_2: # Block address taken
; X64-PIC-NEXT: # %bb1
; X64-PIC-NEXT: leaq .LBB4_2(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $7, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirectbr:
; X64-RETPOLINE: # %bb.0: # %entry
entry:
  %a = load i8*, i8** %ptr
  indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42
}

define i32 @test_indirectbr_global(i32 %idx) nounwind {
|
|
|
|
; X64-LABEL: test_indirectbr_global:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: movq %rsp, %rcx
|
|
|
|
; X64-NEXT: movq $-1, %rax
|
|
|
|
; X64-NEXT: sarq $63, %rcx
|
2018-09-04 18:44:21 +08:00
|
|
|
; X64-NEXT: movslq %edi, %rdx
|
|
|
|
; X64-NEXT: movq global_blockaddrs(,%rdx,8), %rdx
|
|
|
|
; X64-NEXT: orq %rcx, %rdx
|
|
|
|
; X64-NEXT: jmpq *%rdx
|
2018-07-23 15:51:51 +08:00
|
|
|
; X64-NEXT: .Ltmp0: # Block address taken
|
|
|
|
; X64-NEXT: .LBB5_1: # %bb0
|
2018-09-04 18:44:21 +08:00
|
|
|
; X64-NEXT: cmpq $.LBB5_1, %rdx
|
|
|
|
; X64-NEXT: cmovneq %rax, %rcx
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-NEXT: shlq $47, %rcx
|
2018-07-23 15:51:51 +08:00
|
|
|
; X64-NEXT: movl $2, %eax
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-NEXT: orq %rcx, %rsp
|
|
|
|
; X64-NEXT: retq
|
2018-07-23 15:51:51 +08:00
|
|
|
; X64-NEXT: .Ltmp1: # Block address taken
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-NEXT: .LBB5_3: # %bb2
|
|
|
|
; X64-NEXT: cmpq $.LBB5_3, %rdx
|
2018-09-04 18:44:21 +08:00
|
|
|
; X64-NEXT: cmovneq %rax, %rcx
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-NEXT: shlq $47, %rcx
|
2018-07-23 15:51:51 +08:00
|
|
|
; X64-NEXT: movl $13, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .Ltmp2: # Block address taken
; X64-NEXT: .LBB5_4: # %bb3
; X64-NEXT: cmpq $.LBB5_4, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $42, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .Ltmp3: # Block address taken
; X64-NEXT: .LBB5_2: # %bb1
; X64-NEXT: cmpq $.LBB5_2, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $7, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
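; NOTE: In the X64 checks above, each indirectbr successor compares the
; loaded target in %rdx against its own block address (cmpq $.LBB5_N, %rdx)
; and cmovneq poisons the SLH predicate state in %rcx when the edge was
; misspeculated; the state is then merged into the stack pointer
; (shlq $47; orq %rcx, %rsp) before every retq. This reading of the checked
; assembly is descriptive only.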
;
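; NOTE: The X64-PIC checks below are expected to follow the same pattern as
; the X64 checks above, differing only in PIC addressing: the blockaddress
; table is reached through global_blockaddrs@GOTPCREL(%rip) and each
; successor rematerializes its own address PC-relatively
; (leaq .LBB5_N(%rip), %rsi) for the cmpq/cmovneq predicate-state update.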
; X64-PIC-LABEL: test_indirectbr_global:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %rcx
; X64-PIC-NEXT: movq $-1, %rax
; X64-PIC-NEXT: sarq $63, %rcx
; X64-PIC-NEXT: movslq %edi, %rdx
; X64-PIC-NEXT: movq global_blockaddrs@GOTPCREL(%rip), %rsi
; X64-PIC-NEXT: movq (%rsi,%rdx,8), %rdx
; X64-PIC-NEXT: orq %rcx, %rdx
; X64-PIC-NEXT: jmpq *%rdx
; X64-PIC-NEXT: .Ltmp0: # Block address taken
; X64-PIC-NEXT: .LBB5_1: # %bb0
; X64-PIC-NEXT: leaq .LBB5_1(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $2, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .Ltmp1: # Block address taken
; X64-PIC-NEXT: .LBB5_3: # %bb2
; X64-PIC-NEXT: leaq .LBB5_3(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $13, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .Ltmp2: # Block address taken
; X64-PIC-NEXT: .LBB5_4: # %bb3
; X64-PIC-NEXT: leaq .LBB5_4(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $42, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .Ltmp3: # Block address taken
; X64-PIC-NEXT: .LBB5_2: # %bb1
; X64-PIC-NEXT: leaq .LBB5_2(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $7, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
;
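; NOTE: In the X64-RETPOLINE checks below the indirectbr is not emitted as an
; indirect jump at all (presumably because an indirect jmp would itself have
; to go through a retpoline thunk); instead the loaded blockaddress value in
; %rdx is tested against small integer case values in a compare-and-branch
; chain (cmpq $2/$3/$4 with je/jne), with cmoveq/cmovneq keeping the
; predicate state in sync with the edge actually taken before the usual
; shlq $47; orq %rcx, %rsp; retq sequence in each successor.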
; X64-RETPOLINE-LABEL: test_indirectbr_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: movslq %edi, %rdx
; X64-RETPOLINE-NEXT: movq global_blockaddrs(,%rdx,8), %rdx
; X64-RETPOLINE-NEXT: orq %rcx, %rdx
; X64-RETPOLINE-NEXT: cmpq $2, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_4
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $3, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_5
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $4, %rdx
; X64-RETPOLINE-NEXT: jne .LBB6_3
; X64-RETPOLINE-NEXT: .Ltmp0: # Block address taken
; X64-RETPOLINE-NEXT: # %bb.6: # %bb3
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $42, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
; X64-RETPOLINE-NEXT: .Ltmp1: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_4: # %bb1
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $7, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
; X64-RETPOLINE-NEXT: .Ltmp2: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_5: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
; X64-RETPOLINE-NEXT: .Ltmp3: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_3: # %bb0
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $2, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
%ptr = getelementptr [4 x i8*], [4 x i8*]* @global_blockaddrs, i32 0, i32 %idx
%a = load i8*, i8** %ptr
indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]
bb0:
ret i32 2
bb1:
ret i32 7
bb2:
ret i32 13
bb3:
ret i32 42
}
; This function's switch is crafted to trigger jump-table lowering in the x86
; backend so that we can test how the exact jump table lowering behaves.
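;
; As a rough sketch only (the exact case values and labels used by
; @test_switch_jumptable may differ; this snippet is not checked by FileCheck),
; a switch with a dense run of case values is what the backend typically
; lowers to a jump table:
;
;   switch i32 %idx, label %bb0 [
;     i32 1, label %bb1
;     i32 2, label %bb2
;     i32 3, label %bb3
;     i32 4, label %bb5
;   ]
;
; The table dispatch shows up in the checks below as an indexed load from
; .LJTI6_0 followed by an indirect jump, which the hardening pass must cover
; like any other indirect branch.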
define i32 @test_switch_jumptable(i32 %idx) nounwind {
; X64-LABEL: test_switch_jumptable:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: cmpl $3, %edi
; X64-NEXT: ja .LBB6_2
; X64-NEXT: # %bb.1: # %entry
; X64-NEXT: cmovaq %rax, %rcx
; X64-NEXT: movl %edi, %edx
; X64-NEXT: movq .LJTI6_0(,%rdx,8), %rdx
; X64-NEXT: orq %rcx, %rdx
; X64-NEXT: jmpq *%rdx
; X64-NEXT: .LBB6_3: # Block address taken
; X64-NEXT: # %bb1
; X64-NEXT: cmpq $.LBB6_3, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $7, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB6_2: # %bb0
; X64-NEXT: cmovbeq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $2, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB6_4: # Block address taken
; X64-NEXT: # %bb2
; X64-NEXT: cmpq $.LBB6_4, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $13, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB6_5: # Block address taken
; X64-NEXT: # %bb3
; X64-NEXT: cmpq $.LBB6_5, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $42, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
; X64-NEXT: .LBB6_6: # Block address taken
; X64-NEXT: # %bb5
; X64-NEXT: cmpq $.LBB6_6, %rdx
; X64-NEXT: cmovneq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: movl $11, %eax
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-PIC-LABEL: test_switch_jumptable:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %rcx
; X64-PIC-NEXT: movq $-1, %rax
; X64-PIC-NEXT: sarq $63, %rcx
; X64-PIC-NEXT: cmpl $3, %edi
; X64-PIC-NEXT: ja .LBB6_2
; X64-PIC-NEXT: # %bb.1: # %entry
; X64-PIC-NEXT: cmovaq %rax, %rcx
; X64-PIC-NEXT: movl %edi, %edx
; X64-PIC-NEXT: leaq .LJTI6_0(%rip), %rsi
; X64-PIC-NEXT: movslq (%rsi,%rdx,4), %rdx
; X64-PIC-NEXT: addq %rsi, %rdx
; X64-PIC-NEXT: orq %rcx, %rdx
; X64-PIC-NEXT: jmpq *%rdx
; X64-PIC-NEXT: .LBB6_3: # Block address taken
; X64-PIC-NEXT: # %bb1
; X64-PIC-NEXT: leaq .LBB6_3(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $7, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB6_2: # %bb0
; X64-PIC-NEXT: cmovbeq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $2, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB6_4: # Block address taken
; X64-PIC-NEXT: # %bb2
; X64-PIC-NEXT: leaq .LBB6_4(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $13, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB6_5: # Block address taken
; X64-PIC-NEXT: # %bb3
; X64-PIC-NEXT: leaq .LBB6_5(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $42, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
; X64-PIC-NEXT: .LBB6_6: # Block address taken
; X64-PIC-NEXT: # %bb5
; X64-PIC-NEXT: leaq .LBB6_6(%rip), %rsi
; X64-PIC-NEXT: cmpq %rsi, %rdx
; X64-PIC-NEXT: cmovneq %rax, %rcx
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: movl $11, %eax
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_switch_jumptable:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jg .LBB7_4
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmovgq %rax, %rcx
; X64-RETPOLINE-NEXT: testl %edi, %edi
; X64-RETPOLINE-NEXT: je .LBB7_7
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.3: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
; X64-RETPOLINE-NEXT: .LBB7_4: # %entry
; X64-RETPOLINE-NEXT: cmovleq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $2, %edi
; X64-RETPOLINE-NEXT: je .LBB7_8
; X64-RETPOLINE-NEXT: # %bb.5: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $3, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.9: # %bb5
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: movl $11, %eax
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
; X64-RETPOLINE-NEXT: .LBB7_6:
|
|
|
|
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-RETPOLINE-NEXT: shlq $47, %rcx
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-NEXT: movl $2, %eax
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
|
|
|
|
; X64-RETPOLINE-NEXT: retq
|
|
|
|
; X64-RETPOLINE-NEXT: .LBB7_7: # %bb1
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-RETPOLINE-NEXT: shlq $47, %rcx
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-NEXT: movl $7, %eax
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
|
|
|
|
; X64-RETPOLINE-NEXT: retq
|
|
|
|
; X64-RETPOLINE-NEXT: .LBB7_8: # %bb3
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
|
|
|
|
; X64-RETPOLINE-NEXT: shlq $47, %rcx
|
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from
physical registers to be closer to their point of use / def to
minimize their live ranges. This change extends this to also physical
register assignments from immediate values.
This causes a reduction in reduction in overall register pressure and
minor reduction in spills and indirectly fixes an out-of-registers
assertion (PR39391).
Most test changes are from minor instruction reorderings and register
name selection changes and direct consequences of that.
Reviewers: MatzeB, qcolombet, myatsina, pcc
Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya,
javed.absar, arphaman, jfb, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54218
llvm-svn: 346894
2018-11-15 05:11:53 +08:00
|
|
|
; X64-RETPOLINE-NEXT: movl $42, %eax
|
[x86/SLH] Teach the x86 speculative load hardening pass to harden
against v1.2 BCBS attacks directly.
Attacks using spectre v1.2 (a subset of BCBS) are described in the paper
here:
https://people.csail.mit.edu/vlk/spectre11.pdf
The core idea is to speculatively store over the address in a vtable,
jumptable, or other target of indirect control flow that will be
subsequently loaded. Speculative execution after such a store can
forward the stored value to subsequent loads, and if called or jumped
to, the speculative execution will be steered to this potentially
attacker controlled address.
Up until now, this could be mitigated by enableing retpolines. However,
that is a relatively expensive technique to mitigate this particular
flavor. Especially because in most cases SLH will have already mitigated
this. To fully mitigate this with SLH, we need to do two core things:
1) Unfold loads from calls and jumps, allowing the loads to be post-load
hardened.
2) Force hardening of incoming registers even if we didn't end up
needing to harden the load itself.
The reason we need to do these two things is because hardening calls and
jumps from this particular variant is importantly different from
hardening against leak of secret data. Because the "bad" data here isn't
a secret, but in fact speculatively stored by the attacker, it may be
loaded from any address, regardless of whether it is read-only memory,
mapped memory, or a "hardened" address. The only 100% effective way to
harden these instructions is to harden the their operand itself. But to
the extent possible, we'd like to take advantage of all the other
hardening going on, we just need a fallback in case none of that
happened to cover the particular input to the control transfer
instruction.
For users of SLH, currently they are paing 2% to 6% performance overhead
for retpolines, but this mechanism is expected to be substantially
cheaper. However, it is worth reminding folks that this does not
mitigate all of the things retpolines do -- most notably, variant #2 is
not in *any way* mitigated by this technique. So users of SLH may still
want to enable retpolines, and the implementation is carefuly designed to
gracefully leverage retpolines to avoid the need for further hardening
here when they are enabled.
Differential Revision: https://reviews.llvm.org/D49663
llvm-svn: 337878
2018-07-25 09:51:29 +08:00
|
|
|
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
|
|
|
|
; X64-RETPOLINE-NEXT: retq
|
2018-07-23 15:51:51 +08:00
|
|
|
entry:
  switch i32 %idx, label %bb0 [
    i32 0, label %bb1
    i32 1, label %bb2
    i32 2, label %bb3
    i32 3, label %bb5
  ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42

bb5:
  ret i32 11
}

; This function's switch is crafted to trigger jump-table lowering in the x86
; backend so that we can test how the exact jump table lowering behaves, but
; also arranges for fallthroughs from case to case to ensure that this pattern
; too can be handled.
define i32 @test_switch_jumptable_fallthrough(i32 %idx, i32* %a.ptr, i32* %b.ptr, i32* %c.ptr, i32* %d.ptr) nounwind {
; X64-LABEL: test_switch_jumptable_fallthrough:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %r9
; X64-NEXT: movq $-1, %r10
; X64-NEXT: sarq $63, %r9
; X64-NEXT: cmpl $3, %edi
; X64-NEXT: ja .LBB7_2
; X64-NEXT: # %bb.1: # %entry
; X64-NEXT: cmovaq %r10, %r9
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: movl %edi, %esi
; X64-NEXT: movq .LJTI7_0(,%rsi,8), %rsi
; X64-NEXT: orq %r9, %rsi
; X64-NEXT: jmpq *%rsi
; X64-NEXT: .LBB7_2: # %bb0
; X64-NEXT: cmovbeq %r10, %r9
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: orl %r9d, %eax
; X64-NEXT: movq $.LBB7_3, %rsi
; X64-NEXT: .LBB7_3: # Block address taken
; X64-NEXT: # %bb1
; X64-NEXT: cmpq $.LBB7_3, %rsi
; X64-NEXT: cmovneq %r10, %r9
; X64-NEXT: addl (%rdx), %eax
; X64-NEXT: orl %r9d, %eax
; X64-NEXT: movq $.LBB7_4, %rsi
; X64-NEXT: .LBB7_4: # Block address taken
; X64-NEXT: # %bb2
; X64-NEXT: cmpq $.LBB7_4, %rsi
; X64-NEXT: cmovneq %r10, %r9
; X64-NEXT: addl (%rcx), %eax
; X64-NEXT: orl %r9d, %eax
; X64-NEXT: movq $.LBB7_5, %rsi
; X64-NEXT: .LBB7_5: # Block address taken
; X64-NEXT: # %bb3
; X64-NEXT: cmpq $.LBB7_5, %rsi
; X64-NEXT: cmovneq %r10, %r9
; X64-NEXT: addl (%r8), %eax
; X64-NEXT: orl %r9d, %eax
; X64-NEXT: movq $.LBB7_6, %rsi
; X64-NEXT: .LBB7_6: # Block address taken
; X64-NEXT: # %bb4
; X64-NEXT: cmpq $.LBB7_6, %rsi
; X64-NEXT: cmovneq %r10, %r9
; X64-NEXT: shlq $47, %r9
; X64-NEXT: orq %r9, %rsp
; X64-NEXT: retq
;
; X64-PIC-LABEL: test_switch_jumptable_fallthrough:
; X64-PIC: # %bb.0: # %entry
; X64-PIC-NEXT: movq %rsp, %r9
; X64-PIC-NEXT: movq $-1, %r10
; X64-PIC-NEXT: sarq $63, %r9
; X64-PIC-NEXT: cmpl $3, %edi
; X64-PIC-NEXT: ja .LBB7_2
; X64-PIC-NEXT: # %bb.1: # %entry
; X64-PIC-NEXT: cmovaq %r10, %r9
; X64-PIC-NEXT: xorl %eax, %eax
; X64-PIC-NEXT: movl %edi, %esi
; X64-PIC-NEXT: leaq .LJTI7_0(%rip), %rdi
; X64-PIC-NEXT: movslq (%rdi,%rsi,4), %rsi
; X64-PIC-NEXT: addq %rdi, %rsi
; X64-PIC-NEXT: orq %r9, %rsi
; X64-PIC-NEXT: jmpq *%rsi
; X64-PIC-NEXT: .LBB7_2: # %bb0
; X64-PIC-NEXT: cmovbeq %r10, %r9
; X64-PIC-NEXT: movl (%rsi), %eax
; X64-PIC-NEXT: orl %r9d, %eax
; X64-PIC-NEXT: leaq .LBB7_3(%rip), %rsi
; X64-PIC-NEXT: .LBB7_3: # Block address taken
; X64-PIC-NEXT: # %bb1
; X64-PIC-NEXT: leaq .LBB7_3(%rip), %rdi
; X64-PIC-NEXT: cmpq %rdi, %rsi
; X64-PIC-NEXT: cmovneq %r10, %r9
; X64-PIC-NEXT: addl (%rdx), %eax
; X64-PIC-NEXT: orl %r9d, %eax
; X64-PIC-NEXT: leaq .LBB7_4(%rip), %rsi
; X64-PIC-NEXT: .LBB7_4: # Block address taken
; X64-PIC-NEXT: # %bb2
; X64-PIC-NEXT: leaq .LBB7_4(%rip), %rdx
; X64-PIC-NEXT: cmpq %rdx, %rsi
; X64-PIC-NEXT: cmovneq %r10, %r9
; X64-PIC-NEXT: addl (%rcx), %eax
; X64-PIC-NEXT: orl %r9d, %eax
; X64-PIC-NEXT: leaq .LBB7_5(%rip), %rsi
; X64-PIC-NEXT: .LBB7_5: # Block address taken
; X64-PIC-NEXT: # %bb3
; X64-PIC-NEXT: leaq .LBB7_5(%rip), %rcx
; X64-PIC-NEXT: cmpq %rcx, %rsi
; X64-PIC-NEXT: cmovneq %r10, %r9
; X64-PIC-NEXT: addl (%r8), %eax
; X64-PIC-NEXT: orl %r9d, %eax
; X64-PIC-NEXT: leaq .LBB7_6(%rip), %rsi
; X64-PIC-NEXT: .LBB7_6: # Block address taken
; X64-PIC-NEXT: # %bb4
; X64-PIC-NEXT: leaq .LBB7_6(%rip), %rcx
; X64-PIC-NEXT: cmpq %rcx, %rsi
; X64-PIC-NEXT: cmovneq %r10, %r9
; X64-PIC-NEXT: shlq $47, %r9
; X64-PIC-NEXT: orq %r9, %rsp
; X64-PIC-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_switch_jumptable_fallthrough:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %r9
; X64-RETPOLINE-NEXT: movq $-1, %r10
; X64-RETPOLINE-NEXT: sarq $63, %r9
; X64-RETPOLINE-NEXT: xorl %eax, %eax
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jg .LBB8_5
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmovgq %r10, %r9
; X64-RETPOLINE-NEXT: testl %edi, %edi
; X64-RETPOLINE-NEXT: je .LBB8_2
; X64-RETPOLINE-NEXT: # %bb.3: # %entry
; X64-RETPOLINE-NEXT: cmoveq %r10, %r9
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jne .LBB8_8
; X64-RETPOLINE-NEXT: # %bb.4:
; X64-RETPOLINE-NEXT: cmovneq %r10, %r9
; X64-RETPOLINE-NEXT: jmp .LBB8_10
; X64-RETPOLINE-NEXT: .LBB8_5: # %entry
; X64-RETPOLINE-NEXT: cmovleq %r10, %r9
; X64-RETPOLINE-NEXT: cmpl $2, %edi
; X64-RETPOLINE-NEXT: je .LBB8_6
; X64-RETPOLINE-NEXT: # %bb.7: # %entry
; X64-RETPOLINE-NEXT: cmoveq %r10, %r9
; X64-RETPOLINE-NEXT: cmpl $3, %edi
; X64-RETPOLINE-NEXT: jne .LBB8_8
; X64-RETPOLINE-NEXT: # %bb.13:
; X64-RETPOLINE-NEXT: cmovneq %r10, %r9
; X64-RETPOLINE-NEXT: jmp .LBB8_12
; X64-RETPOLINE-NEXT: .LBB8_8:
; X64-RETPOLINE-NEXT: cmoveq %r10, %r9
; X64-RETPOLINE-NEXT: movl (%rsi), %eax
; X64-RETPOLINE-NEXT: orl %r9d, %eax
; X64-RETPOLINE-NEXT: jmp .LBB8_9
; X64-RETPOLINE-NEXT: .LBB8_2:
; X64-RETPOLINE-NEXT: cmovneq %r10, %r9
; X64-RETPOLINE-NEXT: .LBB8_9: # %bb1
; X64-RETPOLINE-NEXT: addl (%rdx), %eax
; X64-RETPOLINE-NEXT: orl %r9d, %eax
; X64-RETPOLINE-NEXT: .LBB8_10: # %bb2
; X64-RETPOLINE-NEXT: addl (%rcx), %eax
; X64-RETPOLINE-NEXT: orl %r9d, %eax
; X64-RETPOLINE-NEXT: jmp .LBB8_11
; X64-RETPOLINE-NEXT: .LBB8_6:
; X64-RETPOLINE-NEXT: cmovneq %r10, %r9
; X64-RETPOLINE-NEXT: .LBB8_11: # %bb3
; X64-RETPOLINE-NEXT: addl (%r8), %eax
; X64-RETPOLINE-NEXT: orl %r9d, %eax
; X64-RETPOLINE-NEXT: .LBB8_12: # %bb4
; X64-RETPOLINE-NEXT: shlq $47, %r9
; X64-RETPOLINE-NEXT: orq %r9, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
  switch i32 %idx, label %bb0 [
    i32 0, label %bb1
    i32 1, label %bb2
    i32 2, label %bb3
    i32 3, label %bb4
  ]

bb0:
  %a = load i32, i32* %a.ptr
  br label %bb1

bb1:
  %b.phi = phi i32 [ 0, %entry ], [ %a, %bb0 ]
  %b = load i32, i32* %b.ptr
  %b.sum = add i32 %b.phi, %b
  br label %bb2

bb2:
  %c.phi = phi i32 [ 0, %entry ], [ %b.sum, %bb1 ]
  %c = load i32, i32* %c.ptr
  %c.sum = add i32 %c.phi, %c
  br label %bb3

bb3:
  %d.phi = phi i32 [ 0, %entry ], [ %c.sum, %bb2 ]
  %d = load i32, i32* %d.ptr
  %d.sum = add i32 %d.phi, %d
  br label %bb4

bb4:
  %e.phi = phi i32 [ 0, %entry ], [ %d.sum, %bb3 ]
  ret i32 %e.phi
}