llvm-project/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
; RUN: llc -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=NOHARDENARM -dump-input-context=100
; RUN: llc -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=NOHARDENTHUMB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
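; An illustrative sketch (not the exact autogenerated CHECK output) of the two
; barrier flavours the prefixes above distinguish, shown after a hardened
; return in ARM mode:
;   bx  lr
;   dsb sy      @ ISBDSB flavour: DSB SYS followed by ISB
;   isb
; versus, on targets with the Armv8.0-SB extension (+sb):
;   bx  lr
;   sb          @ single Speculation Barrier instruction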
; Function Attrs: norecurse nounwind readnone
define dso_local i32 @double_return(i32 %a, i32 %b) local_unnamed_addr {
; NOHARDENARM-LABEL: double_return:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: cmp r0, #0
; NOHARDENARM-NEXT: mulgt r0, r1, r0
; NOHARDENARM-NEXT: bxgt lr
; NOHARDENARM-NEXT: .LBB0_1: @ %if.else
; NOHARDENARM-NEXT: sdiv r1, r0, r1
; NOHARDENARM-NEXT: sdiv r1, r0, r1
; NOHARDENARM-NEXT: sdiv r0, r0, r1
; NOHARDENARM-NEXT: bx lr
;
; NOHARDENTHUMB-LABEL: double_return:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: cmp r0, #0
; NOHARDENTHUMB-NEXT: ble .LBB0_2
; NOHARDENTHUMB-NEXT: @ %bb.1: @ %if.then
; NOHARDENTHUMB-NEXT: muls r0, r1, r0
; NOHARDENTHUMB-NEXT: bx lr
; NOHARDENTHUMB-NEXT: .LBB0_2: @ %if.else
; NOHARDENTHUMB-NEXT: sdiv r1, r0, r1
; NOHARDENTHUMB-NEXT: sdiv r1, r0, r1
; NOHARDENTHUMB-NEXT: sdiv r0, r0, r1
; NOHARDENTHUMB-NEXT: bx lr
entry:
  %cmp = icmp sgt i32 %a, 0
  br i1 %cmp, label %if.then, label %if.else

if.then:                                          ; preds = %entry
; Produce a return (BX LR) that is very easy and very likely to get predicated,
; to test that it will not get predicated when sls-hardening is enabled (a
; rough hardened-output sketch follows this function).
  %mul = mul i32 %b, %a
  ret i32 %mul

if.else:                                          ; preds = %entry
  %div3 = sdiv i32 %a, %b
  %div2 = sdiv i32 %a, %div3
  %div1 = sdiv i32 %a, %div2
  ret i32 %div1
}
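; A rough sketch (not the autogenerated checks) of how the ARM-mode code above
; is expected to change once harden-sls-retbr is enabled with the ISBDSB
; flavour: the return is no longer predicated, so the barrier that follows it
; is never on the architectural path:
;   cmp  r0, #0
;   ble  .LBB0_2
;   mul  r0, r1, r0
;   bx   lr
;   dsb  sy
;   isb
;   .LBB0_2:
;   ...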
@__const.indirect_branch.ptr = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@indirect_branch, %return), i8* blockaddress(@indirect_branch, %l2)], align 8
; Function Attrs: norecurse nounwind readnone
define dso_local i32 @indirect_branch(i32 %a, i32 %b, i32 %i) {
; NOHARDENARM-LABEL: indirect_branch:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: movw r0, :lower16:.L__const.indirect_branch.ptr
; NOHARDENARM-NEXT: movt r0, :upper16:.L__const.indirect_branch.ptr
; NOHARDENARM-NEXT: ldr r0, [r0, r2, lsl #2]
; NOHARDENARM-NEXT: bx r0
; NOHARDENARM-NEXT: .Ltmp0: @ Block address taken
; NOHARDENARM-NEXT: .LBB1_1: @ %return
; NOHARDENARM-NEXT: mov r0, #0
; NOHARDENARM-NEXT: bx lr
; NOHARDENARM-NEXT: .Ltmp1: @ Block address taken
; NOHARDENARM-NEXT: .LBB1_2: @ %l2
; NOHARDENARM-NEXT: mov r0, #1
; NOHARDENARM-NEXT: bx lr
;
; NOHARDENTHUMB-LABEL: indirect_branch:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: movw r0, :lower16:.L__const.indirect_branch.ptr
; NOHARDENTHUMB-NEXT: movt r0, :upper16:.L__const.indirect_branch.ptr
; NOHARDENTHUMB-NEXT: ldr.w r0, [r0, r2, lsl #2]
; NOHARDENTHUMB-NEXT: mov pc, r0
; NOHARDENTHUMB-NEXT: .Ltmp0: @ Block address taken
; NOHARDENTHUMB-NEXT: .LBB1_1: @ %return
; NOHARDENTHUMB-NEXT: movs r0, #0
; NOHARDENTHUMB-NEXT: bx lr
; NOHARDENTHUMB-NEXT: .Ltmp1: @ Block address taken
; NOHARDENTHUMB-NEXT: .LBB1_2: @ %l2
; NOHARDENTHUMB-NEXT: movs r0, #1
; NOHARDENTHUMB-NEXT: bx lr
entry:
  %idxprom = sext i32 %i to i64
  %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @__const.indirect_branch.ptr, i64 0, i64 %idxprom
  %0 = load i8*, i8** %arrayidx, align 8
  indirectbr i8* %0, [label %return, label %l2]

l2:                                               ; preds = %entry
  br label %return

return:                                           ; preds = %entry, %l2
  %retval.0 = phi i32 [ 1, %l2 ], [ 0, %entry ]
  ret i32 %retval.0
}
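; The indirect branch above ("bx r0" in ARM mode, "mov pc, r0" in Thumb mode)
; is the kind of instruction harden-sls-retbr protects; a rough sketch of the
; expected ISBDSB-flavour ARM output, with the barrier right behind the branch:
;   ldr r0, [r0, r2, lsl #2]
;   bx  r0
;   dsb sy
;   isb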
define i32 @asmgoto() {
; NOHARDENARM-LABEL: asmgoto:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: mov r0, #0
; NOHARDENARM-NEXT: @APP
; NOHARDENARM-NEXT: b .Ltmp2
; NOHARDENARM-NEXT: @NO_APP
; NOHARDENARM-NEXT: @ %bb.1: @ %common.ret
; NOHARDENARM-NEXT: bx lr
; NOHARDENARM-NEXT: .Ltmp2: @ Block address taken
; NOHARDENARM-NEXT: .LBB2_2: @ %d
; NOHARDENARM-NEXT: mov r0, #1
; NOHARDENARM-NEXT: bx lr
;
; NOHARDENTHUMB-LABEL: asmgoto:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: @APP
; NOHARDENTHUMB-NEXT: b .Ltmp2
; NOHARDENTHUMB-NEXT: @NO_APP
; NOHARDENTHUMB-NEXT: @ %bb.1:
; NOHARDENTHUMB-NEXT: movs r0, #0
; NOHARDENTHUMB-NEXT: bx lr
; NOHARDENTHUMB-NEXT: .Ltmp2: @ Block address taken
; NOHARDENTHUMB-NEXT: .LBB2_2: @ %d
; NOHARDENTHUMB-NEXT: movs r0, #1
; NOHARDENTHUMB-NEXT: bx lr
entry:
  callbr void asm sideeffect "B $0", "X"(i8* blockaddress(@asmgoto, %d))
            to label %asm.fallthrough [label %d]
; The asm goto above produces a direct branch;
; for direct branches, no mitigation is needed (see the note after this
; function).
; ISBDSB-NOT: dsb sy

asm.fallthrough:                                  ; preds = %entry
  ret i32 0

d:                                                ; preds = %asm.fallthrough, %entry
  ret i32 1
}
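; A short illustration of why no barrier is expected above: the asm goto lowers
; to a plain direct branch with a fixed target, for example:
;   b .Ltmp2
; and this pass only hardens returns and indirect control flow transfers.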
; Check that indirect branches produced through switch jump tables are also
; hardened:
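; A rough sketch of the hardened ARM-mode jump-table dispatch (ISBDSB flavour);
; the load into pc is itself the indirect branch, so the barrier follows it:
;   adr r2, .LJTI3_0
;   ldr pc, [r2, r1, lsl #2]
;   dsb sy
;   isb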
define dso_local i32 @jumptable(i32 %a, i32 %b) {
; NOHARDENARM-LABEL: jumptable:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: cmp r1, #4
; NOHARDENARM-NEXT: bxhi lr
; NOHARDENARM-NEXT: .LBB3_1: @ %entry
; NOHARDENARM-NEXT: adr r2, .LJTI3_0
; NOHARDENARM-NEXT: ldr pc, [r2, r1, lsl #2]
; NOHARDENARM-NEXT: @ %bb.2:
; NOHARDENARM-NEXT: .p2align 2
; NOHARDENARM-NEXT: .LJTI3_0:
; NOHARDENARM-NEXT: .long .LBB3_3
; NOHARDENARM-NEXT: .long .LBB3_4
; NOHARDENARM-NEXT: .long .LBB3_7
; NOHARDENARM-NEXT: .long .LBB3_5
; NOHARDENARM-NEXT: .long .LBB3_6
; NOHARDENARM-NEXT: .LBB3_3: @ %sw.bb
; NOHARDENARM-NEXT: lsl r0, r0, #1
; NOHARDENARM-NEXT: .LBB3_4: @ %sw.bb1
; NOHARDENARM-NEXT: lsl r0, r0, #1
; NOHARDENARM-NEXT: .LBB3_5: @ %sw.bb3
; NOHARDENARM-NEXT: lsl r0, r0, #1
; NOHARDENARM-NEXT: .LBB3_6: @ %sw.bb5
; NOHARDENARM-NEXT: lsl r0, r0, #1
; NOHARDENARM-NEXT: .LBB3_7: @ %sw.epilog
; NOHARDENARM-NEXT: bx lr
;
; NOHARDENTHUMB-LABEL: jumptable:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: cmp r1, #4
; NOHARDENTHUMB-NEXT: bhi .LBB3_7
; NOHARDENTHUMB-NEXT: @ %bb.1: @ %entry
; NOHARDENTHUMB-NEXT: .LCPI3_0:
; NOHARDENTHUMB-NEXT: tbb [pc, r1]
; NOHARDENTHUMB-NEXT: @ %bb.2:
; NOHARDENTHUMB-NEXT: .LJTI3_0:
; NOHARDENTHUMB-NEXT: .byte (.LBB3_3-(.LCPI3_0+4))/2
; NOHARDENTHUMB-NEXT: .byte (.LBB3_4-(.LCPI3_0+4))/2
; NOHARDENTHUMB-NEXT: .byte (.LBB3_7-(.LCPI3_0+4))/2
; NOHARDENTHUMB-NEXT: .byte (.LBB3_5-(.LCPI3_0+4))/2
; NOHARDENTHUMB-NEXT: .byte (.LBB3_6-(.LCPI3_0+4))/2
; NOHARDENTHUMB-NEXT: .p2align 1
; NOHARDENTHUMB-NEXT: .LBB3_3: @ %sw.bb
; NOHARDENTHUMB-NEXT: lsls r0, r0, #1
; NOHARDENTHUMB-NEXT: .LBB3_4: @ %sw.bb1
; NOHARDENTHUMB-NEXT: lsls r0, r0, #1
; NOHARDENTHUMB-NEXT: .LBB3_5: @ %sw.bb3
; NOHARDENTHUMB-NEXT: lsls r0, r0, #1
; NOHARDENTHUMB-NEXT: .LBB3_6: @ %sw.bb5
; NOHARDENTHUMB-NEXT: lsls r0, r0, #1
; NOHARDENTHUMB-NEXT: .LBB3_7: @ %sw.epilog
; NOHARDENTHUMB-NEXT: bx lr
entry:
  switch i32 %b, label %sw.epilog [
    i32 0, label %sw.bb
    i32 1, label %sw.bb1
    i32 3, label %sw.bb3
    i32 4, label %sw.bb5
  ]

sw.bb:                                            ; preds = %entry
  %add = shl nsw i32 %a, 1
  br label %sw.bb1

sw.bb1:                                           ; preds = %entry, %sw.bb
  %a.addr.0 = phi i32 [ %a, %entry ], [ %add, %sw.bb ]
  %add2 = shl nsw i32 %a.addr.0, 1
  br label %sw.bb3

sw.bb3:                                           ; preds = %entry, %sw.bb1
  %a.addr.1 = phi i32 [ %a, %entry ], [ %add2, %sw.bb1 ]
  %add4 = shl nsw i32 %a.addr.1, 1
  br label %sw.bb5

sw.bb5:                                           ; preds = %entry, %sw.bb3
  %a.addr.2 = phi i32 [ %a, %entry ], [ %add4, %sw.bb3 ]
  %add6 = shl nsw i32 %a.addr.2, 1
  br label %sw.epilog

sw.epilog:                                        ; preds = %sw.bb5, %entry
  %a.addr.3 = phi i32 [ %a, %entry ], [ %add6, %sw.bb5 ]
  ret i32 %a.addr.3
}
define dso_local i32 @indirect_call(
; NOHARDENARM-LABEL: indirect_call:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: .save {r4, r5, r11, lr}
; NOHARDENARM-NEXT: push {r4, r5, r11, lr}
; NOHARDENARM-NEXT: mov r4, r1
; NOHARDENARM-NEXT: blx r0
; NOHARDENARM-NEXT: mov r5, r0
; NOHARDENARM-NEXT: blx r4
; NOHARDENARM-NEXT: add r0, r0, r5
; NOHARDENARM-NEXT: pop {r4, r5, r11, pc}
;
; NOHARDENTHUMB-LABEL: indirect_call:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: .save {r4, r5, r7, lr}
; NOHARDENTHUMB-NEXT: push {r4, r5, r7, lr}
; NOHARDENTHUMB-NEXT: mov r4, r1
; NOHARDENTHUMB-NEXT: blx r0
; NOHARDENTHUMB-NEXT: mov r5, r0
; NOHARDENTHUMB-NEXT: blx r4
; NOHARDENTHUMB-NEXT: add r0, r5
; NOHARDENTHUMB-NEXT: pop {r4, r5, r7, pc}
i32 (...)* nocapture %f1, i32 (...)* nocapture %f2) {
entry:
  %callee.knr.cast = bitcast i32 (...)* %f1 to i32 ()*
  %call = tail call i32 %callee.knr.cast()
; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
  %callee.knr.cast1 = bitcast i32 (...)* %f2 to i32 ()*
  %call2 = tail call i32 %callee.knr.cast1()
; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
  %add = add nsw i32 %call2, %call
  ret i32 %add
}
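; With harden-sls-blr enabled, each "blx" through a register above is expected
; to be rewritten into a direct call to a per-register thunk (the thunk body is
; checked near the end of this file). A rough sketch, using r4 purely as an
; example register:
;   bl __llvm_slsblr_thunk_arm_r4
; where the thunk, in the ISBDSB flavour, looks like:
;   __llvm_slsblr_thunk_arm_r4:
;   bx  r4
;   dsb sy
;   isb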
; Verify calling through a function pointer loaded from a global variable.
@a = dso_local local_unnamed_addr global i32 (...)* null, align 8
@b = dso_local local_unnamed_addr global i32 0, align 4
define dso_local void @indirect_call_global() local_unnamed_addr {
; NOHARDENARM-LABEL: indirect_call_global:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: .save {r11, lr}
; NOHARDENARM-NEXT: push {r11, lr}
; NOHARDENARM-NEXT: movw r0, :lower16:a
; NOHARDENARM-NEXT: movt r0, :upper16:a
; NOHARDENARM-NEXT: ldr r0, [r0]
; NOHARDENARM-NEXT: blx r0
; NOHARDENARM-NEXT: movw r1, :lower16:b
; NOHARDENARM-NEXT: movt r1, :upper16:b
; NOHARDENARM-NEXT: str r0, [r1]
; NOHARDENARM-NEXT: pop {r11, pc}
;
; NOHARDENTHUMB-LABEL: indirect_call_global:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: .save {r7, lr}
; NOHARDENTHUMB-NEXT: push {r7, lr}
; NOHARDENTHUMB-NEXT: movw r0, :lower16:a
; NOHARDENTHUMB-NEXT: movt r0, :upper16:a
; NOHARDENTHUMB-NEXT: ldr r0, [r0]
; NOHARDENTHUMB-NEXT: blx r0
; NOHARDENTHUMB-NEXT: movw r1, :lower16:b
; NOHARDENTHUMB-NEXT: movt r1, :upper16:b
; NOHARDENTHUMB-NEXT: str r0, [r1]
; NOHARDENTHUMB-NEXT: pop {r7, pc}
entry:
  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @a to i32 ()**), align 8
  %call = tail call i32 %0() nounwind
; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
  store i32 %call, i32* @b, align 4
  ret void
}
; Verify that neither r12 nor lr is used as the register in indirect call
; instructions when the harden-sls-blr mitigation is enabled, as
; (a) a linker is allowed to clobber r12 on calls, and
; (b) the hardening transformation isn't correct if lr is the register holding
; the address of the function called.
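; As an illustration of point (b), consider a hypothetical thunk for lr: the BL
; that replaces the indirect call would overwrite lr with the return address
; before the thunk could branch through it, so the original call target would
; be lost (register use here is purely illustrative):
;   mov lr, r0                        @ call target placed in lr (not allowed)
;   bl  __llvm_slsblr_thunk_arm_lr    @ BL clobbers lr with the return address
;   ...
;   bx  lr                            @ the thunk would now branch back to the
;                                     @ caller instead of the intended target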
define i32 @check_r12(i32 ()** %fp) {
; NOHARDENARM-LABEL: check_r12:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: .save {r11, lr}
; NOHARDENARM-NEXT: push {r11, lr}
; NOHARDENARM-NEXT: ldr r12, [r0]
; NOHARDENARM-NEXT: @APP
; NOHARDENARM-NEXT: add r12, r12, #0
; NOHARDENARM-NEXT: @NO_APP
; NOHARDENARM-NEXT: blx r12
; NOHARDENARM-NEXT: pop {r11, pc}
;
; NOHARDENTHUMB-LABEL: check_r12:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: .save {r7, lr}
; NOHARDENTHUMB-NEXT: push {r7, lr}
; NOHARDENTHUMB-NEXT: ldr.w r12, [r0]
; NOHARDENTHUMB-NEXT: @APP
; NOHARDENTHUMB-NEXT: add.w r12, r12, #0
; NOHARDENTHUMB-NEXT: @NO_APP
; NOHARDENTHUMB-NEXT: blx r12
; NOHARDENTHUMB-NEXT: pop {r7, pc}
entry:
  %f = load i32 ()*, i32 ()** %fp, align 4
  ; Force f to be moved into r12
  %r12_f = tail call i32 ()* asm "add $0, $1, #0", "={r12},{r12}"(i32 ()* %f) nounwind
  %call = call i32 %r12_f()
  ret i32 %call
}
define i32 @check_lr(i32 ()** %fp) {
; NOHARDENARM-LABEL: check_lr:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: .save {r11, lr}
; NOHARDENARM-NEXT: push {r11, lr}
; NOHARDENARM-NEXT: ldr lr, [r0]
; NOHARDENARM-NEXT: @APP
; NOHARDENARM-NEXT: add lr, lr, #0
; NOHARDENARM-NEXT: @NO_APP
; NOHARDENARM-NEXT: blx lr
; NOHARDENARM-NEXT: pop {r11, pc}
;
; NOHARDENTHUMB-LABEL: check_lr:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: .save {r7, lr}
; NOHARDENTHUMB-NEXT: push {r7, lr}
; NOHARDENTHUMB-NEXT: ldr.w lr, [r0]
; NOHARDENTHUMB-NEXT: @APP
; NOHARDENTHUMB-NEXT: add.w lr, lr, #0
; NOHARDENTHUMB-NEXT: @NO_APP
; NOHARDENTHUMB-NEXT: blx lr
; NOHARDENTHUMB-NEXT: pop {r7, pc}
entry:
  %f = load i32 ()*, i32 ()** %fp, align 4
  ; Force f to be moved into lr
  %lr_f = tail call i32 ()* asm "add $0, $1, #0", "={lr},{lr}"(i32 ()* %f) nounwind
  %call = call i32 %lr_f()
  ret i32 %call
}
; Verify that even when harden-sls-blr is enabled, "blx r12" is still accepted
; by the inline assembler.
define void @verify_inline_asm_blx_r12(void ()* %g) {
; ISBDSB-LABEL: verify_inline_asm_blx_r12:
; ISBDSB: @ %bb.0: @ %entry
; ISBDSB-NEXT: mov r12, r0
; ISBDSB-NEXT: @APP
; ISBDSB-NEXT: blx r12
; ISBDSB-NEXT: @NO_APP
; ISBDSB-NEXT: bx lr
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb sy
;
; SB-LABEL: verify_inline_asm_blx_r12:
; SB: @ %bb.0: @ %entry
; SB-NEXT: mov r12, r0
; SB-NEXT: @APP
; SB-NEXT: blx r12
; SB-NEXT: @NO_APP
; SB-NEXT: bx lr
; SB-NEXT: sb
;
; NOHARDENARM-LABEL: verify_inline_asm_blx_r12:
; NOHARDENARM: @ %bb.0: @ %entry
; NOHARDENARM-NEXT: mov r12, r0
; NOHARDENARM-NEXT: @APP
; NOHARDENARM-NEXT: blx r12
; NOHARDENARM-NEXT: @NO_APP
; NOHARDENARM-NEXT: bx lr
;
; NOHARDENTHUMB-LABEL: verify_inline_asm_blx_r12:
; NOHARDENTHUMB: @ %bb.0: @ %entry
; NOHARDENTHUMB-NEXT: mov r12, r0
; NOHARDENTHUMB-NEXT: @APP
; NOHARDENTHUMB-NEXT: blx r12
; NOHARDENTHUMB-NEXT: @NO_APP
; NOHARDENTHUMB-NEXT: bx lr
entry:
  %0 = bitcast void ()* %g to i8*
  tail call void asm sideeffect "blx $0", "{r12}"(i8* %0) nounwind
  ret void
}
; HARDEN-COMDAT: .section {{.text.__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT: .hidden {{__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT: .weak {{__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT: .type {{__llvm_slsblr_thunk_(arm|thumb)_r5}},%function
; HARDEN-COMDAT-OFF-NOT: .section {{.text.__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT-OFF-NOT: .hidden {{__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT-OFF-NOT: .weak {{__llvm_slsblr_thunk_(arm|thumb)_r5}}
; HARDEN-COMDAT-OFF: .type {{__llvm_slsblr_thunk_(arm|thumb)_r5}},%function
; HARDEN-LABEL: {{__llvm_slsblr_thunk_(arm|thumb)_r5}}:
; HARDEN: bx r5
; ISBDSB-NEXT: dsb sy
; ISBDSB-NEXT: isb
; SB-NEXT: sb
; HARDEN-NEXT: .Lfunc_end