[x86/retpoline] Split the LLVM concept of retpolines into separate subtarget features for indirect calls and indirect branches.

This is in preparation for enabling *only* the call retpolines when using speculative load hardening.

I've continued to use subtarget features for now as they continue to seem the best fit given the lack of other retpoline-like constructs so far.

The LLVM side is pretty simple. I'd like to eventually get rid of the old feature, but I'm not sure what backwards compatibility issues that will cause.

This does remove the "implies" from requesting an external thunk. This always seemed somewhat questionable and is now clearly not desirable -- you specify a thunk the same way no matter which set of things are getting retpolines.

I really want to keep this nicely isolated from end users and just an LLVM implementation detail, so I've moved the `-mretpoline` flag in Clang to no longer rely on a specific subtarget feature by that name and instead to be directly handled. In some ways this is simpler, but in order to preserve existing behavior I've had to add some fallback code so that users who relied on merely passing -mretpoline-external-thunk continue to get the same behavior. We should eventually remove this, I suspect (we have never tested that it works!), but I've not done that in this patch.

Differential Revision: https://reviews.llvm.org/D51150

llvm-svn: 340515
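For illustration, here is a minimal LLVM IR sketch (not taken from this patch; the function name is made up) of the combination that speculative load hardening wants: retpolines for indirect calls only, requested through the new `retpoline-indirect-calls` target feature, with indirect branches, `indirectbr`, and jump tables left alone.

    ; Only indirect calls are retpolined; branch lowering and jump-table
    ; formation are unaffected because `retpoline-indirect-branches` is not set.
    define void @call_only(void ()* %f) #0 {
    entry:
      call void %f()
      ret void
    }

    attributes #0 = { "target-features"="+retpoline-indirect-calls" }

The updated retpoline.ll test at the end of this patch exercises exactly this split with its `switch_jumptable` and `indirectbr_preserved` functions.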
@@ -1999,6 +1999,9 @@ def mno_rtd : Flag<["-"], "mno-rtd">, Group<m_Group>;
 def mno_soft_float : Flag<["-"], "mno-soft-float">, Group<m_Group>;
 def mno_stackrealign : Flag<["-"], "mno-stackrealign">, Group<m_Group>;
 
+def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>, Flags<[CoreOption,DriverOption]>;
+def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>, Flags<[CoreOption,DriverOption]>;
+
 def mrelax : Flag<["-"], "mrelax">, Group<m_riscv_Features_Group>,
              HelpText<"Enable linker relaxation">;
 def mno_relax : Flag<["-"], "mno-relax">, Group<m_riscv_Features_Group>,

@@ -2824,8 +2827,6 @@ def mxsaves : Flag<["-"], "mxsaves">, Group<m_x86_Features_Group>;
 def mno_xsaves : Flag<["-"], "mno-xsaves">, Group<m_x86_Features_Group>;
 def mshstk : Flag<["-"], "mshstk">, Group<m_x86_Features_Group>;
 def mno_shstk : Flag<["-"], "mno-shstk">, Group<m_x86_Features_Group>;
-def mretpoline : Flag<["-"], "mretpoline">, Group<m_x86_Features_Group>;
-def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_x86_Features_Group>;
 def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group<m_x86_Features_Group>;
 def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group<m_x86_Features_Group>;

@@ -796,8 +796,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
       HasCLDEMOTE = true;
     } else if (Feature == "+rdpid") {
       HasRDPID = true;
-    } else if (Feature == "+retpoline") {
-      HasRetpoline = true;
     } else if (Feature == "+retpoline-external-thunk") {
       HasRetpolineExternalThunk = true;
     } else if (Feature == "+sahf") {

@@ -1397,7 +1395,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
       .Case("rdpid", HasRDPID)
       .Case("rdrnd", HasRDRND)
       .Case("rdseed", HasRDSEED)
-      .Case("retpoline", HasRetpoline)
       .Case("retpoline-external-thunk", HasRetpolineExternalThunk)
       .Case("rtm", HasRTM)
       .Case("sahf", HasLAHFSAHF)

@@ -98,7 +98,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
   bool HasMOVBE = false;
   bool HasPREFETCHWT1 = false;
   bool HasRDPID = false;
-  bool HasRetpoline = false;
   bool HasRetpolineExternalThunk = false;
   bool HasLAHFSAHF = false;
   bool HasWBNOINVD = false;

@@ -144,6 +144,26 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
     Features.push_back("+ssse3");
   }
 
+  // Translate the high level `-mretpoline` flag to the specific target feature
+  // flags. We also detect if the user asked for retpoline external thunks but
+  // failed to ask for retpolines themselves. This is a bit hacky but keeps
+  // existing usages working. We should consider deprecating this and instead
+  // warning if the user requests external retpoline thunks and *doesn't*
+  // request some form of retpolines.
+  if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline)) {
+    if (Args.hasFlag(options::OPT_mretpoline, options::OPT_mno_retpoline,
+                     false)) {
+      Features.push_back("+retpoline-indirect-calls");
+      Features.push_back("+retpoline-indirect-branches");
+    }
+  } else if (Args.hasFlag(options::OPT_mretpoline_external_thunk,
+                          options::OPT_mno_retpoline_external_thunk, false)) {
+    // FIXME: Add a warning about failing to specify `-mretpoline` and
+    // eventually switch to an error here.
+    Features.push_back("+retpoline-indirect-calls");
+    Features.push_back("+retpoline-indirect-branches");
+  }
+
   // Now add any that the user explicitly requested on the command line,
   // which may override the defaults.
   handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);

@@ -132,8 +132,8 @@
 
 // RUN: %clang -target i386-linux-gnu -mretpoline %s -### -o %t.o 2>&1 | FileCheck -check-prefix=RETPOLINE %s
 // RUN: %clang -target i386-linux-gnu -mno-retpoline %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-RETPOLINE %s
-// RETPOLINE: "-target-feature" "+retpoline"
-// NO-RETPOLINE: "-target-feature" "-retpoline"
+// RETPOLINE: "-target-feature" "+retpoline-indirect-calls" "-target-feature" "+retpoline-indirect-branches"
+// NO-RETPOLINE-NOT: retpoline
 
 // RUN: %clang -target i386-linux-gnu -mretpoline -mretpoline-external-thunk %s -### -o %t.o 2>&1 | FileCheck -check-prefix=RETPOLINE-EXTERNAL-THUNK %s
 // RUN: %clang -target i386-linux-gnu -mretpoline -mno-retpoline-external-thunk %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-RETPOLINE-EXTERNAL-THUNK %s

@@ -362,17 +362,30 @@ def FeaturePrefer256Bit
     : SubtargetFeature<"prefer-256-bit", "Prefer256Bit", "true",
                        "Prefer 256-bit AVX instructions">;
 
-// Enable mitigation of some aspects of speculative execution related
-// vulnerabilities by removing speculatable indirect branches. This disables
-// jump-table formation, rewrites explicit `indirectbr` instructions into
-// `switch` instructions, and uses a special construct called a "retpoline" to
-// prevent speculation of the remaining indirect branches (indirect calls and
-// tail calls).
+// Lower indirect calls using a special construct called a `retpoline` to
+// mitigate potential Spectre v2 attacks against them.
+def FeatureRetpolineIndirectCalls
+    : SubtargetFeature<
+          "retpoline-indirect-calls", "UseRetpolineIndirectCalls", "true",
+          "Remove speculation of indirect calls from the generated code.">;
+
+// Lower indirect branches and switches either using conditional branch trees
+// or using a special construct called a `retpoline` to mitigate potential
+// Spectre v2 attacks against them.
+def FeatureRetpolineIndirectBranches
+    : SubtargetFeature<
+          "retpoline-indirect-branches", "UseRetpolineIndirectBranches", "true",
+          "Remove speculation of indirect branches from the generated code.">;
+
+// Deprecated umbrella feature for enabling both `retpoline-indirect-calls` and
+// `retpoline-indirect-branches` above.
 def FeatureRetpoline
-    : SubtargetFeature<"retpoline", "UseRetpoline", "true",
+    : SubtargetFeature<"retpoline", "DeprecatedUseRetpoline", "true",
                        "Remove speculation of indirect branches from the "
                        "generated code, either by avoiding them entirely or "
-                       "lowering them with a speculation blocking construct.">;
+                       "lowering them with a speculation blocking construct.",
+                       [FeatureRetpolineIndirectCalls,
+                        FeatureRetpolineIndirectBranches]>;
 
 // Rely on external thunks for the emitted retpoline calls. This allows users
 // to provide their own custom thunk definitions in highly specialized

@@ -380,8 +393,10 @@ def FeatureRetpoline
 def FeatureRetpolineExternalThunk
     : SubtargetFeature<
           "retpoline-external-thunk", "UseRetpolineExternalThunk", "true",
-          "Enable retpoline, but with an externally provided thunk.",
-          [FeatureRetpoline]>;
+          "When lowering an indirect call or branch using a `retpoline`, rely "
+          "on the specified user provided thunk rather than emitting one "
+          "ourselves. Only has effect when combined with some other retpoline "
+          "feature.", [FeatureRetpolineIndirectCalls]>;
 
 // Direct Move instructions.
 def FeatureMOVDIRI : SubtargetFeature<"movdiri", "HasMOVDIRI", "true",

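As a hedged backward-compatibility sketch (the function names are invented for illustration), the deprecated umbrella feature and the explicit pair of new features should request the same lowering, since `FeatureRetpoline` now does nothing beyond implying the two split features:

    ; Old spelling: the deprecated umbrella feature.
    define void @old_spelling(void ()* %f) #0 {
    entry:
      call void %f()
      ret void
    }

    ; New spelling: the split features requested explicitly, as the updated
    ; tests at the end of this patch do.
    define void @new_spelling(void ()* %f) #1 {
    entry:
      call void %f()
      ret void
    }

    attributes #0 = { "target-features"="+retpoline" }
    attributes #1 = { "target-features"="+retpoline-indirect-calls,+retpoline-indirect-branches" }
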
@@ -3222,8 +3222,8 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
       (CalledFn && CalledFn->hasFnAttribute("no_caller_saved_registers")))
     return false;
 
-  // Functions using retpoline should use SDISel for calls.
-  if (Subtarget->useRetpoline())
+  // Functions using retpoline for indirect calls need to use SDISel.
+  if (Subtarget->useRetpolineIndirectCalls())
     return false;
 
   // Handle only C, fastcc, and webkit_js calling conventions for now.

@@ -765,7 +765,7 @@ void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
 
   // FIXME: Add retpoline support and remove this.
-  if (Is64Bit && IsLargeCodeModel && STI.useRetpoline())
+  if (Is64Bit && IsLargeCodeModel && STI.useRetpolineIndirectCalls())
     report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                        "code model and retpoline not yet implemented.");
 
@@ -2437,7 +2437,7 @@ void X86FrameLowering::adjustForSegmentedStacks(
   // is laid out within 2^31 bytes of each function body, but this seems
   // to be sufficient for JIT.
   // FIXME: Add retpoline support and remove the error here..
-  if (STI.useRetpoline())
+  if (STI.useRetpolineIndirectCalls())
     report_fatal_error("Emitting morestack calls on 64-bit with the large "
                        "code model and retpoline not yet implemented.");
   BuildMI(allocMBB, DL, TII.get(X86::CALL64m))

@@ -725,7 +725,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
     if (OptLevel != CodeGenOpt::None &&
         // Only do this when the target can fold the load into the call or
        // jmp.
-        !Subtarget->useRetpoline() &&
+        !Subtarget->useRetpolineIndirectCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||

@@ -26649,7 +26649,7 @@ bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
 
 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
   // If the subtarget is using retpolines, we need to not generate jump tables.
-  if (Subtarget.useRetpoline())
+  if (Subtarget.useRetpolineIndirectBranches())
     return false;
 
   // Otherwise, fallback on the generic logic.

@@ -1095,14 +1095,14 @@ def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
 
 def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
           (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
-          Requires<[Not64BitMode, NotUseRetpoline]>;
+          Requires<[Not64BitMode, NotUseRetpolineIndirectCalls]>;
 
 // FIXME: This is disabled for 32-bit PIC mode because the global base
 // register which is part of the address mode may be assigned a
 // callee-saved register.
 def : Pat<(X86tcret (load addr:$dst), imm:$off),
           (TCRETURNmi addr:$dst, imm:$off)>,
-          Requires<[Not64BitMode, IsNotPIC, NotUseRetpoline]>;
+          Requires<[Not64BitMode, IsNotPIC, NotUseRetpolineIndirectCalls]>;
 
 def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
           (TCRETURNdi tglobaladdr:$dst, imm:$off)>,

@@ -1114,21 +1114,21 @@ def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
 
 def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
           (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
-          Requires<[In64BitMode, NotUseRetpoline]>;
+          Requires<[In64BitMode, NotUseRetpolineIndirectCalls]>;
 
 // Don't fold loads into X86tcret requiring more than 6 regs.
 // There wouldn't be enough scratch registers for base+index.
 def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
           (TCRETURNmi64 addr:$dst, imm:$off)>,
-          Requires<[In64BitMode, NotUseRetpoline]>;
+          Requires<[In64BitMode, NotUseRetpolineIndirectCalls]>;
 
 def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
           (RETPOLINE_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>,
-          Requires<[In64BitMode, UseRetpoline]>;
+          Requires<[In64BitMode, UseRetpolineIndirectCalls]>;
 
 def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
           (RETPOLINE_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>,
-          Requires<[Not64BitMode, UseRetpoline]>;
+          Requires<[Not64BitMode, UseRetpolineIndirectCalls]>;
 
 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
           (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,

@@ -222,11 +222,13 @@ let isCall = 1 in
                       Sched<[WriteJumpLd]>;
   def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
                   "call{l}\t{*}$dst", [(X86call GR32:$dst)]>, OpSize32,
-                  Requires<[Not64BitMode,NotUseRetpoline]>, Sched<[WriteJump]>;
+                  Requires<[Not64BitMode,NotUseRetpolineIndirectCalls]>,
+                  Sched<[WriteJump]>;
   def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
                   "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
                   OpSize32,
-                  Requires<[Not64BitMode,FavorMemIndirectCall,NotUseRetpoline]>,
+                  Requires<[Not64BitMode,FavorMemIndirectCall,
+                            NotUseRetpolineIndirectCalls]>,
                   Sched<[WriteJumpLd]>;
 
   // Non-tracking calls for IBT, use with caution.

@@ -320,11 +322,11 @@ let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
                     Requires<[In64BitMode]>;
   def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
                   "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
-                  Requires<[In64BitMode,NotUseRetpoline]>;
+                  Requires<[In64BitMode,NotUseRetpolineIndirectCalls]>;
   def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst),
                   "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                   Requires<[In64BitMode,FavorMemIndirectCall,
-                            NotUseRetpoline]>;
+                            NotUseRetpolineIndirectCalls]>;
 
   // Non-tracking calls for IBT, use with caution.
   let isCodeGenOnly = 1 in {

@@ -379,11 +381,11 @@ let isPseudo = 1, isCall = 1, isCodeGenOnly = 1,
     SchedRW = [WriteJump] in {
   def RETPOLINE_CALL32 :
     PseudoI<(outs), (ins GR32:$dst), [(X86call GR32:$dst)]>,
-            Requires<[Not64BitMode,UseRetpoline]>;
+            Requires<[Not64BitMode,UseRetpolineIndirectCalls]>;
 
   def RETPOLINE_CALL64 :
     PseudoI<(outs), (ins GR64:$dst), [(X86call GR64:$dst)]>,
-            Requires<[In64BitMode,UseRetpoline]>;
+            Requires<[In64BitMode,UseRetpolineIndirectCalls]>;
 
   // Retpoline variant of indirect tail calls.
   let isTerminator = 1, isReturn = 1, isBarrier = 1 in {

@@ -955,8 +955,8 @@ def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
 def HasMFence : Predicate<"Subtarget->hasMFence()">;
-def UseRetpoline : Predicate<"Subtarget->useRetpoline()">;
-def NotUseRetpoline : Predicate<"!Subtarget->useRetpoline()">;
+def UseRetpolineIndirectCalls : Predicate<"Subtarget->useRetpolineIndirectCalls()">;
+def NotUseRetpolineIndirectCalls : Predicate<"!Subtarget->useRetpolineIndirectCalls()">;
 
 //===----------------------------------------------------------------------===//
 // X86 Instruction Format Definitions.

@@ -898,7 +898,7 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
       break;
     case MachineOperand::MO_Register:
       // FIXME: Add retpoline support and remove this.
-      if (Subtarget->useRetpoline())
+      if (Subtarget->useRetpolineIndirectCalls())
        report_fatal_error("Lowering register statepoints with retpoline not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());

@@ -1055,7 +1055,7 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
     EmitAndCountInstruction(
         MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
     // FIXME: Add retpoline support and remove this.
-    if (Subtarget->useRetpoline())
+    if (Subtarget->useRetpolineIndirectCalls())
       report_fatal_error(
           "Lowering patchpoint with retpoline not yet implemented.");
     EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));

@@ -115,7 +115,9 @@ bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
   // FIXME: It's a little silly to look at every function just to enumerate
   // the subtargets, but eventually we'll want to look at them for indirect
   // calls, so maybe this is OK.
-  if (!STI->useRetpoline() || STI->useRetpolineExternalThunk())
+  if ((!STI->useRetpolineIndirectCalls() &&
+       !STI->useRetpolineIndirectBranches()) ||
+      STI->useRetpolineExternalThunk())
     return false;
 
   // Otherwise, we need to insert the thunk.

@@ -387,7 +387,15 @@ protected:
 
   /// Use a retpoline thunk rather than indirect calls to block speculative
   /// execution.
-  bool UseRetpoline = false;
+  bool UseRetpolineIndirectCalls = false;
+
+  /// Use a retpoline thunk or remove any indirect branch to block speculative
+  /// execution.
+  bool UseRetpolineIndirectBranches = false;
+
+  /// Deprecated flag, query `UseRetpolineIndirectCalls` and
+  /// `UseRetpolineIndirectBranches` instead.
+  bool DeprecatedUseRetpoline = false;
 
   /// When using a retpoline thunk, call an externally provided thunk rather
   /// than emitting one inside the compiler.

@@ -649,7 +657,10 @@
   bool hasPCONFIG() const { return HasPCONFIG; }
   bool hasSGX() const { return HasSGX; }
   bool hasINVPCID() const { return HasINVPCID; }
-  bool useRetpoline() const { return UseRetpoline; }
+  bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
+  bool useRetpolineIndirectBranches() const {
+    return UseRetpolineIndirectBranches;
+  }
   bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
 
   unsigned getPreferVectorWidth() const { return PreferVectorWidth; }

@@ -804,7 +815,9 @@ public:
 
   /// If we are using retpolines, we need to expand indirectbr to avoid it
   /// lowering to an actual indirect jump.
-  bool enableIndirectBrExpand() const override { return useRetpoline(); }
+  bool enableIndirectBrExpand() const override {
+    return useRetpolineIndirectBranches();
+  }
 
   /// Enable the MachineScheduler pass for all X86 subtargets.
   bool enableMachineScheduler() const override { return true; }

@@ -163,4 +163,4 @@ define void @direct_tail() #0 {
 ; X86FAST-NOT: __{{.*}}_retpoline_{{.*}}:
 
 
-attributes #0 = { "target-features"="+retpoline-external-thunk" }
+attributes #0 = { "target-features"="+retpoline-indirect-calls,+retpoline-external-thunk" }

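A note on the external-thunk feature, illustrated with a hypothetical module (not one of this patch's tests): per the new `FeatureRetpolineExternalThunk` definition above, the feature now implies only `retpoline-indirect-calls` rather than the full umbrella feature, so a module like the following would get its indirect calls lowered through the user-provided thunk while indirect branches and jump tables stay untouched.

    ; Hypothetical: `retpoline-external-thunk` plus its implied
    ; `retpoline-indirect-calls`; the compiler emits no thunk body and relies
    ; on the externally provided one instead.
    define void @external_thunk_only(void ()* %f) #0 {
    entry:
      call void %f()
      ret void
    }

    attributes #0 = { "target-features"="+retpoline-external-thunk" }
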
@@ -38,5 +38,5 @@ entry:
 ; CHECK: popl %edi
 ; CHECK: retl
 
-attributes #0 = { "target-features"="+retpoline" }
-attributes #1 = { "target-features"="+retpoline-external-thunk" }
+attributes #0 = { "target-features"="+retpoline-indirect-calls" }
+attributes #1 = { "target-features"="+retpoline-indirect-calls,+retpoline-external-thunk" }

@@ -156,7 +156,7 @@ define void @direct_tail() #0 {
 ; X86FAST: jmp direct_callee # TAILCALL
 
 
-declare void @nonlazybind_callee() #1
+declare void @nonlazybind_callee() #2
 
 define void @nonlazybind_caller() #0 {
   call void @nonlazybind_callee()

@@ -183,6 +183,153 @@ define void @nonlazybind_caller() #0 {
 ; X86FAST: jmp nonlazybind_callee@PLT # TAILCALL
 
 
+; Check that a switch gets lowered using a jump table when retpolines are only
+; enabled for calls.
+define void @switch_jumptable(i32* %ptr, i64* %sink) #0 {
+; X64-LABEL: switch_jumptable:
+; X64: jmpq *
+; X86-LABEL: switch_jumptable:
+; X86: jmpl *
+entry:
+  br label %header
+
+header:
+  %i = load volatile i32, i32* %ptr
+  switch i32 %i, label %bb0 [
+    i32 1, label %bb1
+    i32 2, label %bb2
+    i32 3, label %bb3
+    i32 4, label %bb4
+    i32 5, label %bb5
+    i32 6, label %bb6
+    i32 7, label %bb7
+    i32 8, label %bb8
+    i32 9, label %bb9
+  ]
+
+bb0:
+  store volatile i64 0, i64* %sink
+  br label %header
+
+bb1:
+  store volatile i64 1, i64* %sink
+  br label %header
+
+bb2:
+  store volatile i64 2, i64* %sink
+  br label %header
+
+bb3:
+  store volatile i64 3, i64* %sink
+  br label %header
+
+bb4:
+  store volatile i64 4, i64* %sink
+  br label %header
+
+bb5:
+  store volatile i64 5, i64* %sink
+  br label %header
+
+bb6:
+  store volatile i64 6, i64* %sink
+  br label %header
+
+bb7:
+  store volatile i64 7, i64* %sink
+  br label %header
+
+bb8:
+  store volatile i64 8, i64* %sink
+  br label %header
+
+bb9:
+  store volatile i64 9, i64* %sink
+  br label %header
+}
+
+
+@indirectbr_preserved.targets = constant [10 x i8*] [i8* blockaddress(@indirectbr_preserved, %bb0),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb1),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb2),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb3),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb4),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb5),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb6),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb7),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb8),
+                                                     i8* blockaddress(@indirectbr_preserved, %bb9)]
+
+; Check that we preserve indirectbr when only calls are retpolined.
+define void @indirectbr_preserved(i64* readonly %p, i64* %sink) #0 {
+; X64-LABEL: indirectbr_preserved:
+; X64: jmpq *
+; X86-LABEL: indirectbr_preserved:
+; X86: jmpl *
+entry:
+  %i0 = load i64, i64* %p
+  %target.i0 = getelementptr [10 x i8*], [10 x i8*]* @indirectbr_preserved.targets, i64 0, i64 %i0
+  %target0 = load i8*, i8** %target.i0
+  indirectbr i8* %target0, [label %bb1, label %bb3]
+
+bb0:
+  store volatile i64 0, i64* %sink
+  br label %latch
+
+bb1:
+  store volatile i64 1, i64* %sink
+  br label %latch
+
+bb2:
+  store volatile i64 2, i64* %sink
+  br label %latch
+
+bb3:
+  store volatile i64 3, i64* %sink
+  br label %latch
+
+bb4:
+  store volatile i64 4, i64* %sink
+  br label %latch
+
+bb5:
+  store volatile i64 5, i64* %sink
+  br label %latch
+
+bb6:
+  store volatile i64 6, i64* %sink
+  br label %latch
+
+bb7:
+  store volatile i64 7, i64* %sink
+  br label %latch
+
+bb8:
+  store volatile i64 8, i64* %sink
+  br label %latch
+
+bb9:
+  store volatile i64 9, i64* %sink
+  br label %latch
+
+latch:
+  %i.next = load i64, i64* %p
+  %target.i.next = getelementptr [10 x i8*], [10 x i8*]* @indirectbr_preserved.targets, i64 0, i64 %i.next
+  %target.next = load i8*, i8** %target.i.next
+  ; Potentially hit a full 10 successors here so that even if we rewrite as
+  ; a switch it will try to be lowered with a jump table.
+  indirectbr i8* %target.next, [label %bb0,
+                                label %bb1,
+                                label %bb2,
+                                label %bb3,
+                                label %bb4,
+                                label %bb5,
+                                label %bb6,
+                                label %bb7,
+                                label %bb8,
+                                label %bb9]
+}
+
 @indirectbr_rewrite.targets = constant [10 x i8*] [i8* blockaddress(@indirectbr_rewrite, %bb0),
                                                    i8* blockaddress(@indirectbr_rewrite, %bb1),
                                                    i8* blockaddress(@indirectbr_rewrite, %bb2),

@@ -194,10 +341,10 @@ define void @nonlazybind_caller() #0 {
                                                    i8* blockaddress(@indirectbr_rewrite, %bb8),
                                                    i8* blockaddress(@indirectbr_rewrite, %bb9)]
 
-; Check that when retpolines are enabled a function with indirectbr gets
-; rewritten to use switch, and that in turn doesn't get lowered as a jump
-; table.
-define void @indirectbr_rewrite(i64* readonly %p, i64* %sink) #0 {
+; Check that when retpolines are enabled for indirect branches the indirectbr
+; instruction gets rewritten to use switch, and that in turn doesn't get lowered
+; as a jump table.
+define void @indirectbr_rewrite(i64* readonly %p, i64* %sink) #1 {
 ; X64-LABEL: indirectbr_rewrite:
 ; X64-NOT: jmpq
 ; X86-LABEL: indirectbr_rewrite:

@@ -359,5 +506,6 @@ latch:
 ; X86-NEXT: retl
 
 
-attributes #0 = { "target-features"="+retpoline" }
-attributes #1 = { nonlazybind }
+attributes #0 = { "target-features"="+retpoline-indirect-calls" }
+attributes #1 = { "target-features"="+retpoline-indirect-calls,+retpoline-indirect-branches" }
+attributes #2 = { nonlazybind }