diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2689212795ec..ebbffa1c82fe 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -71,14 +71,6 @@ using namespace llvm;
 
 STATISTIC(NumTailCalls, "Number of tail calls");
 
-static cl::opt<int> ExperimentalPrefLoopAlignment(
-    "x86-experimental-pref-loop-alignment", cl::init(4),
-    cl::desc(
-        "Sets the preferable loop alignment for experiments (as log2 bytes)"
-        "(the last x86-experimental-pref-loop-alignment bits"
-        " of the loop header PC will be 0)."),
-    cl::Hidden);
-
 static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
     "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
     cl::desc(
@@ -2070,8 +2062,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   MaxLoadsPerMemcmp = 2;
   MaxLoadsPerMemcmpOptSize = 2;
 
-  // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
-  setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
+  // Default loop alignment, which can be overridden by -align-loops.
+  setPrefLoopAlignment(Align(16));
 
   // An out-of-order CPU can speculatively execute past a predictable branch,
   // but a conditional move could be stalled by an expensive earlier operation.
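For context (this note is not part of the diff): the removed flag expressed alignment as log2 bytes and defaulted to 4, so the old effective default was Align(1ULL << 4), i.e. 16 bytes, which the new hard-coded Align(16) preserves. Users of the removed X86-only flag would migrate to the target-independent -align-loops override named in the new comment; it is assumed below that -align-loops takes a byte count (so an old value of 5 would map to 32), which should be checked against the LLVM version in use. A minimal standalone sketch of the arithmetic, independent of LLVM headers:

```cpp
// Sanity check of the default-preserving change (plain C++, not LLVM code).
#include <cassert>
#include <cstdint>

int main() {
  // The removed flag used cl::init(4) and was interpreted as log2 bytes.
  const unsigned OldLog2Default = 4;
  const uint64_t OldDefaultBytes = UINT64_C(1) << OldLog2Default;
  assert(OldDefaultBytes == 16); // matches the new setPrefLoopAlignment(Align(16))

  // Hypothetical migration of a non-default value, assuming -align-loops
  // takes bytes: -x86-experimental-pref-loop-alignment=5 -> -align-loops=32.
  const unsigned OldLog2Value = 5;
  assert((UINT64_C(1) << OldLog2Value) == 32);
  return 0;
}
```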