[RISCV] Add target specific loop unrolling and peeling preferences

This change adds initial support for both of these preference hooks.
The loop unrolling preferences set thresholds, size limits, and loop
attributes that control which loops get unrolled, with some tuning
already applied. The peeling preferences will likely need further
tuning; the initial implementation closely mirrors what other
architectures already do.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D113798
Author: Michael Berg
Date:   2021-12-17 18:31:29 -08:00
Parent: cc4781464f
Commit: f95ee6074a
4 changed files with 265 additions and 0 deletions
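
For context, the two hooks this patch adds are not called directly; loop
passes reach them through the TargetTransformInfo wrapper. The following is a
rough, self-contained C++ sketch of how a client could query them. The helper
name dumpUnrollPrefs and the seeded default values are illustrative only and
are not part of this patch.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Query the target's unrolling and peeling preferences the same way the
// unroller does: seed an UnrollingPreferences struct, then let the target
// hook adjust it.
static void dumpUnrollPrefs(Loop &L, ScalarEvolution &SE,
                            const TargetTransformInfo &TTI,
                            OptimizationRemarkEmitter &ORE) {
  // Zero-init for the sketch; the real pass seeds every field from its
  // defaults and command-line flags before calling the target hook.
  TargetTransformInfo::UnrollingPreferences UP = {};
  UP.Threshold = 150;
  UP.PartialThreshold = 150;
  TTI.getUnrollingPreferences(&L, SE, UP, &ORE);

  TargetTransformInfo::PeelingPreferences PP = {};
  TTI.getPeelingPreferences(&L, SE, PP);

  errs() << "partial: " << UP.Partial << " runtime: " << UP.Runtime
         << " upper bound: " << UP.UpperBound
         << " peel count: " << PP.PeelCount << "\n";
}

With the tune CPUs listed below (sifive-e76, sifive-s76, sifive-u74, or the
sifive-7 names), the RISCV override takes effect; any other tune CPU falls
through to the generic BasicTTIImplBase behavior.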


@@ -162,3 +162,94 @@ InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
      getMemoryOpCost(Opcode, VTy->getElementType(), Alignment, 0, CostKind, I);
  return NumLoads * MemOpCost;
}
void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::UnrollingPreferences &UP,
                                           OptimizationRemarkEmitter *ORE) {
  // TODO: More tuning on benchmarks and metrics with changes as needed
  //       would apply to all settings below to enable performance.

  // Support explicit targets enabled for SiFive with the unrolling preferences
  // below
  bool UseDefaultPreferences = true;
  if (ST->getTuneCPU().contains("sifive-e76") ||
      ST->getTuneCPU().contains("sifive-s76") ||
      ST->getTuneCPU().contains("sifive-u74") ||
      ST->getTuneCPU().contains("sifive-7"))
    UseDefaultPreferences = false;

  if (UseDefaultPreferences)
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);

  // Enable Upper bound unrolling universally, not dependent upon the
  // conditions below.
  UP.UpperBound = true;

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (L->getNumBlocks() > 4)
    return;

  // Don't unroll vectorized loops, including the remainder loop
  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  InstructionCost Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Initial setting - Don't unroll loops containing vectorized
      // instructions.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      SmallVector<const Value *> Operands(I.operand_values());
      Cost +=
          getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}


@@ -73,6 +73,13 @@ public:
    llvm_unreachable("Unsupported register kind");
  }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  unsigned getMinVectorRegisterBitWidth() const {
    return ST->hasVInstructions() ? ST->getMinRVVVectorSizeInBits() : 0;
  }


@@ -0,0 +1,5 @@
config.suffixes = ['.ll']
targets = set(config.root.targets_to_build.split())
if not 'RISCV' in targets:
config.unsupported = True


@@ -0,0 +1,162 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -mtriple=riscv64 -loop-unroll -mcpu=sifive-7-rv64 | FileCheck %s
define dso_local void @saxpy(float %a, float* %x, float* %y) {
; CHECK-LABEL: @saxpy(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_15:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP0]], [[A:%.*]]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[Y:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL]], [[TMP1]]
; CHECK-NEXT: store float [[ADD]], float* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP2]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL_1]], [[TMP3]]
; CHECK-NEXT: store float [[ADD_1]], float* [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT]], 1
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP4]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
; CHECK-NEXT: store float [[ADD_2]], float* [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_1]], 1
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP6]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL_3]], [[TMP7]]
; CHECK-NEXT: store float [[ADD_3]], float* [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_2]], 1
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[MUL_4:%.*]] = fmul fast float [[TMP8]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX2_4]], align 4
; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float [[MUL_4]], [[TMP9]]
; CHECK-NEXT: store float [[ADD_4]], float* [[ARRAYIDX2_4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_3]], 1
; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX_5]], align 4
; CHECK-NEXT: [[MUL_5:%.*]] = fmul fast float [[TMP10]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2_5]], align 4
; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float [[MUL_5]], [[TMP11]]
; CHECK-NEXT: store float [[ADD_5]], float* [[ARRAYIDX2_5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_4]], 1
; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX_6]], align 4
; CHECK-NEXT: [[MUL_6:%.*]] = fmul fast float [[TMP12]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX2_6]], align 4
; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float [[MUL_6]], [[TMP13]]
; CHECK-NEXT: store float [[ADD_6]], float* [[ARRAYIDX2_6]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_5]], 1
; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX_7]], align 4
; CHECK-NEXT: [[MUL_7:%.*]] = fmul fast float [[TMP14]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX2_7]], align 4
; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float [[MUL_7]], [[TMP15]]
; CHECK-NEXT: store float [[ADD_7]], float* [[ARRAYIDX2_7]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_7:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_6]], 1
; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX_8]], align 4
; CHECK-NEXT: [[MUL_8:%.*]] = fmul fast float [[TMP16]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_8:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX2_8]], align 4
; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float [[MUL_8]], [[TMP17]]
; CHECK-NEXT: store float [[ADD_8]], float* [[ARRAYIDX2_8]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_8:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_7]], 1
; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX_9]], align 4
; CHECK-NEXT: [[MUL_9:%.*]] = fmul fast float [[TMP18]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_9:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX2_9]], align 4
; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float [[MUL_9]], [[TMP19]]
; CHECK-NEXT: store float [[ADD_9]], float* [[ARRAYIDX2_9]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_9:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_8]], 1
; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX_10]], align 4
; CHECK-NEXT: [[MUL_10:%.*]] = fmul fast float [[TMP20]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_10:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX2_10]], align 4
; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float [[MUL_10]], [[TMP21]]
; CHECK-NEXT: store float [[ADD_10]], float* [[ARRAYIDX2_10]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_10:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_9]], 1
; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX_11]], align 4
; CHECK-NEXT: [[MUL_11:%.*]] = fmul fast float [[TMP22]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_11:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX2_11]], align 4
; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float [[MUL_11]], [[TMP23]]
; CHECK-NEXT: store float [[ADD_11]], float* [[ARRAYIDX2_11]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_11:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_10]], 1
; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT: [[TMP24:%.*]] = load float, float* [[ARRAYIDX_12]], align 4
; CHECK-NEXT: [[MUL_12:%.*]] = fmul fast float [[TMP24]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_12:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX2_12]], align 4
; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float [[MUL_12]], [[TMP25]]
; CHECK-NEXT: store float [[ADD_12]], float* [[ARRAYIDX2_12]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_12:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_11]], 1
; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, float* [[ARRAYIDX_13]], align 4
; CHECK-NEXT: [[MUL_13:%.*]] = fmul fast float [[TMP26]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_13:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT: [[TMP27:%.*]] = load float, float* [[ARRAYIDX2_13]], align 4
; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float [[MUL_13]], [[TMP27]]
; CHECK-NEXT: store float [[ADD_13]], float* [[ARRAYIDX2_13]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_13:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_12]], 1
; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX_14]], align 4
; CHECK-NEXT: [[MUL_14:%.*]] = fmul fast float [[TMP28]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_14:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT: [[TMP29:%.*]] = load float, float* [[ARRAYIDX2_14]], align 4
; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float [[MUL_14]], [[TMP29]]
; CHECK-NEXT: store float [[ADD_14]], float* [[ARRAYIDX2_14]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_14:%.*]] = add nuw nsw i64 [[INDVARS_IV_NEXT_13]], 1
; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT: [[TMP30:%.*]] = load float, float* [[ARRAYIDX_15]], align 4
; CHECK-NEXT: [[MUL_15:%.*]] = fmul fast float [[TMP30]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_15:%.*]] = getelementptr inbounds float, float* [[Y]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX2_15]], align 4
; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float [[MUL_15]], [[TMP31]]
; CHECK-NEXT: store float [[ADD_15]], float* [[ARRAYIDX2_15]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_15]] = add nuw nsw i64 [[INDVARS_IV_NEXT_14]], 1
; CHECK-NEXT: [[EXITCOND_NOT_15:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_15]], 64
; CHECK-NEXT: br i1 [[EXITCOND_NOT_15]], label [[EXIT_LOOP:%.*]], label [[FOR_BODY]]
; CHECK: exit_loop:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %mul = fmul fast float %0, %a
  %arrayidx2 = getelementptr inbounds float, float* %y, i64 %indvars.iv
  %1 = load float, float* %arrayidx2, align 4
  %add = fadd fast float %mul, %1
  store float %add, float* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond.not, label %exit_loop, label %for.body

exit_loop:
  ret void
}
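
For reference, the IR in this test roughly corresponds to a fixed-trip-count
saxpy loop written in C/C++ and built with fast-math; this source form is
reconstructed from the IR and is not a file in the patch.

// Approximate source for the @saxpy test above; the 64-iteration bound
// matches the icmp against 64 in the loop latch.
void saxpy(float a, float *x, float *y) {
  for (long i = 0; i < 64; ++i)
    y[i] = a * x[i] + y[i];
}

With -mcpu=sifive-7-rv64 the tuned preferences apply, and -loop-unroll unrolls
the 64-iteration body by a factor of 16, which is what the CHECK lines above
verify.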