Revert "[OpenMP] Codegen aggregate for outlined function captures"

This reverts commit 7539e9cf81.
Dhruva Chakrabarti 2022-09-15 03:08:46 +00:00
parent f221720e82
commit 839ac62c50
222 changed files with 156115 additions and 180130 deletions
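For context, a minimal C++ sketch (hypothetical names, not the symbols clang actually emits) of the two capture conventions this revert switches between: the aggregate scheme packed every capture of the region into one record and passed a single pointer to the outlined function, while the restored scheme passes one argument per capture.

// Illustrative only: the two outlined-function shapes.
struct anon { int x; int *y; };  // the __context record clang builds

// Aggregate convention (removed by this revert): one struct pointer.
void outlined_agg(const int *gtid, const int *btid, anon *ctx) {
  ctx->x += *ctx->y;
}

// Per-variable convention (restored): one parameter per capture.
void outlined_vars(const int *gtid, const int *btid, int *x, int *y) {
  *x += *y;
}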

clang/lib/CodeGen/CGOpenMPRuntime.cpp

@@ -1263,7 +1263,7 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
HasCancel, OutlinedHelperName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
return CGF.GenerateOpenMPCapturedStmtFunctionAggregate(*CS, D.getBeginLoc());
return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
@@ -2036,11 +2036,11 @@ void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
void CGOpenMPRuntime::emitParallelCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes, const Expr *IfCond,
llvm::Value *NumThreads) {
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond,
llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
@@ -12793,11 +12793,12 @@ llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitParallelCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes, const Expr *IfCond,
llvm::Value *NumThreads) {
void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond,
llvm::Value *NumThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
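The CapturedVarsElemTypes parameter dropped above existed only because the aggregate scheme needed the record's element type to compute the allocation size; with one value per capture that information is redundant. For reference, a hedged sketch of the host-side call shape under the reverted aggregate scheme (real libomp microtask/ident_t details elided):

// Sketch only; assumes the usual __kmpc_fork_call entry point.
extern "C" void __kmpc_fork_call(void *loc, int nargs, void *microtask, ...);
struct anon { int x; int *y; };
extern void outlined_agg(const int *gtid, const int *btid, anon *ctx);

void emit_parallel_agg(anon *ctx) {
  // Reverted scheme: always exactly one aggregate pointer argument.
  __kmpc_fork_call(nullptr, /*nargs=*/1, (void *)&outlined_agg, ctx);
}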

clang/lib/CodeGen/CGOpenMPRuntime.h

@@ -1018,7 +1018,6 @@ public:
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes,
const Expr *IfCond, llvm::Value *NumThreads);
/// Emits a critical region.
@@ -1997,7 +1996,6 @@ public:
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes,
const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emits a critical region.

clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp

@@ -1236,17 +1236,18 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}
void CGOpenMPRuntimeGPU::emitParallelCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes, const Expr *IfCond,
llvm::Value *NumThreads) {
void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond,
llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
CapturedVarsElemTypes, IfCond, NumThreads](
CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
NumThreads](CodeGenFunction &CGF,
PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
llvm::Value *NumThreadsVal = NumThreads;
llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
@@ -1260,49 +1261,25 @@ void CGOpenMPRuntimeGPU::emitParallelCall(
// TODO: Is that needed?
CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
assert(CapturedVars.size() == 1 &&
"Expected single aggregate argument to outlined function");
// Globalize the single aggregate argument, if needed, or use a local
// alloca, or emit null when there are no arguments.
llvm::Value *AggregateV = CapturedVars[0];
assert(AggregateV->getType()->isPointerTy() &&
"Expected pointer type for aggregate argument.");
assert(CapturedVarsElemTypes.size() == 1 &&
"Expected single element in array of types.");
llvm::Type *PtrElemTy = CapturedVarsElemTypes[0];
auto &DL = CGM.getDataLayout();
unsigned AllocSize = DL.getTypeAllocSize(PtrElemTy);
llvm::CallBase *GlobalPtr = nullptr;
llvm::Value *AggregatePtr = nullptr;
if (AllocSize) {
llvm::AllocaInst *LocalAlloc =
CGF.CreateTempAlloca(PtrElemTy, ".tmp.outlined.agg.arg");
llvm::Value *LocalPtr = Bld.CreatePointerCast(LocalAlloc, CGF.VoidPtrTy);
GlobalPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
{llvm::ConstantInt::get(CGM.SizeTy, AllocSize)});
GlobalPtr->addRetAttr(llvm::Attribute::get(
CGM.getLLVMContext(), llvm::Attribute::Alignment,
CGM.getContext().getTargetInfo().getNewAlign() / 8));
llvm::Value *AllocArgs[] = {LocalPtr, GlobalPtr};
AggregatePtr = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_aggregate_arg),
AllocArgs);
llvm::Value *CapturedVarVal = Bld.CreateAlignedLoad(
PtrElemTy, AggregateV, DL.getABITypeAlign(PtrElemTy));
llvm::Value *AggregatePtrCast = Bld.CreatePointerBitCastOrAddrSpaceCast(
AggregatePtr, PtrElemTy->getPointerTo());
Bld.CreateDefaultAlignedStore(CapturedVarVal, AggregatePtrCast);
} else {
AggregatePtr = llvm::Constant::getNullValue(OMPBuilder.VoidPtr);
Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
"captured_vars_addrs");
// There's something to share.
if (!CapturedVars.empty()) {
// Prepare for parallel region. Indicate the outlined function.
ASTContext &Ctx = CGF.getContext();
unsigned Idx = 0;
for (llvm::Value *V : CapturedVars) {
Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
llvm::Value *PtrV;
if (V->getType()->isIntegerTy())
PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
else
PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
Ctx.getPointerType(Ctx.VoidPtrTy));
++Idx;
}
}
llvm::Value *IfCondVal = nullptr;
@@ -1318,29 +1295,21 @@ void CGOpenMPRuntimeGPU::emitParallelCall(
NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty),
assert(IfCondVal && "Expected a value");
assert(AggregatePtr && "Expected non-null aggregate pointer value");
// Create the parallel call.
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *Args[] = {RTLoc,
getThreadID(CGF, Loc),
IfCondVal,
NumThreadsVal,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
AggregatePtr};
llvm::Value *Args[] = {
RTLoc,
getThreadID(CGF, Loc),
IfCondVal,
NumThreadsVal,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
CGF.VoidPtrPtrTy),
llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_parallel_51),
Args);
if (AllocSize) {
assert(GlobalPtr && "Expected non-null global pointer value");
// Pop global memory used for argument allocation.
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_free_shared),
{GlobalPtr, llvm::ConstantInt::get(CGM.SizeTy, AllocSize)});
}
};
RegionCodeGenTy RCG(ParallelGen);
@@ -3278,6 +3247,7 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
D.getBeginLoc(), D.getBeginLoc());
const auto *RD = CS.getCapturedRecordDecl();
auto CurField = RD->field_begin();
Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".zero.addr");
@@ -3289,44 +3259,70 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
Args.emplace_back(ZeroAddr.getPointer());
CGBuilderTy &Bld = CGF.Builder;
auto CI = CS.capture_begin();
// Use global memory for data sharing.
// Handle passing of global args to workers.
Address GlobalArgs =
CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrTy, "global_args");
CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_get_shared_variables_aggregate),
DataSharingArgs);
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
DataSharingArgs);
// Retrieve the shared variables from the list of references returned
// by the runtime. Pass the variables to the outlined function.
Address SharedArgAggregateAddress = Address::invalid();
if (CS.capture_size() > 0) {
SharedArgAggregateAddress = CGF.EmitLoadOfPointer(
Address SharedArgListAddress = Address::invalid();
if (CS.capture_size() > 0 ||
isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
SharedArgListAddress = CGF.EmitLoadOfPointer(
GlobalArgs, CGF.getContext()
.getPointerType(CGF.getContext().VoidTy)
.getPointerType(CGF.getContext().VoidPtrTy)
.castAs<PointerType>());
// Load the outlined arg aggregate struct.
ASTContext &CGFContext = CGF.getContext();
QualType RecordPointerTy =
CGFContext.getPointerType(CGFContext.getRecordType(RD));
}
unsigned Idx = 0;
if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
SharedArgAggregateAddress,
CGF.ConvertTypeForMem(RecordPointerTy)->getPointerTo(),
CGF.ConvertTypeForMem(RecordPointerTy));
llvm::Value *Arg = TypedAddress.getPointer();
Args.emplace_back(Arg);
} else {
// If there are no captured arguments, use nullptr.
Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *LB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
Args.emplace_back(LB);
++Idx;
Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *UB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
Args.emplace_back(UB);
++Idx;
}
if (CS.capture_size() > 0) {
ASTContext &CGFContext = CGF.getContext();
QualType RecordPointerTy =
CGFContext.getPointerType(CGFContext.getRecordType(RD));
llvm::Value *Arg =
llvm::Constant::getNullValue(CGF.ConvertTypeForMem(RecordPointerTy));
Args.emplace_back(Arg);
for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
QualType ElemTy = CurField->getType();
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
CGF.ConvertTypeForMem(ElemTy));
llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
/*Volatile=*/false,
CGFContext.getPointerType(ElemTy),
CI->getLocation());
if (CI->capturesVariableByCopy() &&
!CI->getCapturedVar()->getType()->isAnyPointerType()) {
Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
CI->getLocation());
}
Args.emplace_back(Arg);
}
}
emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
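After the revert, the wrapper emitted above boils down to: ask the runtime for the shared-argument list, peel off the previous lower/upper bounds when the directive shares loop bounds, then load one value per remaining capture. A rough C++ sketch under those assumptions (slot layout as in the code above; wrapper_outlined and the single int* capture are hypothetical stand-ins):

extern "C" void __kmpc_get_shared_variables(void ***args);
extern void wrapper_outlined(int *gtid, int *btid, unsigned long lb,
                             unsigned long ub, int *x);

void parallel_wrapper(unsigned short /*level*/, int gtid) {
  int zero = 0;
  void **shared;
  __kmpc_get_shared_variables(&shared);
  // Loop-bound-sharing directives place the previous chunk bounds first,
  // passed by value through the pointer-sized slots.
  unsigned long lb = (unsigned long)shared[0];  // .previous.lb.
  unsigned long ub = (unsigned long)shared[1];  // .previous.ub.
  int *x = (int *)shared[2];                    // then one slot per capture
  wrapper_outlined(&gtid, &zero, lb, ub, x);
}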

clang/lib/CodeGen/CGOpenMPRuntimeGPU.h

@@ -260,7 +260,6 @@ public:
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
ArrayRef<llvm::Type *> CapturedVarsElemTypes,
const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.

clang/lib/CodeGen/CGStmtOpenMP.cpp

@@ -90,19 +90,6 @@ public:
auto *VD = C.getCapturedVar();
assert(VD == VD->getCanonicalDecl() &&
"Canonical decl must be captured.");
// Skip implicit captures for combined distribute loop bounds,
// those will be handled by later codegen.
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) {
const auto *LoopDirective = cast<OMPLoopDirective>(&S);
VarDecl *PrevLB = cast<VarDecl>(
cast<DeclRefExpr>(LoopDirective->getPrevLowerBoundVariable())
->getDecl());
VarDecl *PrevUB = cast<VarDecl>(
cast<DeclRefExpr>(LoopDirective->getPrevUpperBoundVariable())
->getDecl());
if (VD == PrevLB || VD == PrevUB)
continue;
}
DeclRefExpr DRE(
CGF.getContext(), const_cast<VarDecl *>(VD),
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
@@ -278,19 +265,6 @@ public:
continue;
assert(VD == VD->getCanonicalDecl() &&
"Canonical decl must be captured.");
// Skip implicit captures for combined distribute loop bounds,
// those will be handled by later codegen.
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) {
const auto *LoopDirective = cast<OMPLoopDirective>(&S);
VarDecl *PrevLB = cast<VarDecl>(
cast<DeclRefExpr>(LoopDirective->getPrevLowerBoundVariable())
->getDecl());
VarDecl *PrevUB = cast<VarDecl>(
cast<DeclRefExpr>(LoopDirective->getPrevUpperBoundVariable())
->getDecl());
if (VD == PrevLB || VD == PrevUB)
continue;
}
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
isCapturedVar(CGF, VD) ||
(CGF.CapturedStmtInfo &&
@@ -348,34 +322,6 @@ llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
return CGM.getSize(SizeInChars);
}
void CodeGenFunction::GenerateOpenMPCapturedVarsAggregate(
const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars,
SmallVectorImpl<llvm::Type *> &CapturedVarsElemTypes) {
const RecordDecl *RD = S.getCapturedRecordDecl();
QualType RecordTy = getContext().getRecordType(RD);
// Create the aggregate argument struct for the outlined function.
LValue AggLV = MakeAddrLValue(
CreateMemTemp(RecordTy, "omp.outlined.arg.agg."), RecordTy);
// Initialize the aggregate with captured values.
auto CurField = RD->field_begin();
for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
E = S.capture_init_end();
I != E; ++I, ++CurField) {
LValue LV = EmitLValueForFieldInitialization(AggLV, *CurField);
// Initialize for VLA.
if (CurField->hasCapturedVLAType()) {
EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
} else
// Initialize for capturesThis, capturesVariableByCopy,
// capturesVariable
EmitInitializerForField(*CurField, LV, *I);
}
CapturedVars.push_back(AggLV.getPointer(*this));
CapturedVarsElemTypes.push_back(ConvertTypeForMem(AggLV.getType()));
}
void CodeGenFunction::GenerateOpenMPCapturedVars(
const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
const RecordDecl *RD = S.getCapturedRecordDecl();
@@ -475,103 +421,6 @@ struct FunctionOptions {
};
} // namespace
static llvm::Function *emitOutlinedFunctionPrologueAggregate(
CodeGenFunction &CGF, FunctionArgList &Args,
llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
&LocalAddrs,
llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
&VLASizes,
llvm::Value *&CXXThisValue, const CapturedStmt &CS, SourceLocation Loc,
StringRef FunctionName) {
const CapturedDecl *CD = CS.getCapturedDecl();
const RecordDecl *RD = CS.getCapturedRecordDecl();
assert(CD->hasBody() && "missing CapturedDecl body");
CXXThisValue = nullptr;
// Build the argument list.
CodeGenModule &CGM = CGF.CGM;
ASTContext &Ctx = CGM.getContext();
Args.append(CD->param_begin(), CD->param_end());
// Create the function declaration.
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
auto *F =
llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
FunctionName, &CGM.getModule());
CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
if (CD->isNothrow())
F->setDoesNotThrow();
F->setDoesNotRecurse();
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, Loc, Loc);
Address ContextAddr = CGF.GetAddrOfLocalVar(CD->getContextParam());
llvm::Value *ContextV = CGF.Builder.CreateLoad(ContextAddr);
LValue ContextLV = CGF.MakeNaturalAlignAddrLValue(
ContextV, CGM.getContext().getTagDeclType(RD));
const auto *I = CS.captures().begin();
for (const FieldDecl *FD : RD->fields()) {
LValue FieldLV = CGF.EmitLValueForFieldInitialization(ContextLV, FD);
// Do not map arguments if we emit function with non-original types.
Address LocalAddr = FieldLV.getAddress(CGF);
// If we are capturing a pointer by copy we don't need to do anything, just
// use the value that we get from the arguments.
if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
const VarDecl *CurVD = I->getCapturedVar();
LocalAddrs.insert({FD, {CurVD, LocalAddr}});
++I;
continue;
}
LValue ArgLVal =
CGF.MakeAddrLValue(LocalAddr, FD->getType(), AlignmentSource::Decl);
if (FD->hasCapturedVLAType()) {
llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
const VariableArrayType *VAT = FD->getCapturedVLAType();
VLASizes.try_emplace(FD, VAT->getSizeExpr(), ExprArg);
} else if (I->capturesVariable()) {
const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
Address ArgAddr = ArgLVal.getAddress(CGF);
if (ArgLVal.getType()->isLValueReferenceType()) {
ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
} else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
assert(ArgLVal.getType()->isPointerType());
ArgAddr = CGF.EmitLoadOfPointer(
ArgAddr, ArgLVal.getType()->castAs<PointerType>());
}
LocalAddrs.insert(
{FD,
{Var, Address(ArgAddr.getPointer(), ArgAddr.getElementType(),
Ctx.getDeclAlign(Var))}});
} else if (I->capturesVariableByCopy()) {
assert(!FD->getType()->isAnyPointerType() &&
"Not expecting a captured pointer.");
const VarDecl *Var = I->getCapturedVar();
Address CopyAddr = CGF.CreateMemTemp(FD->getType(), Ctx.getDeclAlign(FD),
Var->getName());
LValue CopyLVal =
CGF.MakeAddrLValue(CopyAddr, FD->getType(), AlignmentSource::Decl);
RValue ArgRVal = CGF.EmitLoadOfLValue(ArgLVal, I->getLocation());
CGF.EmitStoreThroughLValue(ArgRVal, CopyLVal);
LocalAddrs.insert({FD, {Var, CopyAddr}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
LocalAddrs.insert({FD, {nullptr, ArgLVal.getAddress(CGF)}});
}
++I;
}
return F;
}
static llvm::Function *emitOutlinedFunctionPrologue(
CodeGenFunction &CGF, FunctionArgList &Args,
llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
@@ -756,38 +605,6 @@ static llvm::Function *emitOutlinedFunctionPrologue(
return F;
}
llvm::Function *CodeGenFunction::GenerateOpenMPCapturedStmtFunctionAggregate(
const CapturedStmt &S, SourceLocation Loc) {
assert(
CapturedStmtInfo &&
"CapturedStmtInfo should be set when generating the captured function");
const CapturedDecl *CD = S.getCapturedDecl();
// Build the argument list.
FunctionArgList Args;
llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
StringRef FunctionName = CapturedStmtInfo->getHelperName();
llvm::Function *F = emitOutlinedFunctionPrologueAggregate(
*this, Args, LocalAddrs, VLASizes, CXXThisValue, S, Loc, FunctionName);
CodeGenFunction::OMPPrivateScope LocalScope(*this);
for (const auto &LocalAddrPair : LocalAddrs) {
if (LocalAddrPair.second.first) {
auto retAddrPair = [&LocalAddrPair]() {
return LocalAddrPair.second.second;
};
LocalScope.addPrivate(LocalAddrPair.second.first, retAddrPair());
}
}
(void)LocalScope.Privatize();
for (const auto &VLASizePair : VLASizes)
VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
PGO.assignRegionCounters(GlobalDecl(CD), F);
CapturedStmtInfo->EmitBody(*this, CD->getBody());
(void)LocalScope.ForceCleanup();
FinishFunction(CD->getBodyRBrace());
return F;
}
llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
SourceLocation Loc) {
@@ -1656,8 +1473,9 @@ namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'
typedef llvm::function_ref<void(
CodeGenFunction &, const OMPExecutableDirective &, const CapturedStmt &)>
typedef llvm::function_ref<void(CodeGenFunction &,
const OMPExecutableDirective &,
llvm::SmallVectorImpl<llvm::Value *> &)>
CodeGenBoundParametersTy;
} // anonymous namespace
@@ -1750,18 +1568,14 @@ static void emitCommonOMPParallelDirective(
OMPParallelScope Scope(CGF, S);
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
llvm::SmallVector<llvm::Type *, 16> CapturedVarsElemTypes;
// Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
// lower and upper bounds with the pragma 'for' chunking mechanism.
// The following lambda takes care of appending the lower and upper bound
// parameters when necessary
CodeGenBoundParameters(CGF, S, *CS);
CGF.GenerateOpenMPCapturedVarsAggregate(*CS, CapturedVars,
CapturedVarsElemTypes);
CGF.CGM.getOpenMPRuntime().emitParallelCall(
CGF, S.getBeginLoc(), OutlinedFn, CapturedVars, CapturedVarsElemTypes,
IfCond, NumThreads);
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
CapturedVars, IfCond, NumThreads);
}
static bool isAllocatableDecl(const VarDecl *VD) {
@@ -1777,7 +1591,7 @@ static bool isAllocatableDecl(const VarDecl *VD) {
static void emitEmptyBoundParameters(CodeGenFunction &,
const OMPExecutableDirective &,
const CapturedStmt &) {}
llvm::SmallVectorImpl<llvm::Value *> &) {}
static void emitOMPCopyinClause(CodeGenFunction &CGF,
const OMPExecutableDirective &S) {
@@ -3292,30 +3106,21 @@ emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
static void emitDistributeParallelForDistributeInnerBoundParams(
CodeGenFunction &CGF, const OMPExecutableDirective &S,
const CapturedStmt &CS) {
llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
const auto &Dir = cast<OMPLoopDirective>(S);
// The first captured variable of the captured statement corresponds
// to the inner lower bound.
VarDecl *PrevLBCapDecl = CS.captures().begin()->getCapturedVar();
// The second captured variable corresponds to the inner upper bound.
VarDecl *PrevUBCapDecl = std::next(CS.captures().begin())->getCapturedVar();
LValue LB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
llvm::Value *LBCast =
CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
CGF.SizeTy, /*isSigned=*/false);
CGF.EmitStoreOfScalar(LBCast, CGF.GetAddrOfLocalVar(PrevLBCapDecl),
/*Volatile=*/false, PrevLBCapDecl->getType());
CapturedVars.push_back(LBCast);
LValue UB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
llvm::Value *UBCast =
CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
CGF.SizeTy, /*isSigned=*/false);
CGF.EmitStoreOfScalar(UBCast, CGF.GetAddrOfLocalVar(PrevUBCapDecl),
/*Volatile=*/false, PrevUBCapDecl->getType());
CapturedVars.push_back(UBCast);
}
static void
@@ -5763,14 +5568,6 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
LValue IL =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
// Emit previous upper, lower bound captured variables, if applicable.
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) {
EmitOMPHelperVar(*this,
cast<DeclRefExpr>(S.getPrevLowerBoundVariable()));
EmitOMPHelperVar(*this,
cast<DeclRefExpr>(S.getPrevUpperBoundVariable()));
}
OMPPrivateScope LoopScope(*this);
if (EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races
@@ -6907,10 +6704,7 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
OMPTeamsScope Scope(CGF, S);
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
llvm::SmallVector<llvm::Type *, 16> CapturedVarsElemTypes;
CGF.GenerateOpenMPCapturedVarsAggregate(*CS, CapturedVars,
CapturedVarsElemTypes);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
CapturedVars);
}
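Net effect of the changes above for combined constructs: emitDistributeParallelForDistributeInnerBoundParams once again appends the distribute chunk's bounds (zero-extended to size_t) to CapturedVars ahead of the per-capture values, instead of storing them into implicitly captured variables. Per chunk, the emitted host code behaves roughly like this hedged sketch (scheduling details and real runtime types elided):

// Conceptual host lowering of 'distribute parallel for' after the revert.
extern "C" void __kmpc_fork_call(void *loc, int nargs, void *microtask, ...);
extern void parallel_outlined(int *gtid, int *btid, unsigned long prev_lb,
                              unsigned long prev_ub, int *x);

void distribute_chunks(unsigned long lo, unsigned long hi,
                       unsigned long chunk, int *x) {
  for (unsigned long lb = lo; lb <= hi; lb += chunk) {
    unsigned long ub = lb + chunk - 1 < hi ? lb + chunk - 1 : hi;
    // Bounds travel by value, first; then one vararg per capture.
    __kmpc_fork_call(nullptr, /*nargs=*/3, (void *)&parallel_outlined,
                     (void *)lb, (void *)ub, x);
  }
}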

clang/lib/CodeGen/CodeGenFunction.h

@@ -3342,14 +3342,8 @@ public:
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
Address GenerateCapturedStmtArgument(const CapturedStmt &S);
llvm::Function *
GenerateOpenMPCapturedStmtFunctionAggregate(const CapturedStmt &S,
SourceLocation Loc);
llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
SourceLocation Loc);
void GenerateOpenMPCapturedVarsAggregate(
const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars,
SmallVectorImpl<llvm::Type *> &CapturedVarsElemTypes);
void GenerateOpenMPCapturedVars(const CapturedStmt &S,
SmallVectorImpl<llvm::Value *> &CapturedVars);
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,

clang/lib/Sema/SemaOpenMP.cpp

@@ -4386,22 +4386,12 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
VarDecl *PrevLBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.lb");
VarDecl *PrevUBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.ub");
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
MarkVariableReferenced(SourceLocation(), PrevLBDecl);
MarkVariableReferenced(SourceLocation(), PrevUBDecl);
break;
}
case OMPD_target_teams_distribute_parallel_for:
@@ -4452,24 +4442,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
VarDecl *PrevLBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.lb");
VarDecl *PrevUBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.ub");
// Start a captured region for 'teams' or 'parallel'. Both regions have
// the same implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsParallel, /*OpenMPCaptureLevel=*/3);
MarkVariableReferenced(SourceLocation(), PrevLBDecl);
MarkVariableReferenced(SourceLocation(), PrevUBDecl);
break;
}
@@ -4507,24 +4487,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
VarDecl *PrevLBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.lb");
VarDecl *PrevUBDecl =
buildVarDecl(*this, SourceLocation(), Context.getSizeType().withConst(),
".captured.omp.previous.ub");
// Start a captured region for 'teams' or 'parallel'. Both regions have
// the same implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsParallel, /*OpenMPCaptureLevel=*/1);
MarkVariableReferenced(SourceLocation(), PrevLBDecl);
MarkVariableReferenced(SourceLocation(), PrevUBDecl);
break;
}
case OMPD_target_update:
@@ -9775,23 +9745,23 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
CombEUB =
SemaRef.ActOnFinishFullExpr(CombEUB.get(), /*DiscardedValue*/ false);
const CapturedStmt *CS = cast<CapturedStmt>(AStmt);
const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
// We expect to have at least 2 more parameters than the 'parallel'
// directive does - the lower and upper bounds of the previous schedule.
assert(CD->getNumParams() >= 4 &&
"Unexpected number of parameters in loop combined directive");
// Refer to captured variables from the associated captured statement of
// the combined directive. First captured variable is the previous lower
// bound, second is the previous upper bound.
VarDecl *PrevLBCapDecl = CS->captures().begin()->getCapturedVar();
assert(PrevLBCapDecl &&
"Expected captured var for previous LB in combined directive");
VarDecl *PrevUBCapDecl =
std::next(CS->captures().begin())->getCapturedVar();
assert(PrevUBCapDecl &&
"Expected captured var for previous UB in combined directive");
// Set the proper type for the bounds given what we learned from the
// enclosed loops.
ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
PrevLB = buildDeclRefExpr(SemaRef, PrevLBCapDecl,
PrevLBCapDecl->getType(), InitLoc);
PrevUB = buildDeclRefExpr(SemaRef, PrevUBCapDecl,
PrevUBCapDecl->getType(), InitLoc);
// Previous lower and upper bounds are obtained from the region
// parameters.
PrevLB =
buildDeclRefExpr(SemaRef, PrevLBDecl, PrevLBDecl->getType(), InitLoc);
PrevUB =
buildDeclRefExpr(SemaRef, PrevUBDecl, PrevUBDecl->getType(), InitLoc);
}
}
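In AST terms, the Sema changes restore '.previous.lb.' and '.previous.ub.' as implicit parameters of the inner 'parallel' captured region (read back in checkOpenMPLoop via CD->getParam), instead of materializing '.captured.omp.previous.lb/ub' variables and marking them referenced. The restored captured function therefore has roughly this shape (a declaration-level C++ sketch, not generated code):

struct anon;  // __context with shared vars
void omp_parallel_captured(const int *global_tid,           // .global_tid.
                           const int *bound_tid,            // .bound_tid.
                           const unsigned long previous_lb, // .previous.lb.
                           const unsigned long previous_ub, // .previous.ub.
                           anon *context);                  // __context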

clang/test/AST/ast-dump-openmp-distribute-parallel-for-simd.c

@@ -35,230 +35,231 @@ void test_five(int x, int y, int z) {
;
}
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_1:0x[a-z0-9]*]] <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_2:0x[a-z0-9]*]] <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective [[ADDR_3:0x[a-z0-9]*]] <line:4:1, col:41>
// CHECK-NEXT: | `-CapturedStmt [[ADDR_4:0x[a-z0-9]*]] <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_5:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_6:0x[a-z0-9]*]] <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_7:0x[a-z0-9]*]] <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_8:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:4:1, col:41>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_10:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_11:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_8]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_13:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_14:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_1]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_15:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_8]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_17:0x[a-z0-9]*]] <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_18:0x[a-z0-9]*]] <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_19:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_20:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl [[ADDR_8]] <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_9]] <col:16> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_22:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_23:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_24:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_25:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_1]] 'x' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_27:0x[a-z0-9]*]] <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_28:0x[a-z0-9]*]] <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective [[ADDR_30:0x[a-z0-9]*]] <line:10:1, col:41>
// CHECK-NEXT: | `-CapturedStmt [[ADDR_31:0x[a-z0-9]*]] <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_32:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_33:0x[a-z0-9]*]] <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_34:0x[a-z0-9]*]] <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_35:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:10:1, col:41>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_38:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_35]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_41:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_27]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_42:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_43:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_35]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_44:0x[a-z0-9]*]] <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_45:0x[a-z0-9]*]] <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_46:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_47:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_49:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_50:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_46]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_28]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_53:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_46]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_55:0x[a-z0-9]*]] <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_56:0x[a-z0-9]*]] <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_57:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_58:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_35]] <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_36]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_46]] <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_47]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_59:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_60:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_62:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <line:11:23> 'int' {{.*}}ParmVar [[ADDR_27]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_64:0x[a-z0-9]*]] <line:12:25> 'int' {{.*}}ParmVar [[ADDR_28]] 'y' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_65:0x[a-z0-9]*]] <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_66:0x[a-z0-9]*]] <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_67:0x[a-z0-9]*]] <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_68:0x[a-z0-9]*]] <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective [[ADDR_69:0x[a-z0-9]*]] <line:17:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause [[ADDR_70:0x[a-z0-9]*]] <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr [[ADDR_71:0x[a-z0-9]*]] <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_72:0x[a-z0-9]*]] <col:51> 'int' 1
// CHECK-NEXT: | `-CapturedStmt [[ADDR_73:0x[a-z0-9]*]] <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_74:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_75:0x[a-z0-9]*]] <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_76:0x[a-z0-9]*]] <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_77:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_78:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:17:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_79:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_77]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_82:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_83:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_66]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_84:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_85:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_77]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_86:0x[a-z0-9]*]] <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_87:0x[a-z0-9]*]] <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_88:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_89:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_90:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_91:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_92:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_88]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_93:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_94:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_67]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_95:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_96:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_88]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_97:0x[a-z0-9]*]] <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_98:0x[a-z0-9]*]] <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_99:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_100:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_77]] <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_78]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_88]] <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_89]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_101:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_102:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_103:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_104:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_105:0x[a-z0-9]*]] <line:18:23> 'int' {{.*}}ParmVar [[ADDR_66]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_106:0x[a-z0-9]*]] <line:19:25> 'int' {{.*}}ParmVar [[ADDR_67]] 'y' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_107:0x[a-z0-9]*]] <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_108:0x[a-z0-9]*]] <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_109:0x[a-z0-9]*]] <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_110:0x[a-z0-9]*]] <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective [[ADDR_111:0x[a-z0-9]*]] <line:24:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause [[ADDR_112:0x[a-z0-9]*]] <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr [[ADDR_113:0x[a-z0-9]*]] <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_114:0x[a-z0-9]*]] <col:51> 'int' 2
// CHECK-NEXT: | `-CapturedStmt [[ADDR_115:0x[a-z0-9]*]] <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_116:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_117:0x[a-z0-9]*]] <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_118:0x[a-z0-9]*]] <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_119:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_120:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:24:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_121:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_122:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_123:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_119]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_124:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_125:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_108]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_126:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_127:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_119]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_128:0x[a-z0-9]*]] <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_129:0x[a-z0-9]*]] <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_130:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_131:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_132:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_133:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_134:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_130]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_135:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_136:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_109]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_137:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_138:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_130]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_139:0x[a-z0-9]*]] <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_140:0x[a-z0-9]*]] <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_141:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_142:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_119]] <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_120]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_130]] <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_131]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_143:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_144:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_145:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_146:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_147:0x[a-z0-9]*]] <line:25:23> 'int' {{.*}}ParmVar [[ADDR_108]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_148:0x[a-z0-9]*]] <line:26:25> 'int' {{.*}}ParmVar [[ADDR_109]] 'y' 'int'
// CHECK-NEXT: `-FunctionDecl [[ADDR_149:0x[a-z0-9]*]] <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_150:0x[a-z0-9]*]] <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_151:0x[a-z0-9]*]] <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_152:0x[a-z0-9]*]] <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt [[ADDR_153:0x[a-z0-9]*]] <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeParallelForSimdDirective [[ADDR_154:0x[a-z0-9]*]] <line:31:1, col:53>
// CHECK-NEXT: |-OMPCollapseClause [[ADDR_155:0x[a-z0-9]*]] <col:42, col:52>
// CHECK-NEXT: | `-ConstantExpr [[ADDR_156:0x[a-z0-9]*]] <col:51> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_157:0x[a-z0-9]*]] <col:51> 'int' 2
// CHECK-NEXT: `-CapturedStmt [[ADDR_158:0x[a-z0-9]*]] <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl [[ADDR_159:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt [[ADDR_160:0x[a-z0-9]*]] <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_161:0x[a-z0-9]*]] <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_162:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_163:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeParallelForSimdDirective {{.*}} <line:31:1, col:53>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_164:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_165:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_166:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_162]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_167:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_168:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_150]] 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_169:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_170:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_162]] 'i' 'int'
// CHECK-NEXT: | | `-ForStmt [[ADDR_171:0x[a-z0-9]*]] <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_172:0x[a-z0-9]*]] <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_173:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_174:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_175:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_176:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_177:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_173]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_178:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_179:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_151]] 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_180:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_181:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_173]] 'i' 'int'
// CHECK-NEXT: | | `-ForStmt [[ADDR_182:0x[a-z0-9]*]] <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_183:0x[a-z0-9]*]] <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_184:0x[a-z0-9]*]] <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_185:0x[a-z0-9]*]] <col:20> 'int' 0
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_186:0x[a-z0-9]*]] <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_187:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_188:0x[a-z0-9]*]] <col:23> 'int' {{.*}}Var [[ADDR_184]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_189:0x[a-z0-9]*]] <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_190:0x[a-z0-9]*]] <col:27> 'int' {{.*}}ParmVar [[ADDR_152]] 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_191:0x[a-z0-9]*]] <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_192:0x[a-z0-9]*]] <col:30> 'int' {{.*}}Var [[ADDR_184]] 'i' 'int'
// CHECK-NEXT: | | `-NullStmt [[ADDR_193:0x[a-z0-9]*]] <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_194:0x[a-z0-9]*]] <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_195:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_196:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl [[ADDR_162]] <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_163]] <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl [[ADDR_173]] <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_174]] <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl [[ADDR_184]] <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_185]] <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr [[ADDR_197:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_198:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_199:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_200:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_201:0x[a-z0-9]*]] <line:32:23> 'int' {{.*}}ParmVar [[ADDR_150]] 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_202:0x[a-z0-9]*]] <line:33:25> 'int' {{.*}}ParmVar [[ADDR_151]] 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr [[ADDR_203:0x[a-z0-9]*]] <line:34:27> 'int' {{.*}}ParmVar [[ADDR_152]] 'z' 'int'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
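
The interleaved expectations above pair the two generations of this AST dump: the `[[ADDR_...]]` lines, which expect `.captured.omp.previous.lb`/`.captured.omp.previous.ub` Vars among the CapturedStmt children, are the aggregate-scheme expectations being removed, while the `{{.*}}` lines, which instead expect `implicit used .previous.lb.`/`.previous.ub.` parameters on the CapturedDecl, are the restored pre-aggregate expectations. For orientation, a minimal sketch of the test_five source both dumps describe, reconstructed from the source locations in the dump (lines 30-36; the real test file may differ in incidental details):

void test_five(int x, int y, int z) {
#pragma omp distribute parallel for simd collapse(2)
  for (int i = 0; i < x; i++)      /* line 32: outer ForStmt, cond i < x */
    for (int i = 0; i < y; i++)    /* line 33: middle ForStmt, cond i < y */
      for (int i = 0; i < z; i++)  /* line 34: inner ForStmt, cond i < z */
        ;                          /* line 35: NullStmt */
}

Each nested loop declares its own `i`, which is why the dump lists three distinct VarDecls all named 'i' under the CapturedDecl.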

View File

@@ -35,230 +35,231 @@ void test_five(int x, int y, int z) {
;
}
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_1:0x[a-z0-9]*]] <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_2:0x[a-z0-9]*]] <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective [[ADDR_3:0x[a-z0-9]*]] <line:4:1, col:36>
// CHECK-NEXT: | `-CapturedStmt [[ADDR_4:0x[a-z0-9]*]] <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_5:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_6:0x[a-z0-9]*]] <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_7:0x[a-z0-9]*]] <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_8:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:4:1, col:36>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_10:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_11:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_8]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_13:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_14:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_1]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_15:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_8]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_17:0x[a-z0-9]*]] <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_18:0x[a-z0-9]*]] <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_19:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_20:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl [[ADDR_8]] <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_9]] <col:16> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_22:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_23:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_24:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_25:0x[a-z0-9]*]] <col:3> 'int' {{.*}}ParmVar [[ADDR_1]] 'x' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_27:0x[a-z0-9]*]] <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_28:0x[a-z0-9]*]] <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective [[ADDR_30:0x[a-z0-9]*]] <line:10:1, col:36>
// CHECK-NEXT: | `-CapturedStmt [[ADDR_31:0x[a-z0-9]*]] <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_32:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_33:0x[a-z0-9]*]] <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_34:0x[a-z0-9]*]] <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_35:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:10:1, col:36>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_38:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_35]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_41:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_27]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_42:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_43:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_35]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_44:0x[a-z0-9]*]] <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_45:0x[a-z0-9]*]] <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_46:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_47:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_49:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_50:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_46]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_28]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_53:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_46]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_55:0x[a-z0-9]*]] <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_56:0x[a-z0-9]*]] <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_57:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_58:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_35]] <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_36]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_46]] <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_47]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_59:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_60:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_61:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_62:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <line:11:3> 'int' {{.*}}ParmVar [[ADDR_27]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_64:0x[a-z0-9]*]] <line:12:25> 'int' {{.*}}ParmVar [[ADDR_28]] 'y' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_65:0x[a-z0-9]*]] <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_66:0x[a-z0-9]*]] <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_67:0x[a-z0-9]*]] <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_68:0x[a-z0-9]*]] <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective [[ADDR_69:0x[a-z0-9]*]] <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause [[ADDR_70:0x[a-z0-9]*]] <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr [[ADDR_71:0x[a-z0-9]*]] <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_72:0x[a-z0-9]*]] <col:46> 'int' 1
// CHECK-NEXT: | `-CapturedStmt [[ADDR_73:0x[a-z0-9]*]] <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_74:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_75:0x[a-z0-9]*]] <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_76:0x[a-z0-9]*]] <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_77:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_78:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_79:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_77]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_82:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_83:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_66]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_84:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_85:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_77]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_86:0x[a-z0-9]*]] <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_87:0x[a-z0-9]*]] <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_88:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_89:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_90:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_91:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_92:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_88]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_93:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_94:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_67]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_95:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_96:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_88]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_97:0x[a-z0-9]*]] <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_98:0x[a-z0-9]*]] <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_99:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_100:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_77]] <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_78]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_88]] <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_89]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_101:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_102:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_103:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_104:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_105:0x[a-z0-9]*]] <line:18:3> 'int' {{.*}}ParmVar [[ADDR_66]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_106:0x[a-z0-9]*]] <line:19:25> 'int' {{.*}}ParmVar [[ADDR_67]] 'y' 'int'
// CHECK-NEXT: |-FunctionDecl [[ADDR_107:0x[a-z0-9]*]] <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_108:0x[a-z0-9]*]] <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_109:0x[a-z0-9]*]] <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_110:0x[a-z0-9]*]] <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective [[ADDR_111:0x[a-z0-9]*]] <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause [[ADDR_112:0x[a-z0-9]*]] <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr [[ADDR_113:0x[a-z0-9]*]] <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_114:0x[a-z0-9]*]] <col:46> 'int' 2
// CHECK-NEXT: | `-CapturedStmt [[ADDR_115:0x[a-z0-9]*]] <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl [[ADDR_116:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt [[ADDR_117:0x[a-z0-9]*]] <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_118:0x[a-z0-9]*]] <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_119:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_120:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_121:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_122:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_123:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_119]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_124:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_125:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_108]] 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_126:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_127:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_119]] 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt [[ADDR_128:0x[a-z0-9]*]] <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt [[ADDR_129:0x[a-z0-9]*]] <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl [[ADDR_130:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral [[ADDR_131:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_132:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr [[ADDR_133:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_134:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_130]] 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_135:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_136:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_109]] 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator [[ADDR_137:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_138:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_130]] 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt [[ADDR_139:0x[a-z0-9]*]] <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_140:0x[a-z0-9]*]] <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_141:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl [[ADDR_142:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl [[ADDR_119]] <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_120]] <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl [[ADDR_130]] <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_131]] <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_143:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_144:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_145:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_146:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: | |-DeclRefExpr [[ADDR_147:0x[a-z0-9]*]] <line:25:3> 'int' {{.*}}ParmVar [[ADDR_108]] 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_148:0x[a-z0-9]*]] <line:26:5> 'int' {{.*}}ParmVar [[ADDR_109]] 'y' 'int'
// CHECK-NEXT: `-FunctionDecl [[ADDR_149:0x[a-z0-9]*]] <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_150:0x[a-z0-9]*]] <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_151:0x[a-z0-9]*]] <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl [[ADDR_152:0x[a-z0-9]*]] <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt [[ADDR_153:0x[a-z0-9]*]] <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeParallelForDirective [[ADDR_154:0x[a-z0-9]*]] <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause [[ADDR_155:0x[a-z0-9]*]] <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr [[ADDR_156:0x[a-z0-9]*]] <col:46> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_157:0x[a-z0-9]*]] <col:46> 'int' 2
// CHECK-NEXT: `-CapturedStmt [[ADDR_158:0x[a-z0-9]*]] <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl [[ADDR_159:0x[a-z0-9]*]] <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt [[ADDR_160:0x[a-z0-9]*]] <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_161:0x[a-z0-9]*]] <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_162:0x[a-z0-9]*]] <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_163:0x[a-z0-9]*]] <col:16> 'int' 0
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeParallelForDirective {{.*}} <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_164:0x[a-z0-9]*]] <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_165:0x[a-z0-9]*]] <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_166:0x[a-z0-9]*]] <col:19> 'int' {{.*}}Var [[ADDR_162]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_167:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_168:0x[a-z0-9]*]] <col:23> 'int' {{.*}}ParmVar [[ADDR_150]] 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_169:0x[a-z0-9]*]] <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_170:0x[a-z0-9]*]] <col:26> 'int' {{.*}}Var [[ADDR_162]] 'i' 'int'
// CHECK-NEXT: | | `-ForStmt [[ADDR_171:0x[a-z0-9]*]] <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_172:0x[a-z0-9]*]] <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_173:0x[a-z0-9]*]] <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_174:0x[a-z0-9]*]] <col:18> 'int' 0
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_175:0x[a-z0-9]*]] <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_176:0x[a-z0-9]*]] <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_177:0x[a-z0-9]*]] <col:21> 'int' {{.*}}Var [[ADDR_173]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_178:0x[a-z0-9]*]] <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_179:0x[a-z0-9]*]] <col:25> 'int' {{.*}}ParmVar [[ADDR_151]] 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_180:0x[a-z0-9]*]] <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_181:0x[a-z0-9]*]] <col:28> 'int' {{.*}}Var [[ADDR_173]] 'i' 'int'
// CHECK-NEXT: | | `-ForStmt [[ADDR_182:0x[a-z0-9]*]] <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt [[ADDR_183:0x[a-z0-9]*]] <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl [[ADDR_184:0x[a-z0-9]*]] <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral [[ADDR_185:0x[a-z0-9]*]] <col:20> 'int' 0
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_186:0x[a-z0-9]*]] <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_187:0x[a-z0-9]*]] <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_188:0x[a-z0-9]*]] <col:23> 'int' {{.*}}Var [[ADDR_184]] 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_189:0x[a-z0-9]*]] <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_190:0x[a-z0-9]*]] <col:27> 'int' {{.*}}ParmVar [[ADDR_152]] 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator [[ADDR_191:0x[a-z0-9]*]] <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_192:0x[a-z0-9]*]] <col:30> 'int' {{.*}}Var [[ADDR_184]] 'i' 'int'
// CHECK-NEXT: | | `-NullStmt [[ADDR_193:0x[a-z0-9]*]] <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_194:0x[a-z0-9]*]] <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_195:0x[a-z0-9]*]] <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl [[ADDR_196:0x[a-z0-9]*]] <col:1> col:1 implicit __context 'struct ({{.*}}ast-dump-openmp-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl [[ADDR_162]] <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_163]] <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl [[ADDR_173]] <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_174]] <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl [[ADDR_184]] <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_185]] <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr [[ADDR_197:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_198:0x[a-z0-9]*]] '.captured.omp.previous.lb' 'const unsigned long'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_199:0x[a-z0-9]*]] <<invalid sloc>> 'const unsigned long' {{.*}}Var [[ADDR_200:0x[a-z0-9]*]] '.captured.omp.previous.ub' 'const unsigned long'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_201:0x[a-z0-9]*]] <line:32:3> 'int' {{.*}}ParmVar [[ADDR_150]] 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr [[ADDR_202:0x[a-z0-9]*]] <line:33:5> 'int' {{.*}}ParmVar [[ADDR_151]] 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr [[ADDR_203:0x[a-z0-9]*]] <line:34:27> 'int' {{.*}}ParmVar [[ADDR_152]] 'z' 'int'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
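
This second dump (for plain `distribute parallel for`) shows the same swap; the practical difference is how the distribute loop's chunk bounds reach the inner parallel region. A rough sketch of the two outlined-function shapes, with illustrative names, simplified types, and by-value vs. by-reference details glossed over (these are not the exact generated symbols):

/* Restored scheme: the bounds ride along as trailing __kmpc_fork_call
 * varargs, so the outlined body receives them as its own parameters,
 * followed by one pointer per captured variable. */
void omp_outlined(const int *global_tid, const int *bound_tid,
                  unsigned long prev_lb, unsigned long prev_ub,
                  int *x, int *y);

/* Aggregate scheme (being reverted): bounds and captures were packed
 * into one context struct and passed as a single pointer. */
struct anon { unsigned long prev_lb, prev_ub; int *x, *y; };
void omp_outlined_agg(const int *global_tid, const int *bound_tid,
                      struct anon *ctx);

This matches the dump: the restored CapturedDecl carries `implicit used .previous.lb.`/`.previous.ub.` 'const unsigned long' parameters, whereas the aggregate version captured `.captured.omp.previous.lb`/`.captured.omp.previous.ub` Vars like any other variable.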

File diff suppressed because it is too large

View File

@@ -9,7 +9,7 @@ void ffcomplex (int a) {
// CHECK: call noundef { double, double } @__muldc3(double noundef %{{.+}}, double noundef %{{.+}}, double noundef %{{.+}}, double noundef %{{.+}})
dc *= dc;
// CHECK: call {{.+}} @__kmpc_fork_call({{.+}} [[REGNAME1:@.*]] to void (i32*, i32*, ...)*), %struct.anon* %{{.+}})
// CHECK: call {{.+}} @__kmpc_fork_call({{.+}} [[REGNAME1:@.*]] to void (i32*, i32*, ...)*), { double, double }* %{{.+}})
#pragma omp parallel
{
dc *= dc;
@@ -32,7 +32,7 @@ void foo(int a, int b) {
void (*fptr)(void) noexcept = fnoexcp;
// CHECK: call {{.+}} @__kmpc_fork_call({{.+}} [[REGNAME2:@.*]] to void (i32*, i32*, ...)*), %struct.anon.0* %{{.+}})
// CHECK: call {{.+}} @__kmpc_fork_call({{.+}} [[REGNAME2:@.*]] to void (i32*, i32*, ...)*), void ()** %{{.+}})
#pragma omp parallel
{
fptr();
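
These two hunks show the revert at a fork-call site: under the aggregate scheme every capture was packed into a `%struct.anon` and `__kmpc_fork_call` received a single pointer, while the restored codegen forwards each capture as its own trailing argument, so a region that captures only `dc` passes a `{ double, double }*` and one that captures only `fptr` passes a `void ()**`. A minimal sketch of the kind of source behind REGNAME1, assuming `dc` is a local `double _Complex` (the `__muldc3` call implies complex double multiplication; the actual test may declare and initialize it differently):

void ffcomplex(int a) {
  double _Complex dc = a;  /* assumed initialization */
  dc *= dc;                /* lowers to a __muldc3 call */
#pragma omp parallel       /* captures dc; the revert changes only how
                              that capture reaches the outlined body */
  {
    dc *= dc;
  }
}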

File diff suppressed because it is too large

View File

@@ -86,7 +86,6 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
@@ -107,52 +106,45 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[AGG_CAPTURED42:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_43:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_44:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_45:%.*]] = alloca [[STRUCT_ANON_5:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[AGG_CAPTURED42:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i8*** [[ARGV_ADDR]], i8**** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP2]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i8*** [[ARGV_ADDR]], i32* [[ARGC_ADDR]])
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 0
// CHECK1-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 0
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
// CHECK1-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP7]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK1-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1: .cancel.exit1:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue2:
@@ -160,8 +152,8 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
@@ -174,28 +166,28 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_5]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_6]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_SECTIONS_IL_6]], i32* [[DOTOMP_SECTIONS_LB_3]], i32* [[DOTOMP_SECTIONS_UB_4]], i32* [[DOTOMP_SECTIONS_ST_5]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = icmp slt i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 1
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_3]], align 4
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp slt i32 [[TMP13]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP13]], i32 1
// CHECK1-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_3]], align 4
// CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK1: omp.inner.for.cond8:
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_4]], align 4
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]]
// CHECK1: omp.inner.for.body10:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: switch i32 [[TMP21]], label [[DOTOMP_SECTIONS_EXIT19:%.*]] [
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_SECTIONS_EXIT19:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE11:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE14:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case11:
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[DOTCANCEL_EXIT12:%.*]], label [[DOTCANCEL_CONTINUE13:%.*]]
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK1-NEXT: br i1 [[TMP21]], label [[DOTCANCEL_EXIT12:%.*]], label [[DOTCANCEL_CONTINUE13:%.*]]
// CHECK1: .cancel.exit12:
// CHECK1-NEXT: br label [[CANCEL_EXIT23:%.*]]
// CHECK1: cancel.exit:
@@ -204,15 +196,15 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: .cancel.continue13:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT19]]
// CHECK1: .omp.sections.case14:
// CHECK1-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK1-NEXT: br i1 [[TMP25]], label [[DOTCANCEL_EXIT15:%.*]], label [[DOTCANCEL_CONTINUE16:%.*]]
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[DOTCANCEL_EXIT15:%.*]], label [[DOTCANCEL_CONTINUE16:%.*]]
// CHECK1: .cancel.exit15:
// CHECK1-NEXT: br label [[CANCEL_EXIT23]]
// CHECK1: .cancel.continue16:
// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK1-NEXT: br i1 [[TMP27]], label [[DOTCANCEL_EXIT17:%.*]], label [[DOTCANCEL_CONTINUE18:%.*]]
// CHECK1-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
// CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK1-NEXT: br i1 [[TMP25]], label [[DOTCANCEL_EXIT17:%.*]], label [[DOTCANCEL_CONTINUE18:%.*]]
// CHECK1: .cancel.exit17:
// CHECK1-NEXT: br label [[CANCEL_EXIT23]]
// CHECK1: .cancel.continue18:
@@ -220,8 +212,8 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: .omp.sections.exit19:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]]
// CHECK1: omp.inner.for.inc20:
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: [[INC21:%.*]] = add nsw i32 [[TMP28]], 1
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: [[INC21:%.*]] = add nsw i32 [[TMP26]], 1
// CHECK1-NEXT: store i32 [[INC21]], i32* [[DOTOMP_SECTIONS_IV_7]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND8]]
// CHECK1: omp.inner.for.end22:
@@ -229,62 +221,62 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: br label [[CANCEL_CONT24:%.*]]
// CHECK1: cancel.cont24:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP29]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP30]], 0
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 0, [[TMP31]]
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 0, [[TMP29]]
// CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: store i32 [[TMP32]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: store i32 [[TMP30]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: [[CMP29:%.*]] = icmp sgt i32 [[TMP33]], [[TMP34]]
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: [[CMP29:%.*]] = icmp sgt i32 [[TMP31]], [[TMP32]]
// CHECK1-NEXT: br i1 [[CMP29]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP35]], [[COND_TRUE]] ], [ [[TMP36]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], [[COND_TRUE]] ], [ [[TMP34]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]]
// CHECK1: omp.inner.for.cond30:
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP31:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP31:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK1-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END39:%.*]]
// CHECK1: omp.inner.for.body32:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP40]], 1
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP38]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I28]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
// CHECK1-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK1-NEXT: br i1 [[TMP42]], label [[DOTCANCEL_EXIT33:%.*]], label [[DOTCANCEL_CONTINUE34:%.*]]
// CHECK1-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
// CHECK1-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
// CHECK1-NEXT: br i1 [[TMP40]], label [[DOTCANCEL_EXIT33:%.*]], label [[DOTCANCEL_CONTINUE34:%.*]]
// CHECK1: .cancel.exit33:
// CHECK1-NEXT: br label [[CANCEL_EXIT40:%.*]]
// CHECK1: cancel.exit23:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[CANCEL_CONT24]]
// CHECK1: .cancel.continue34:
// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK1-NEXT: br i1 [[TMP44]], label [[DOTCANCEL_EXIT35:%.*]], label [[DOTCANCEL_CONTINUE36:%.*]]
// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
// CHECK1-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
// CHECK1-NEXT: br i1 [[TMP42]], label [[DOTCANCEL_EXIT35:%.*]], label [[DOTCANCEL_CONTINUE36:%.*]]
// CHECK1: .cancel.exit35:
// CHECK1-NEXT: br label [[CANCEL_EXIT40]]
// CHECK1: .cancel.continue36:
@@ -292,8 +284,8 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]]
// CHECK1: omp.inner.for.inc37:
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP45]], 1
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK1-NEXT: store i32 [[ADD38]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND30]]
// CHECK1: omp.inner.for.end39:
@@ -308,59 +300,56 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: br label [[CANCEL_CONT41]]
// CHECK1: cancel.cont41:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP46:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP47:%.*]] = bitcast i8* [[TMP46]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP47]], i32 0, i32 0
// CHECK1-NEXT: [[TMP49:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP46]])
// CHECK1-NEXT: [[TMP50:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_entry..3 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP51:%.*]] = bitcast i8* [[TMP50]] to %struct.kmp_task_t_with_privates.2*
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP51]], i32 0, i32 0
// CHECK1-NEXT: [[TMP53:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP50]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_43]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_44]])
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[OMP_OUTLINED_ARG_AGG_45]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP54]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.5*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.anon.5* [[OMP_OUTLINED_ARG_AGG_45]])
// CHECK1-NEXT: [[TMP55:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: ret i32 [[TMP55]]
// CHECK1-NEXT: [[TMP44:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast i8* [[TMP44]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP45]], i32 0, i32 0
// CHECK1-NEXT: [[TMP47:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP44]])
// CHECK1-NEXT: [[TMP48:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.1*)* @.omp_task_entry..3 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast i8* [[TMP48]] to %struct.kmp_task_t_with_privates.1*
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP49]], i32 0, i32 0
// CHECK1-NEXT: [[TMP51:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP48]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..5 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]])
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: ret i32 [[TMP52]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGV:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1)
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1-NEXT: store i8*** [[ARGV]], i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK1-NEXT: br i1 [[TMP5]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_cancel_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK1-NEXT: br label [[RETURN:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 1)
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1: .cancel.exit1:
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK1-NEXT: br label [[RETURN]]
// CHECK1: .cancel.continue2:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP13]] to i8
// CHECK1-NEXT: [[TMP14:%.*]] = load i8**, i8*** [[TMP2]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP14]], i64 0
// CHECK1-NEXT: [[TMP15:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP15]], i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP10]] to i8
// CHECK1-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[TMP0]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP11]], i64 0
// CHECK1-NEXT: [[TMP12:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP12]], i64 0
// CHECK1-NEXT: store i8 [[CONV]], i8* [[ARRAYIDX3]], align 1
// CHECK1-NEXT: br label [[RETURN]]
// CHECK1: return:
@@ -375,7 +364,7 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[CLEANUP_DEST_SLOT_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
@@ -387,7 +376,7 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
@@ -398,8 +387,8 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 4)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
@@ -423,27 +412,27 @@ for (int i = 0; i < argc; ++i) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry..3
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.1* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[CLEANUP_DEST_SLOT_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.1*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.1* [[TMP1]], %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.1*, %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.2* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.1* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
@@ -453,8 +442,8 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 4)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
@@ -471,11 +460,10 @@ for (int i = 0; i < argc; ++i) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
@@ -483,42 +471,40 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 0
// CHECK1-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 0
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1: .cancel.exit1:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue2:
@@ -526,26 +512,25 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: ret void
// CHECK1: cancel.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
@@ -553,51 +538,49 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 1
// CHECK1-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 1
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE3:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTCANCEL_EXIT1:%.*]], label [[DOTCANCEL_CONTINUE2:%.*]]
// CHECK1: .cancel.exit1:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue2:
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.case3:
// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTCANCEL_EXIT4:%.*]], label [[DOTCANCEL_CONTINUE5:%.*]]
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTCANCEL_EXIT4:%.*]], label [[DOTCANCEL_CONTINUE5:%.*]]
// CHECK1: .cancel.exit4:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue5:
@@ -605,26 +588,26 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: cancel.cont:
// CHECK1-NEXT: ret void
// CHECK1: cancel.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.5* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.5*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
@@ -637,69 +620,67 @@ for (int i = 0; i < argc; ++i) {
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.5* [[__CONTEXT]], %struct.anon.5** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.5*, %struct.anon.5** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_5:%.*]], %struct.anon.5* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB5]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB5]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 2)
// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
// CHECK1-NEXT: br i1 [[TMP20]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]], i32 2)
// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK1-NEXT: br i1 [[TMP18]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK1: .cancel.exit:
// CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK1: .cancel.continue:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]], i32 2)
// CHECK1-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
// CHECK1-NEXT: br i1 [[TMP24]], label [[DOTCANCEL_EXIT6:%.*]], label [[DOTCANCEL_CONTINUE7:%.*]]
// CHECK1-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]], i32 2)
// CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK1-NEXT: br i1 [[TMP22]], label [[DOTCANCEL_EXIT6:%.*]], label [[DOTCANCEL_CONTINUE7:%.*]]
// CHECK1: .cancel.exit6:
// CHECK1-NEXT: br label [[CANCEL_EXIT]]
// CHECK1: .cancel.continue7:
@@ -707,21 +688,21 @@ for (int i = 0; i < argc; ++i) {
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB5]], i32 [[TMP25]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: cancel.exit:
// CHECK1-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB5]], i32 [[TMP27]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: cancel.exit:
// CHECK1-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB5]], i32 [[TMP29]])
// CHECK1-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: br label [[CANCEL_CONT]]
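The CHECK lines above are the updated expectations for the cancellation codegen test (the file path is not preserved in this rendering; it is plausibly clang/test/OpenMP/cancel_codegen.cpp). As a minimal sketch of the kind of source behind them, assuming that reduction: with the revert applied, clang again forwards each captured variable to __kmpc_fork_call as its own argument (here i8*** argv and i32* argc), instead of packing the captures into a %struct.anon aggregate that the outlined body re-loads field by field.

// Minimal sketch, not the verbatim test source; reconstructed only from the
// CHECK lines above. With the revert, the first parallel region lowers to
//   @__kmpc_fork_call(..., i32 2, @.omp_outlined., i8*** %argv.addr, i32* %argc.addr)
// rather than to a single %struct.anon* aggregate argument.
int main(int argc, char **argv) {
#pragma omp parallel
  {
#pragma omp cancellation point parallel // the __kmpc_cancellationpoint(..., i32 1) calls above
#pragma omp cancel parallel             // the __kmpc_cancel(..., i32 1) calls above
    argv[0][0] = (char)argc;            // the trunc-to-i8 and store in @.omp_outlined.
  }
  return argc; // matches the final load/ret of [[ARGC_ADDR]]
}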


@@ -12,40 +12,52 @@ void a() {
// CHECK1-LABEL: define {{[^@]+}}@_Z1av
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG6:![0-9]+]] {
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG7:![0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata { float, float }* [[B]], metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG13:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG14:![0-9]+]]
// CHECK1-NEXT: [[B_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[B]], i32 0, i32 0, !dbg [[DBG15:![0-9]+]]
// CHECK1-NEXT: [[B_REAL:%.*]] = load float, float* [[B_REALP]], align 4, !dbg [[DBG15]]
// CHECK1-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[B]], i32 0, i32 1, !dbg [[DBG15]]
// CHECK1-NEXT: [[B_IMAG:%.*]] = load float, float* [[B_IMAGP]], align 4, !dbg [[DBG15]]
// CHECK1-NEXT: [[DOTREALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 0, !dbg [[DBG14]]
// CHECK1-NEXT: [[DOTIMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 1, !dbg [[DBG14]]
// CHECK1-NEXT: store float [[B_REAL]], float* [[DOTREALP]], align 4, !dbg [[DBG14]]
// CHECK1-NEXT: store float [[B_IMAG]], float* [[DOTIMAGP]], align 4, !dbg [[DBG14]]
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG14]]
// CHECK1-NEXT: ret void, !dbg [[DBG17:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = load { float, float }, { float, float }* [[B]], align 4, !dbg [[DBG14:![0-9]+]]
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to { float, float }*, !dbg [[DBG14]]
// CHECK1-NEXT: store { float, float } [[TMP0]], { float, float }* [[CONV]], align 4, !dbg [[DBG14]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[B_CASTED]], align 8, !dbg [[DBG14]]
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]]), !dbg [[DBG14]]
// CHECK1-NEXT: ret void, !dbg [[DBG15:![0-9]+]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined._debug__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR2:[0-9]+]] !dbg [[DBG16:![0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast { float, float }* [[B]] to <2 x float>*
// CHECK1-NEXT: store <2 x float> [[B_COERCE]], <2 x float>* [[TMP0]], align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata { float, float }* [[B]], metadata [[META27:![0-9]+]], metadata !DIExpression()), !dbg [[DBG28:![0-9]+]]
// CHECK1-NEXT: ret void, !dbg [[DBG29:![0-9]+]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] !dbg [[DBG18:![0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[B:%.*]]) #[[ATTR3:[0-9]+]] !dbg [[DBG30:![0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META30:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META34:![0-9]+]], metadata !DIExpression()), !dbg [[DBG35:![0-9]+]]
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META32:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31]]
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata %struct.anon** [[__CONTEXT_ADDR]], metadata [[META33:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31]]
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG34:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0, !dbg [[DBG34]]
// CHECK1-NEXT: [[TMP2:%.*]] = load { float, float }, { float, float }* [[TMP1]], align 4, !dbg [[DBG34]]
// CHECK1-NEXT: store { float, float } [[TMP2]], { float, float }* [[B]], align 4, !dbg [[DBG34]]
// CHECK1-NEXT: ret void, !dbg [[DBG35:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META36:![0-9]+]], metadata !DIExpression()), !dbg [[DBG35]]
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i64* [[B_ADDR]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG35]]
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to { float, float }*, !dbg [[DBG38:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG38]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG38]]
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast { float, float }* [[CONV]] to <2 x float>*, !dbg [[DBG38]]
// CHECK1-NEXT: [[TMP3:%.*]] = load <2 x float>, <2 x float>* [[TMP2]], align 4, !dbg [[DBG38]]
// CHECK1-NEXT: call void @.omp_outlined._debug__(i32* [[TMP0]], i32* [[TMP1]], <2 x float> [[TMP3]]) #[[ATTR4:[0-9]+]], !dbg [[DBG38]]
// CHECK1-NEXT: ret void, !dbg [[DBG38]]
//
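The two variants above differ in how the 8-byte _Complex float capture travels: the reverted form coerces the value through an i64 slot (B_CASTED) and hands it to the debug wrapper as a <2 x float>, instead of routing it through a %struct.anon aggregate. The test source is not shown in this hunk, so the following is only a sketch of the kind of function that produces these checks; the firstprivate clause and the empty body are assumptions.

// Minimal sketch, not the actual test source: a complex capture
// small enough (8 bytes) to be coerced into the i64 seen above.
void a(void) {
  float _Complex b;
#pragma omp parallel firstprivate(b)
  ;
}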

View File

@@ -20,36 +20,31 @@ void f(int m) {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[M]], i32* [[M_ADDR]], align 4
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[M_ADDR]], metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG13:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I]], metadata [[META14:![0-9]+]], metadata !DIExpression()), !dbg [[DBG15:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[M_ADDR]], align 4, !dbg [[DBG16:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG17:![0-9]+]]
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG17]]
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG17]]
// CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG17]]
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG17]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META18:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META21:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG26:![0-9]+]]
// CHECK1-NEXT: store i32* [[M_ADDR]], i32** [[TMP3]], align 8, !dbg [[DBG26]]
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG26]]
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP4]], align 8, !dbg [[DBG26]]
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2, !dbg [[DBG26]]
// CHECK1-NEXT: store i32* [[VLA]], i32** [[TMP5]], align 8, !dbg [[DBG26]]
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG26]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG27:![0-9]+]]
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP6]]), !dbg [[DBG27]]
// CHECK1-NEXT: ret void, !dbg [[DBG27]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[M_ADDR]], metadata [[META11:![0-9]+]], metadata !DIExpression()), !dbg [[DBG12:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I]], metadata [[META13:![0-9]+]], metadata !DIExpression()), !dbg [[DBG14:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[M_ADDR]], align 4, !dbg [[DBG15:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG16:![0-9]+]]
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG16]]
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG16]]
// CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG16]]
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG16]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG19:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META20:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[M_ADDR]], i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG25:![0-9]+]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG26:![0-9]+]]
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP3]]), !dbg [[DBG26]]
// CHECK1-NEXT: ret void, !dbg [[DBG26]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] !dbg [[DBG28:![0-9]+]] {
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined._debug__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[M:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CEN:%.*]]) #[[ATTR3:[0-9]+]] !dbg [[DBG27:![0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[M_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CEN_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
@@ -61,93 +56,123 @@ void f(int m) {
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META39:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META35:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36:![0-9]+]]
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata %struct.anon** [[__CONTEXT_ADDR]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG43:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IV]], metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTCAPTURE_EXPR_]], metadata [[META45:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG46:![0-9]+]]
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTCAPTURE_EXPR_1]], metadata [[META45]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0, !dbg [[DBG47:![0-9]+]]
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1, !dbg [[DBG47]]
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1, !dbg [[DBG47]]
// CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG47]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I]], metadata [[META48:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4, !dbg [[DBG49:![0-9]+]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]], !dbg [[DBG47]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32* [[M]], i32** [[M_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[M_ADDR]], metadata [[META38:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39:![0-9]+]]
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32* [[CEN]], i32** [[CEN_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[CEN_ADDR]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG42:![0-9]+]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[M_ADDR]], align 8, !dbg [[DBG43:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[CEN_ADDR]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IV]], metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTCAPTURE_EXPR_]], metadata [[META45:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4, !dbg [[DBG46:![0-9]+]]
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTCAPTURE_EXPR_1]], metadata [[META45]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0, !dbg [[DBG43]]
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1, !dbg [[DBG43]]
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1, !dbg [[DBG43]]
// CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I]], metadata [[META47:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4, !dbg [[DBG48:![0-9]+]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !dbg [[DBG46]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]], !dbg [[DBG43]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]], !dbg [[DBG43]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_LB]], metadata [[META50:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG51:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_UB]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG47]]
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_STRIDE]], metadata [[META53:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IS_LAST]], metadata [[META54:![0-9]+]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I3]], metadata [[META48]], metadata !DIExpression()), !dbg [[DBG40]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP12]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG47]]
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]], !dbg [[DBG51]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG51]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_LB]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG50:![0-9]+]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_UB]], metadata [[META51:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_STRIDE]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[DOTOMP_IS_LAST]], metadata [[META53:![0-9]+]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32* [[I3]], metadata [[META47]], metadata !DIExpression()), !dbg [[DBG36]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg [[DBG54:![0-9]+]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]], !dbg [[DBG50]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG50]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG47]]
// CHECK1-NEXT: br label [[COND_END:%.*]], !dbg [[DBG51]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: br label [[COND_END:%.*]], !dbg [[DBG50]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: br label [[COND_END]], !dbg [[DBG51]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: br label [[COND_END]], !dbg [[DBG50]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ], !dbg [[DBG51]]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ], !dbg [[DBG50]]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG43]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]], !dbg [[DBG47]]
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]], !dbg [[DBG43]]
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG43]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1, !dbg [[DBG49]]
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]], !dbg [[DBG49]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !dbg [[DBG49]]
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !dbg [[DBG55:![0-9]+]]
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !dbg [[DBG57:![0-9]+]]
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64, !dbg [[DBG58:![0-9]+]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i64 [[IDXPROM]], !dbg [[DBG58]]
// CHECK1-NEXT: store i32 [[TMP21]], i32* [[ARRAYIDX]], align 4, !dbg [[DBG59:![0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1, !dbg [[DBG48]]
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]], !dbg [[DBG48]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !dbg [[DBG48]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !dbg [[DBG55:![0-9]+]]
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4, !dbg [[DBG57:![0-9]+]]
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64, !dbg [[DBG58:![0-9]+]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]], !dbg [[DBG58]]
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[ARRAYIDX]], align 4, !dbg [[DBG59:![0-9]+]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG60:![0-9]+]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG43]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG54]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG51]]
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP23]], 1, !dbg [[DBG47]]
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG47]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG43]], !llvm.loop [[LOOP61:![0-9]+]]
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG50]]
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1, !dbg [[DBG43]]
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG54]], !llvm.loop [[LOOP61:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG43]]
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG54]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4, !dbg [[DBG43]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP25]]), !dbg [[DBG62:![0-9]+]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]], !dbg [[DBG43]]
// CHECK1-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG54]]
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4, !dbg [[DBG54]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP21]]), !dbg [[DBG62:![0-9]+]]
// CHECK1-NEXT: br label [[OMP_PRECOND_END]], !dbg [[DBG54]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void, !dbg [[DBG63:![0-9]+]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[M:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CEN:%.*]]) #[[ATTR3]] !dbg [[DBG64:![0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[M_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CEN_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META65:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66:![0-9]+]]
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66]]
// CHECK1-NEXT: store i32* [[M]], i32** [[M_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[M_ADDR]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66]]
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META69:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66]]
// CHECK1-NEXT: store i32* [[CEN]], i32** [[CEN_ADDR]], align 8
// CHECK1-NEXT: call void @llvm.dbg.declare(metadata i32** [[CEN_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[M_ADDR]], align 8, !dbg [[DBG71:![0-9]+]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[CEN_ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[M_ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[CEN_ADDR]], align 8, !dbg [[DBG71]]
// CHECK1-NEXT: call void @.omp_outlined._debug__(i32* [[TMP3]], i32* [[TMP4]], i32* [[TMP5]], i64 [[TMP1]], i32* [[TMP6]]) #[[ATTR4:[0-9]+]], !dbg [[DBG71]]
// CHECK1-NEXT: ret void, !dbg [[DBG71]]
//
//
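Taken together, these checks spell out the restored host convention: __kmpc_fork_call receives the literal capture count (3 here) plus one trailing variadic argument per capture, and the outlined function, together with its ._debug__ twin under debug info, takes each capture as a dedicated parameter. Below is a hedged C-level sketch of that shape, reconstructed from the checks rather than from libomp's headers.

// Sketch only: ident_t and the microtask cast are simplified
// assumptions, not the authoritative libomp declarations.
typedef struct ident ident_t;
extern void __kmpc_fork_call(ident_t *loc, int nargs,
                             void (*microtask)(int *, int *, ...), ...);

// One parameter per capture, as in .omp_outlined. above.
static void outlined(int *gtid, int *btid,
                     int *m, long vla_size, int *cen) {
  for (long i = 0; i < vla_size && i < *m; ++i)
    cen[i] = (int)i;   // mirrors the cen[i3] = i3 store in the checks
}

static void run(ident_t *loc, int *m, long n, int *cen) {
  __kmpc_fork_call(loc, /*nargs=*/3,
                   (void (*)(int *, int *, ...))outlined, m, n, cen);
}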

View File

@@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// This testcase checks emission of debug info for threadprivate variables
// present in any clause of an OpenMP construct.
@@ -7,6 +6,13 @@
// RUN: %clang_cc1 -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// CHECK: define internal void @.omp_outlined._debug__(
// CHECK: call void @llvm.dbg.declare(metadata ptr %.global_tid..addr,
// CHECK: call void @llvm.dbg.declare(metadata ptr %.bound_tid..addr,
// CHECK: call void @llvm.dbg.declare(metadata ptr %nt.addr
// CHECK: store ptr %gbl_dynamic_int, ptr %gbl_dynamic_int.addr, align 8
// CHECK-NOT: call void @llvm.dbg.declare(metadata ptr %gbl_dynamic_int.addr
// CHECK-NOT: call void @llvm.dbg.declare(metadata ptr %gbl_static_int.addr
extern int printf(const char *, ...);
extern void omp_set_num_threads(int);
@@ -18,33 +24,6 @@ __thread int gbl_static_int;
#pragma omp threadprivate(gbl_dynamic_int)
// CHECK-LABEL: @main(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[NT:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[OFFSET:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-NEXT: call void @llvm.dbg.declare(metadata ptr [[NT]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
// CHECK-NEXT: store i32 0, ptr [[NT]], align 4, !dbg [[DBG24]]
// CHECK-NEXT: call void @llvm.dbg.declare(metadata ptr [[OFFSET]], metadata [[META25:![0-9]+]], metadata !DIExpression()), !dbg [[DBG26:![0-9]+]]
// CHECK-NEXT: store i32 10, ptr [[OFFSET]], align 4, !dbg [[DBG26]]
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @gbl_dynamic_int), !dbg [[DBG27:![0-9]+]]
// CHECK-NEXT: store i32 55, ptr [[TMP0]], align 4, !dbg [[DBG28:![0-9]+]]
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @gbl_static_int), !dbg [[DBG29:![0-9]+]]
// CHECK-NEXT: store i32 77, ptr [[TMP1]], align 4, !dbg [[DBG30:![0-9]+]]
// CHECK-NEXT: call void @omp_set_num_threads(i32 noundef 4), !dbg [[DBG31:![0-9]+]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG32:![0-9]+]]
// CHECK-NEXT: store ptr [[NT]], ptr [[TMP2]], align 8, !dbg [[DBG32]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG32]]
// CHECK-NEXT: [[TMP4:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @gbl_dynamic_int), !dbg [[DBG33:![0-9]+]]
// CHECK-NEXT: store ptr [[TMP4]], ptr [[TMP3]], align 8, !dbg [[DBG32]]
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2, !dbg [[DBG32]]
// CHECK-NEXT: [[TMP6:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @gbl_static_int), !dbg [[DBG35:![0-9]+]]
// CHECK-NEXT: store ptr [[TMP6]], ptr [[TMP5]], align 8, !dbg [[DBG32]]
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4:[0-9]+]], i32 1, ptr @.omp_outlined., ptr [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG32]]
// CHECK-NEXT: ret i32 0, !dbg [[DBG36:![0-9]+]]
//
int main() {
int nt = 0;
int offset = 10;
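With the aggregate codegen reverted, the checks above expect dbg.declare calls for the outlined function's own parameters (the tid pointers and nt) but none for the rematerialized threadprivate addresses gbl_dynamic_int.addr and gbl_static_int.addr. Here is a runnable sketch of the pattern under test; the copyin clause and the parallel body are assumptions, since only fragments of the source survive in this hunk (the 55/77 stores and nt = 0 are taken from the checks).

// Compile with: clang -g -fopenmp sketch.c
// The clause below is an assumption about how the threadprivate
// variable reaches the parallel region in the real test.
extern int printf(const char *, ...);
extern void omp_set_num_threads(int);

int gbl_dynamic_int;
__thread int gbl_static_int;
#pragma omp threadprivate(gbl_dynamic_int)

int main(void) {
  int nt = 0;
  gbl_dynamic_int = 55;
  gbl_static_int = 77;
  omp_set_num_threads(4);
#pragma omp parallel copyin(gbl_dynamic_int)
  printf("%d %d %d\n", nt, gbl_dynamic_int, gbl_static_int);
  return 0;
}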

View File

@@ -28,8 +28,7 @@ int maini1() {
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
@@ -37,16 +36,11 @@ int maini1() {
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -54,22 +48,20 @@ int maini1() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooRi(i32* noundef nonnull align 4 dereferenceable(4) [[B]]) #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: [[CALL1:%.*]] = call noundef i32 @_Z3barv() #[[ATTR7]]
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooRi(i32* noundef nonnull align 4 dereferenceable(4) [[B]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[CALL1:%.*]] = call noundef i32 @_Z3barv() #[[ATTR6]]
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[CALL1]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
@@ -88,7 +80,7 @@ int maini1() {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[A_ON_STACK:%.*]] = bitcast i8* [[A]] to i32*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooRi(i32* noundef nonnull align 4 dereferenceable(4) [[A_ON_STACK]]) #[[ATTR7]]
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooRi(i32* noundef nonnull align 4 dereferenceable(4) [[A_ON_STACK]]) #[[ATTR6]]
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[A]], i64 4)
// CHECK1-NEXT: ret i32 [[CALL]]
//
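For the device (nvptx) path, the revert swaps the __kmpc_alloc_shared/__kmpc_alloc_aggregate_arg dance for the older captured_vars_addrs form: the kernel fills a stack array with one i8* per capture and passes the array plus its length to __kmpc_parallel_51. A C-level sketch of that convention follows, with prototypes abridged from the checks; these are assumptions, not the device runtime's authoritative headers.

// Sketch only: prototype details reconstructed from the CHECK lines,
// not taken from the actual device runtime headers.
typedef struct ident ident_t;
extern void __kmpc_parallel_51(ident_t *loc, int gtid, int if_expr,
                               int num_threads, int proc_bind,
                               void *outlined_fn, void *wrapper_fn,
                               void **args, long nargs);

static void __omp_outlined__(int *gtid, int *btid, int *a) { *a += 1; }

static void kernel_body(ident_t *loc, int gtid, int *a) {
  void *captured_vars_addrs[1];  // one slot per captured variable
  captured_vars_addrs[0] = a;    // the i8* bitcast seen in the checks
  __kmpc_parallel_51(loc, gtid, /*if_expr=*/1, /*num_threads=*/-1,
                     /*proc_bind=*/-1, (void *)__omp_outlined__,
                     /*wrapper_fn=*/0, captured_vars_addrs,
                     /*nargs=*/1);
}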

View File

@@ -72,13 +72,13 @@ int test(void) {
{
vxv(v1, v2, v3, N);
}
// CK1: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 1, void ({{.+}})* bitcast (void (i32*, i32*, %struct.anon{{[\.0-9]*}}*)* [[PARALLEL_REGION:@.+]] to void
// CK1: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, [100 x i32]*, [100 x i32]*, [100 x i32]*)* [[PARALLEL_REGION:@.+]] to void
return 0;
}
// CK1: define internal void @__omp_offloading_[[OFFLOAD]]({{.+}})
// CK1: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 1, void ({{.+}})* bitcast (void (i32*, i32*, %struct.anon{{[\.0-9]*}}*)* [[TARGET_REGION:@.+]] to void
// CK1: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, [100 x i32]*, [100 x i32]*, [100 x i32]*)* [[TARGET_REGION:@.+]] to void
// CK1: define internal void [[TARGET_REGION]](
// CK1: call void @t_vxv
@@ -167,11 +167,11 @@ void test(int ***v1, int ***v2, int ***v3, int n) {
{
test_base(v1, v2, v3, 0);
}
// CK2: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon{{[\.0-9]*}}*)* [[PARALLEL_REGION:@.+]] to void
// CK2: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32****, i32****, i32****)* [[PARALLEL_REGION:@.+]] to void
}
// CK2: define internal void @__omp_offloading_[[OFFLOAD_1]]({{.+}})
// CK2: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 1, void ({{.+}})* bitcast (void (i32*, i32*, %struct.anon{{[\.0-9]*}}*)* [[TARGET_REGION_1:@.+]] to void
// CK2: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, i32****, i32****, i32****)* [[TARGET_REGION_1:@.+]] to void
// CK2: define internal void [[TARGET_REGION_1]](
// CK2: call void @test_teams

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -119,90 +119,78 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 4)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 4)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -212,151 +200,135 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 3)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 3)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
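The pair of fork shapes above is the crux of the revert: the aggregate scheme packs the chunk bounds into a %struct.anon.2 and passes a single pointer (i32 1), while the restored scheme zero-extends each bound and passes it as its own trailing operand (i32 2). A minimal, self-contained C++ sketch of the two calling conventions follows; fork_call, microtask_t, and anon are stand-in names invented for this sketch (the real entry point is libomp's varargs __kmpc_fork_call), and only the argc/trailing-operand shapes mirror the checks.

#include <cstdarg>
#include <cstdio>

// Stand-in for libomp's varargs fork entry; the real signature is
// __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro fn, ...).
typedef void (*microtask_t)(int *gtid, int *btid, ...);

static void fork_call(int argc, microtask_t fn, ...) {
  int gtid = 0, btid = 0;
  va_list ap;
  va_start(ap, fn);
  if (argc == 1) {
    fn(&gtid, &btid, va_arg(ap, void *)); // reverted shape: one capture struct*
  } else {
    long lb = va_arg(ap, long);           // restored shape: one operand per capture
    long ub = va_arg(ap, long);
    fn(&gtid, &btid, lb, ub);
  }
  va_end(ap);
}

struct anon { long *prev_lb; long *prev_ub; }; // mirrors %struct.anon.2: two i64*

static void outlined_agg(int *, int *btid, ...) {
  va_list ap;
  va_start(ap, btid);
  anon *ctx = (anon *)va_arg(ap, void *);
  std::printf("aggregate: lb=%ld ub=%ld\n", *ctx->prev_lb, *ctx->prev_ub);
  va_end(ap);
}

static void outlined_scalar(int *, int *btid, ...) {
  va_list ap;
  va_start(ap, btid);
  long lb = va_arg(ap, long);
  long ub = va_arg(ap, long);
  std::printf("scalar: lb=%ld ub=%ld\n", lb, ub);
  va_end(ap);
}

int main() {
  long lb = 0, ub = 999;                    // the zext'ed chunk bounds
  anon ctx = {&lb, &ub};
  fork_call(1, outlined_agg, (void *)&ctx); // i32 1, %struct.anon.2*
  fork_call(2, outlined_scalar, lb, ub);    // i32 2, i64, i64
  return 0;
}

Either way the callee ends up with the same two bounds; the difference is purely in how many operands the runtime forwards and whether an extra stack aggregate is materialized at the call site.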
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@ -366,61 +338,57 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
@ -460,90 +428,78 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l29
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
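The tmain target entry above shows the degenerate case with no captures: the restored convention passes i32 0 and no trailing operands to __kmpc_fork_teams, whereas the aggregate scheme still materialized an empty %struct.anon.3 (align 1) and forwarded its address with i32 1. A short sketch under the same caveats as before (fork_teams here is a shim, not the real runtime entry):

#include <cstdarg>
#include <cstdio>

struct anon3 {}; // the reverted scheme's zero-field capture struct (align 1)

// Shim standing in for __kmpc_fork_teams; only the operand shapes matter here.
static void fork_teams(int argc, void (*fn)(int *, int *, ...), ...) {
  int gtid = 0, btid = 0;
  if (argc == 0) {
    fn(&gtid, &btid);                   // restored: no trailing operands
    return;
  }
  va_list ap;
  va_start(ap, fn);
  fn(&gtid, &btid, va_arg(ap, void *)); // reverted: lone aggregate pointer
  va_end(ap);
}

static void teams_body(int *, int *, ...) { std::puts("teams region"); }

int main() {
  anon3 empty;
  fork_teams(0, teams_body);                 // matches: i32 0
  fork_teams(1, teams_body, (void *)&empty); // matches: i32 1, %struct.anon.3*
  return 0;
}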
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 2)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 2)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@ -553,61 +509,57 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: ret void
//
//


@ -58,387 +58,369 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[TMP0]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i8** [[TMP3]], i8*** [[TMP2]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[ARGC_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[ARGC_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i8** [[TMP2]])
// CHECK1-NEXT: ret void
//
//
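Before the teams fork above, the by-value i32 argc is widened into an i64 slot: the checks store through a bitcast of the ARGC_CASTED alloca and reload the whole i64. A hedged C++ equivalent of that round-trip, with std::memcpy standing in for the IR's bitcast + store (all names here are illustrative only):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Caller side: store the i32 through a reinterpreted view of the i64 slot,
// then reload the whole slot (the checks' bitcast + store i32 + load i64).
static uint64_t widen_to_slot(int32_t argc) {
  uint64_t slot = 0;                      // [[ARGC_CASTED]] alloca
  std::memcpy(&slot, &argc, sizeof argc); // memcpy stands in for the bitcast store
  return slot;                            // the i64 operand handed to the fork
}

// Callee side: the outlined function reads the same 32 bits back out.
static int32_t narrow_from_slot(uint64_t slot) {
  int32_t argc;
  std::memcpy(&argc, &slot, sizeof argc);
  return argc;
}

int main() {
  uint64_t slot = widen_to_slot(42);
  std::printf("argc round-trips as %d\n", narrow_from_slot(slot));
  return 0;
}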
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_COMB_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP6]], 9
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_COMB_LB]], i64* [[DOTOMP_COMB_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP2]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: store i64 [[TMP12]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK1-NEXT: [[TMP17:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: store i8** [[TMP17]], i8*** [[TMP16]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_COMB_LB]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_COMB_UB]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i8**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP7]], i64 [[TMP8]], i32* [[CONV]], i8** [[TMP9]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP18]], [[TMP19]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
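The reduction setup in the function below sizes a stack VLA from the array section spanning argv[0][0] through argv[9][argc-1]: take the two end pointers, subtract them as integers, divide exactly by the element size, add one element, and scale back to bytes. A self-contained sketch of that arithmetic (storage, rows, and the literal bounds are hypothetical stand-ins for the test's argv):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const long argc = 4;               // hypothetical section length per row
  static char storage[10][8] = {};   // hypothetical contiguous backing for argv
  char *rows[10];
  for (int i = 0; i < 10; ++i) rows[i] = storage[i];

  char *first = &rows[0][0];         // &argv[0][0]
  long lb_add_len = -1 + argc;       // [[LB_ADD_LEN]]
  char *last = &rows[9][lb_add_len]; // &argv[9][argc-1]

  uintptr_t lo = (uintptr_t)first;   // the checks' ptrtoint pair
  uintptr_t hi = (uintptr_t)last;
  size_t elems = (size_t)(hi - lo) / sizeof(char) + 1; // sdiv exact, then add nuw 1
  size_t bytes = elems * sizeof(char);                 // mul nuw by the element size
  std::printf("reduction VLA: %zu bytes\n", bytes);    // size of the zeroed VLA
  return 0;
}

The resulting buffer is the zero-initialized VLA the checks allocate after the llvm.stacksave call, which then backs the second kmp_taskred_input_t record.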
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP27:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP28:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP10]], i64 0
// CHECK1-NEXT: [[TMP11:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP11]], i64 0
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP13]]
// CHECK1-NEXT: [[TMP14:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP14]], i64 9
// CHECK1-NEXT: [[TMP15:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP15]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP16:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP17:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = sub i64 [[TMP16]], [[TMP17]]
// CHECK1-NEXT: [[TMP19:%.*]] = sdiv exact i64 [[TMP18]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP20:%.*]] = add nuw i64 [[TMP19]], 1
// CHECK1-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP22]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP20]], align 16
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP20]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP23]]
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP3]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP6]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP7]], i64 9
// CHECK1-NEXT: [[TMP8:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP8]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP9:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = sub i64 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1
// CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP15:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP15]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP13]], align 16
// CHECK1-NEXT: store i64 [[TMP13]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP13]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP16]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP23]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP16]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = ptrtoint i8* [[TMP25]] to i64
// CHECK1-NEXT: [[TMP27:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP26]], [[TMP27]]
// CHECK1-NEXT: [[TMP29:%.*]] = sdiv exact i64 [[TMP28]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP29]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8* [[TMP30]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = ptrtoint i8* [[TMP18]] to i64
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = sub i64 [[TMP19]], [[TMP20]]
// CHECK1-NEXT: [[TMP22:%.*]] = sdiv exact i64 [[TMP21]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP22]]
// CHECK1-NEXT: store i8** [[_TMP6]], i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[_TMP6]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP32]], i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP6]] to i8*
// CHECK1-NEXT: store i8* [[TMP34]], i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP38]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP40:%.*]] = bitcast i32* [[TMP39]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP40]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP42]], i64 0
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP43]], i64 0
// CHECK1-NEXT: [[TMP44:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP45:%.*]] = sext i32 [[TMP44]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP45]]
// CHECK1-NEXT: [[TMP46:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP46]], i64 9
// CHECK1-NEXT: [[TMP47:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP47]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP50:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP51:%.*]] = sub i64 [[TMP49]], [[TMP50]]
// CHECK1-NEXT: [[TMP52:%.*]] = sdiv exact i64 [[TMP51]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP53:%.*]] = add nuw i64 [[TMP52]], 1
// CHECK1-NEXT: [[TMP54:%.*]] = mul nuw i64 [[TMP53]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP54]], i64* [[TMP55]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..2 to i8*), i8** [[TMP56]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP57]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..3 to i8*), i8** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[TMP60]], align 4
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP63:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP61]], i32 1, i32 2, i8* [[TMP62]])
// CHECK1-NEXT: store i8* [[TMP63]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = load i32, i32* [[TMP64]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP65]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP66:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP66]], 9
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i32* [[TMP32]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8*, i8** [[TMP35]], i64 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i8*, i8** [[ARRAYIDX8]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, i8* [[TMP36]], i64 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = sext i32 [[TMP37]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP38]]
// CHECK1-NEXT: [[TMP39:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8*, i8** [[TMP39]], i64 9
// CHECK1-NEXT: [[TMP40:%.*]] = load i8*, i8** [[ARRAYIDX11]], align 8
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, i8* [[TMP40]], i64 [[LB_ADD_LEN10]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX9]], i8** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint i8* [[ARRAYIDX12]] to i64
// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint i8* [[ARRAYIDX9]] to i64
// CHECK1-NEXT: [[TMP44:%.*]] = sub i64 [[TMP42]], [[TMP43]]
// CHECK1-NEXT: [[TMP45:%.*]] = sdiv exact i64 [[TMP44]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP46:%.*]] = add nuw i64 [[TMP45]], 1
// CHECK1-NEXT: [[TMP47:%.*]] = mul nuw i64 [[TMP46]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP47]], i64* [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..2 to i8*), i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..3 to i8*), i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = load i32, i32* [[TMP53]], align 4
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP56:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP54]], i32 1, i32 2, i8* [[TMP55]])
// CHECK1-NEXT: store i8* [[TMP56]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i32, i32* [[TMP57]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP58]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP59]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP67]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP60]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP68]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP61]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP69:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP12:%.*]] = icmp sle i64 [[TMP69]], [[TMP70]]
// CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP62]], [[TMP63]]
// CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP71:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP71]], 1
// CHECK1-NEXT: [[TMP64:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP64]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP73]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP75:%.*]] = load i8**, i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8** [[TMP75]], i8*** [[TMP74]], align 8
// CHECK1-NEXT: [[TMP76:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = load i32, i32* [[TMP76]], align 4
// CHECK1-NEXT: [[TMP78:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP77]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP79:%.*]] = bitcast i8* [[TMP78]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP79]], i32 0, i32 0
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP80]], i32 0, i32 0
// CHECK1-NEXT: [[TMP82:%.*]] = load i8*, i8** [[TMP81]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = bitcast %struct.anon.1* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP82]], i8* align 8 [[TMP83]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP79]], i32 0, i32 1
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP84]], i32 0, i32 0
// CHECK1-NEXT: [[TMP86:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP86]], i8** [[TMP85]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = load i32, i32* [[TMP87]], align 4
// CHECK1-NEXT: [[TMP89:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP88]], i8* [[TMP78]])
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP66]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP68:%.*]] = load i8**, i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8** [[TMP68]], i8*** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = load i32, i32* [[TMP69]], align 4
// CHECK1-NEXT: [[TMP71:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP70]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP72:%.*]] = bitcast i8* [[TMP71]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP72]], i32 0, i32 0
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP73]], i32 0, i32 0
// CHECK1-NEXT: [[TMP75:%.*]] = load i8*, i8** [[TMP74]], align 8
// CHECK1-NEXT: [[TMP76:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP75]], i8* align 8 [[TMP76]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP72]], i32 0, i32 1
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP77]], i32 0, i32 0
// CHECK1-NEXT: [[TMP79:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP79]], i8** [[TMP78]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load i32, i32* [[TMP80]], align 4
// CHECK1-NEXT: [[TMP82:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP81]], i8* [[TMP71]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP90:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[TMP90]], 1
// CHECK1-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP83]], 1
// CHECK1-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP91:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = load i32, i32* [[TMP91]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP92]])
// CHECK1-NEXT: [[TMP84:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[TMP84]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP85]])
// CHECK1-NEXT: [[TMP86:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = load i32, i32* [[TMP86]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP87]], i32 1)
// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP89:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP89]], i8** [[TMP88]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP90]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP92:%.*]] = inttoptr i64 [[TMP13]] to i8*
// CHECK1-NEXT: store i8* [[TMP92]], i8** [[TMP91]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = load i32, i32* [[TMP93]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP94]], i32 1)
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP96:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP96]], i8** [[TMP95]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP97]], align 8
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP99:%.*]] = inttoptr i64 [[TMP20]] to i8*
// CHECK1-NEXT: store i8* [[TMP99]], i8** [[TMP98]], align 8
// CHECK1-NEXT: [[TMP100:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP101:%.*]] = load i32, i32* [[TMP100]], align 4
// CHECK1-NEXT: [[TMP102:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP103:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP101]], i32 2, i64 24, i8* [[TMP102]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP103]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP95:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP94]], i32 2, i64 24, i8* [[TMP95]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP96]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP104:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP105:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP104]], [[TMP105]]
// CHECK1-NEXT: store i32 [[ADD14]], i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP20]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP97:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP98:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP97]], [[TMP98]]
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP13]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP99]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST15:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[TMP108:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP108]] to i32
// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[CONV]], [[CONV16]]
// CHECK1-NEXT: [[CONV18:%.*]] = trunc i32 [[ADD17]] to i8
// CHECK1-NEXT: store i8 [[CONV18]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT19]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP100:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP100]] to i32
// CHECK1-NEXT: [[TMP101:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP101]] to i32
// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]]
// CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK1-NEXT: store i8 [[CONV19]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done21:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB4]], i32 [[TMP101]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP99]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done22:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB4]], i32 [[TMP94]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP109:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP110:%.*]] = atomicrmw add i32* [[TMP6]], i32 [[TMP109]] monotonic, align 4
// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP20]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP111]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY22]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY23:%.*]]
// CHECK1: omp.arraycpy.body23:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST24:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST25:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP112:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP112]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]] monotonic, align 1
// CHECK1-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP103:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP102]] monotonic, align 4
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP13]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP104]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]]
// CHECK1: omp.arraycpy.body24:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP105:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP105]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP113:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP118:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP113]], i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[TMP114:%.*]] = load i8, i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP114]] to i32
// CHECK1-NEXT: [[TMP115:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP115]] to i32
// CHECK1-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV28]], [[CONV29]]
// CHECK1-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i8
// CHECK1-NEXT: store i8 [[CONV31]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP116:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP117:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP113]], i8 [[TMP116]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP118]] = extractvalue { i8, i1 } [[TMP117]], 0
// CHECK1-NEXT: [[TMP119:%.*]] = extractvalue { i8, i1 } [[TMP117]], 1
// CHECK1-NEXT: br i1 [[TMP119]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP106:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP111:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP106]], i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[TMP108:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP108]] to i32
// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]]
// CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK1-NEXT: store i8 [[CONV32]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP109:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP110:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP106]], i8 [[TMP109]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP111]] = extractvalue { i8, i1 } [[TMP110]], 0
// CHECK1-NEXT: [[TMP112:%.*]] = extractvalue { i8, i1 } [[TMP110]], 1
// CHECK1-NEXT: br i1 [[TMP112]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP111]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY23]]
// CHECK1: omp.arraycpy.done35:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP104]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]]
// CHECK1: omp.arraycpy.done36:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP120:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP120]])
// CHECK1-NEXT: [[TMP113:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP113]])
// CHECK1-NEXT: ret void
//
//
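The block above checks the lowering of a worksharing loop that carries a task reduction over a scalar and a VLA-sized array section, with each iteration spawning an in_reduction task: that shape is what produces the __kmpc_taskred_modifier_init / __kmpc_omp_task_alloc / __kmpc_task_reduction_get_th_data sequence. A hypothetical reproducer — the clauses and names here are an assumption, not the verbatim test source — could look like:

int main(int argc, char **argv) {
  // assumed source shape: scalar argc plus an array section of argv are the
  // two reduction items registered with __kmpc_taskred_modifier_init (i32 2)
#pragma omp parallel
#pragma omp for reduction(task, + : argc, argv[0:10][0:argc])
  for (long long i = 0; i < 10; ++i) { // i64 IV matches __kmpc_for_static_init_8
#pragma omp task in_reduction(+ : argc, argv[0:10][0:argc])
    argc += i;
  }
  return 0;
}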
@@ -550,7 +532,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@@ -564,7 +546,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@@ -577,29 +559,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@@ -613,7 +595,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -119,84 +119,71 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
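This offloading region (and the matching l41 region further down) exercises teams-distribute-parallel-for with a proc_bind clause; the clause surfaces in the IR only as the literal handed to __kmpc_push_proc_bind (4 and 3, i.e. spread and close in kmp_proc_bind_t). A plausible source shape — the loop bounds are inferred from the checks (UB of 999, final store of 1000), not taken from the test file — is:

int main() {
#pragma omp target teams distribute parallel for proc_bind(spread) // __kmpc_push_proc_bind(..., 4)
  for (int i = 0; i < 1000; ++i)
    ;
#pragma omp target teams distribute parallel for proc_bind(close)  // __kmpc_push_proc_bind(..., 3)
  for (int i = 0; i < 1000; ++i)
    ;
  return 0;
}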
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 4), !llvm.access.group !6
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8, !llvm.access.group !6
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8, !llvm.access.group !6
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8, !llvm.access.group !6
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8, !llvm.access.group !6
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]), !llvm.access.group !6
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group !6
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !6
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
@@ -205,11 +192,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -219,64 +207,60 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
@@ -287,84 +271,71 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 3), !llvm.access.group !15
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8, !llvm.access.group !15
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8, !llvm.access.group !15
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8, !llvm.access.group !15
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8, !llvm.access.group !15
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]]), !llvm.access.group !15
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group !15
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
@@ -373,11 +344,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -387,64 +359,60 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
@@ -488,84 +456,71 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l29
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
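// Worth noting in the two __kmpc_fork_teams lines above: with no captures, the
// aggregate scheme still allocates and passes an empty %struct.anon.3, whereas
// the restored scheme forwards zero trailing arguments. A hypothetical C++
// rendering of the difference (sketch only; names are invented, not from the
// test sources):
//
//   #include <cstdint>
//
//   struct Anon3 {};  // empty context struct (the align-1 alloca above)
//
//   // Aggregate scheme: nargs == 1, one pointer to an empty struct.
//   void launch_aggregate(void (*outlined)(int32_t *, int32_t *, Anon3 *)) {
//     Anon3 ctx;
//     int32_t gtid = 0, btid = 0;
//     outlined(&gtid, &btid, &ctx);
//   }
//
//   // Per-capture scheme: nargs == 0, nothing beyond the two tid slots.
//   void launch_per_capture(void (*outlined)(int32_t *, int32_t *)) {
//     int32_t gtid = 0, btid = 0;
//     outlined(&gtid, &btid);
//   }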
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 999
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]], i32 2), !llvm.access.group !21
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8, !llvm.access.group !21
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8, !llvm.access.group !21
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], i64** [[TMP12]], align 8, !llvm.access.group !21
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], i64** [[TMP13]], align 8, !llvm.access.group !21
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]]), !llvm.access.group !21
// CHECK1-NEXT: call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group !21
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !21
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !21
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
@@ -574,11 +529,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -588,64 +544,60 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64*, i64** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 999, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP2]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 999
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1000, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
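// For orientation, a rough C++ sketch (assumed names, not from the suppressed
// sources) of the two outlined-function conventions this hunk toggles between:
// the reverted aggregate form packs pointers to the captured previous bounds
// into a stack context struct and passes one pointer, while the restored form
// zero-extends the bounds and forwards them as individual varargs to
// __kmpc_fork_call.
//
//   #include <cstdint>
//
//   // Reverted aggregate convention: one context argument holding pointers
//   // to the captured previous lower/upper bounds.
//   struct Anon4 {
//     int64_t *prev_lb;
//     int64_t *prev_ub;
//   };
//   void outlined_aggregate(int32_t *gtid, int32_t *btid, Anon4 *ctx);
//
//   // Restored per-capture convention: each capture travels as its own
//   // by-value argument, matching the two i64 varargs in the CHECK lines.
//   void outlined_per_capture(int32_t *gtid, int32_t *btid,
//                             int64_t prev_lb, int64_t prev_ub);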
5 file diffs suppressed because they are too large

View File
@@ -140,94 +140,89 @@ int main() {
// CHECK1-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[CONV]], i32** [[TMP0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[SIVAR]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 2, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
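// The CHECK1 block above encodes the usual SIMD-style reduction skeleton: a
// private copy (SIVAR1) is zero-initialized, accumulated inside the distribute
// loop, and folded back into the by-reference capture after .omp.final.done.
// A minimal source shape that produces this pattern might look like the
// following (an assumption for illustration; the actual test source is not
// shown in this diff):
//
//   int main() {
//     int sivar = 0;
//   #pragma omp target teams distribute simd reduction(+ : sivar)
//     for (int i = 0; i < 2; ++i) // trip count 2 matches the UB of 1 above
//       sivar += i;               // accumulates into the private copy
//     return sivar;
//   }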
@@ -291,94 +286,89 @@ int main() {
// CHECK1-SAME: (i64 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[CONV]], i32** [[TMP0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[T_VAR]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 2, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
@@ -446,93 +436,88 @@ int main() {
// CHECK3-SAME: (i32 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK3-NEXT: store i32 [[SIVAR]], i32* [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[SIVAR_ADDR]], i32** [[TMP0]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[SIVAR_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
// CHECK3-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[SIVAR]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 2, i32* [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
//
//
@@ -595,93 +580,88 @@ int main() {
// CHECK3-SAME: (i32 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK3-NEXT: store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[T_VAR_ADDR]], i32** [[TMP0]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK3-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
// CHECK3-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[T_VAR]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 2, i32* [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
//
//
@@ -906,98 +886,93 @@ int main() {
// CHECK9-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK9-NEXT: store i32* [[CONV]], i32** [[TMP0]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK9-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK9-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
// CHECK9-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], [[TMP11]]
// CHECK9-NEXT: store i32 [[ADD2]], i32* [[SIVAR]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store i32* [[SIVAR]], i32** [[TMP13]], align 8, !llvm.access.group !4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store i32* [[SIVAR1]], i32** [[TMP11]], align 8, !llvm.access.group !4
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9: .omp.final.then:
// CHECK9-NEXT: store i32 2, i32* [[I]], align 4
// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK9: .omp.final.done:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[TMP2]], align 4
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK9-NEXT: ret void
//
//
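For orientation, the CHECK9 lines above verify the host-side outlined functions for a teams region over sivar. The test's source is not part of this diff, so the following is only a hedged reconstruction of the kind of region that yields this IR; the directive, the 0..1 trip count, and the lambda body are all assumptions inferred from the static loop init and the single %class.anon.0 capture:

// Hypothetical sketch, not the actual test source.
int sivar = 0;
#pragma omp target teams distribute simd reduction(+ : sivar)
for (int i = 0; i < 2; ++i) {
  sivar += i;                    // cf. the add into the private [[SIVAR1]]
  [&]() { /* reads sivar */ }(); // nested lambda, cf. the %class.anon.0 store
}

With the aggregate convention removed by this revert, the sivar pointer is once again passed to __kmpc_fork_teams as its own trailing argument instead of through a %struct.anon field, as the two fork_teams lines above show.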

View File

@ -421,7 +421,6 @@ int main() {
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
@ -431,30 +430,22 @@ int main() {
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK1-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP5]], %struct.S.0** [[TMP4]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP1]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP7]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP3]]
//
//
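The two __kmpc_fork_call lines above show the calling-convention change this revert undoes: the aggregate form packs every capture into one %struct.anon and passes a single context pointer with an argument count of 1, while the restored form passes each capture as its own vararg with an argument count of 4. A minimal sketch of the two outlined-function shapes, hand-written for illustration (S stands in for the test's %struct.S.0; none of these names come from the patch):

// Aggregate convention (removed by this revert): one context argument.
struct anon {
  int *t_var;
  int (*vec)[2];
  S (*s_arr)[2];
  S *var;
};
void omp_outlined_agg(int *gtid, int *btid, anon *ctx);

// Per-variable convention (restored): one trailing argument per capture.
void omp_outlined(int *gtid, int *btid, int *t_var, int (*vec)[2],
                  S (*s_arr)[2], S *var);

The body of the aggregate variant begins with the getelementptr/load sequence checked below to unpack each field before the usual firstprivate copies; the restored variant instead loads each capture straight from its own parameter alloca.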
// CHECK1-LABEL: define {{[^@]+}}@_ZN2StC2Ev
@ -523,11 +514,14 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
@ -536,128 +530,126 @@ int main() {
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[T_VAR3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC4:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR5:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[AGG_TMP4:%.*]] = alloca [[STRUCT_ST]], align 4
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[AGG_TMP8:%.*]] = alloca [[STRUCT_ST]], align 4
// CHECK1-NEXT: [[_TMP9:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: [[TMP8:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP7]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP8]], %struct.S.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP9]], %struct.S.0** [[_TMP1]], align 8
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP3]], %struct.S.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP4]], %struct.S.0** [[_TMP1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast [2 x i32]* [[TMP4]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i64 8, i1 false)
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast [2 x %struct.S.0]* [[TMP6]] to %struct.S.0*
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE3:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[T_VAR3]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8*
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], i64 8, i1 false)
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP9]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]])
// CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]])
// CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE3]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done3:
// CHECK1-NEXT: [[TMP15:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8
// CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP4]])
// CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP15]], %struct.St* noundef [[AGG_TMP4]])
// CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP4]]) #[[ATTR2]]
// CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP18]], 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done6:
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8
// CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP8]])
// CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR7]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP10]], %struct.St* noundef [[AGG_TMP8]])
// CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP8]]) #[[ATTR2]]
// CHECK1-NEXT: store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP23]], 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP25]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 [[TMP24]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP27]] to i64
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM7]]
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast %struct.S.0* [[TMP26]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[T_VAR3]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC4]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 [[TMP19]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP9]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP22]] to i64
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i64 0, i64 [[IDXPROM11]]
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX12]] to i8*
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[TMP21]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
// CHECK1-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP32]])
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
// CHECK1-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i64 2
// CHECK1-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR7]]) #[[ATTR2]]
// CHECK1-NEXT: [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP33]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP28]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE11:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done11:
// CHECK1-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP35]])
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done15:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]])
// CHECK1-NEXT: ret void
//
//
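The outlined body above makes local firstprivate copies (T_VAR3, VEC4, S_ARR5, VAR7, each constructed with an St temporary) before the statically scheduled worksharing loop, so the enclosing source is plausibly a parallel worksharing loop with a firstprivate clause. A hedged reconstruction, with S and St standing for the test's template types and every detail an inference from the IR rather than the actual test source:

// Hypothetical sketch, not the actual test source.
#pragma omp parallel for firstprivate(t_var, vec, s_arr, var)
for (int i = 0; i < 2; ++i) {
  vec[i] = t_var;  // cf. the store through [[ARRAYIDX]]
  s_arr[i] = *var; // cf. the 4-byte memcpy of %struct.S.0
}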
@ -875,11 +867,11 @@ int main() {
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
@ -890,80 +882,78 @@ int main() {
// CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[G1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** @g1, align 8
// CHECK3-NEXT: store i32* [[TMP3]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8
// CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load volatile i32, i32* @g, align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[G]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load volatile i32, i32* @g, align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[G1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[G]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4
// CHECK3-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: store i32 [[TMP6]], i32* [[SIVAR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 1
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: store i32 1, i32* [[G]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK3-NEXT: store volatile i32 2, i32* [[TMP15]], align 4
// CHECK3-NEXT: store i32 3, i32* [[SIVAR]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP16]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP18:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK3-NEXT: store i32* [[TMP18]], i32** [[TMP17]], align 8
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[TMP19]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK3-NEXT: store volatile i32 2, i32* [[TMP13]], align 4
// CHECK3-NEXT: store i32 3, i32* [[SIVAR3]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP14]], align 8
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP16:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK3-NEXT: store i32* [[TMP16]], i32** [[TMP15]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[SIVAR3]], i32** [[TMP17]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]])
// CHECK3-NEXT: ret void
//
//
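CHECK3 is the lambda flavour of the same body: the loop writes g, the g1 reference, and the private sivar, then captures all three by reference into a %class.anon.0 and calls it. A hedged sketch of that body (the constants mirror the stores checked above; everything else is assumed):

// Hypothetical sketch, not the actual test source.
g = 1;
g1 = 2; // volatile store through the captured reference
sivar = 3;
[&]() { /* uses g, g1, sivar */ }(); // three by-reference captures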
@ -1123,22 +1113,19 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5sivar)
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
@ -1149,59 +1136,57 @@ int main() {
// CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** @g1, align 8
// CHECK4-NEXT: store i32* [[TMP3]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[G]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[G1]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[G]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4
// CHECK4-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: store i32 [[TMP6]], i32* [[SIVAR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 1
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: store i32 1, i32* [[G]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store volatile i32 1, i32* [[TMP15]], align 4
// CHECK4-NEXT: store i32 2, i32* [[SIVAR]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store volatile i32 1, i32* [[TMP13]], align 4
// CHECK4-NEXT: store i32 2, i32* [[SIVAR3]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 1
@ -1213,34 +1198,34 @@ int main() {
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP16:%.*]] = load volatile i32, i32* [[G]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP16]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED4:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store i32* [[TMP17]], i32** [[BLOCK_CAPTURED4]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK4-NEXT: store i32 [[TMP18]], i32* [[BLOCK_CAPTURED5]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP19]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP21:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP20]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP23]](i8* noundef [[TMP21]])
// CHECK4-NEXT: [[TMP14:%.*]] = load volatile i32, i32* [[G]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP14]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store i32* [[TMP15]], i32** [[BLOCK_CAPTURED5]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED6:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR3]], align 4
// CHECK4-NEXT: store i32 [[TMP16]], i32* [[BLOCK_CAPTURED6]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP17]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP21]](i8* noundef [[TMP19]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP24]], 1
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]])
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]])
// CHECK4-NEXT: ret void
//
//
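CHECK4 is the -fblocks flavour: the same loop body builds a stack block (the _NSConcreteStackBlock setup above) that captures g by value, the g1 reference, and sivar by value, then invokes it through the block literal. A hedged sketch, assuming the blocks extension is enabled:

// Hypothetical sketch, not the actual test source.
g = 1;
g1 = 1; // volatile store through the captured reference
sivar = 2;
^{ /* uses g, g1, sivar */ }(); // cf. the block-literal invoke above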

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -116,9 +116,7 @@ int main() {
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
@ -129,8 +127,8 @@ int main() {
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float noundef 2.000000e+00)
// CHECK1-NEXT: store %struct.S* [[TEST]], %struct.S** [[VAR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
@ -141,8 +139,8 @@ int main() {
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done2:
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
@ -172,11 +170,10 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S*, align 8
@ -193,8 +190,6 @@ int main() {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* undef, %struct.S** [[_TMP1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
@@ -212,75 +207,75 @@ int main() {
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: store %struct.S* [[VAR]], %struct.S** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP10]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load %struct.S*, %struct.S** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S*, %struct.S** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8*
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP11]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[TMP10]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
// CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 2
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done8:
// CHECK1-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP20]])
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP19]])
// CHECK1-NEXT: ret void
//
//
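Inside each outlined body the revert deletes the same three-instruction prologue (the __context_addr alloca, the store of the incoming %struct.anon*, and the reload into [[TMP0]]), which is why every later %TMP index in these autogenerated CHECK lines shifts down by one; the checks look regenerated with the usual update_cc_test_checks.py workflow rather than edited by hand. The deleted round trip, as a rough C++ sketch with assumed names:

struct AnonCtx { int *t_var; };      // stand-in for %struct.anon

// Aggregate prologue being deleted: spill the incoming context pointer to a
// stack slot, then immediately reload it before any field access.
static int body_aggregate(AnonCtx *context) {
  AnonCtx *context_addr = context;   // store %struct.anon* ..., %struct.anon** [[__CONTEXT_ADDR]]
  AnonCtx *ctx = context_addr;       // [[TMP0]] = load %struct.anon*, ...
  return *ctx->t_var;
}

// Restored shape: captures arrive as ordinary parameters; nothing to unpack.
static int body_flat(int *t_var) { return *t_var; }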
@@ -295,11 +290,10 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -310,52 +304,50 @@ int main() {
// CHECK1-NEXT: [[I1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
@@ -363,34 +355,33 @@ int main() {
// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_1:%.*]], align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.1], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: store %struct.S.1* [[TEST]], %struct.S.1** [[VAR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.1* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
@@ -430,181 +421,178 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.1], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_1:%.*]], align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.1* undef, %struct.S.1** [[_TMP1]], align 8
// CHECK1-NEXT: store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK1: arrayctor.loop:
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.1* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: store %struct.S.1* [[VAR]], %struct.S.1** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP10]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load %struct.S.1*, %struct.S.1** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S.1* [[ARRAYIDX5]] to i8*
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S.1* [[TMP11]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i64 4, i1 false)
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN7]], i64 2
// CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN7]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.1* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done8:
// CHECK1-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP20]])
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_1:%.*]], %struct.S.1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_1:%.*]], %struct.S.1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
@@ -619,11 +607,10 @@ int main() {
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca double*, align 8
@@ -640,69 +627,67 @@ int main() {
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: store double* undef, double** [[_TMP1]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store double* [[G1]], double** [[_TMP2]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: store double 1.000000e+00, double* [[G]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK3-NEXT: store volatile double 1.000000e+00, double* [[TMP9]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK3-NEXT: store volatile double 1.000000e+00, double* [[TMP8]], align 8
// CHECK3-NEXT: store i32 3, i32* [[SVAR]], align 4
// CHECK3-NEXT: store float 4.000000e+00, float* [[SFVAR]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store double* [[G]], double** [[TMP10]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP12:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK3-NEXT: store double* [[TMP12]], double** [[TMP11]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[SVAR]], i32** [[TMP13]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
// CHECK3-NEXT: store float* [[SFVAR]], float** [[TMP14]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store double* [[G]], double** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP11:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK3-NEXT: store double* [[TMP11]], double** [[TMP10]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[SVAR]], i32** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
// CHECK3-NEXT: store float* [[SFVAR]], float** [[TMP13]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK3-NEXT: ret void
//
//
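The CHECK3 body differs from CHECK1 only in its per-iteration work: it fills the four slots of a %class.anon.0 closure (GEP indices 0 through 3) with the addresses of g, the referenced g1, svar, and sfvar, then invokes the lambda's call operator. A hedged C++ reconstruction of that shape, not the verbatim test source:

static void closure_shape() {
  double g = 0.0;
  double g1_storage = 0.0;
  volatile double &g1 = g1_storage;  // the volatile store goes through [[TMP8]]
  int svar = 0;
  float sfvar = 0.0f;
  g = 1.0;                           // store double 1.000000e+00, double* [[G]]
  g1 = 1.0;                          // store volatile double 1.000000e+00
  svar = 3;                          // store i32 3, i32* [[SVAR]]
  sfvar = 4.0f;                      // store float 4.000000e+00
  auto body = [&g, &g1, &svar, &sfvar] {
    // four by-reference captures become the four pointer slots of %class.anon.0
  };
  body();                            // the call to the lambda's operator()
}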
@@ -722,20 +707,18 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca double*, align 8
@@ -752,44 +735,42 @@ int main() {
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: store double* undef, double** [[_TMP1]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: store double* [[G1]], double** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: store double 1.000000e+00, double* [[G]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK4-NEXT: store volatile double 1.000000e+00, double* [[TMP9]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK4-NEXT: store volatile double 1.000000e+00, double* [[TMP8]], align 8
// CHECK4-NEXT: store i32 2, i32* [[SVAR]], align 4
// CHECK4-NEXT: store float 3.000000e+00, float* [[SFVAR]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 0
@@ -803,37 +784,37 @@ int main() {
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP10:%.*]] = load volatile double, double* [[G]], align 8
// CHECK4-NEXT: store volatile double [[TMP10]], double* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load volatile double, double* [[G]], align 8
// CHECK4-NEXT: store volatile double [[TMP9]], double* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED4:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP11:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK4-NEXT: store double* [[TMP11]], double** [[BLOCK_CAPTURED4]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load double*, double** [[_TMP2]], align 8
// CHECK4-NEXT: store double* [[TMP10]], double** [[BLOCK_CAPTURED4]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[SVAR]], align 4
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[BLOCK_CAPTURED5]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[SVAR]], align 4
// CHECK4-NEXT: store i32 [[TMP11]], i32* [[BLOCK_CAPTURED5]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED6:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]], i32 0, i32 8
// CHECK4-NEXT: [[TMP13:%.*]] = load float, float* [[SFVAR]], align 4
// CHECK4-NEXT: store float [[TMP13]], float* [[BLOCK_CAPTURED6]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP14]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8* [[TMP17]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP18]](i8* noundef [[TMP16]])
// CHECK4-NEXT: [[TMP12:%.*]] = load float, float* [[SFVAR]], align 4
// CHECK4-NEXT: store float [[TMP12]], float* [[BLOCK_CAPTURED6]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, double*, i32, float }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP13]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP15:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP14]], align 8
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP17]](i8* noundef [[TMP15]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
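CHECK4 is the blocks variant of the same region: instead of a lambda closure it materializes a stack block literal and calls it through the generic-block path, loading the invoke pointer at field index 3. Its layout in hedged C++ terms, with field names assumed from the standard Blocks ABI rather than taken from this diff:

struct BlockDescriptor;              // opaque; @__block_descriptor_tmp.1 above

struct BlockLiteral {                // <{ i8*, i32, i32, i8*,
                                     //    %struct.__block_descriptor*,
                                     //    double, double*, i32, float }>
  void *isa;                         // a stack-block class pointer (assumed)
  int flags;                         // block flag bits (value elided above)
  int reserved;
  void (*invoke)(void *);            // loaded at GEP index 3, then called
  BlockDescriptor *descriptor;
  double g;                          // by-value capture ([[BLOCK_CAPTURED]])
  double *g1;                        // pointer capture  ([[BLOCK_CAPTURED4]])
  int svar;                          // [[BLOCK_CAPTURED5]]
  float sfvar;                       // [[BLOCK_CAPTURED6]]
};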

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -41,301 +41,295 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i8*** [[ARGV_ADDR]], i8**** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8***)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8*** [[ARGV_ADDR]])
// CHECK1-NEXT: ret i32 0
//
//
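The two __kmpc_fork_call lines above are the crux of the revert for this file: the aggregate scheme packed the captures into a %struct.anon and passed a single context pointer (argument count i32 1), while the restored scheme forwards each capture as its own variadic argument (i32 2). For orientation, a source construct of roughly this shape lowers to the IR being checked; this is a hedged reconstruction, not the verbatim test source, which lives in the clang test file:

int main(int argc, char **argv) {
  // Two captures (argc, argv), a worksharing loop with a task-modified
  // reduction, and an in_reduction task in the body -- matching the
  // __kmpc_taskred_modifier_init and __kmpc_omp_task_alloc checks above.
#pragma omp parallel for reduction(task, + : argc, argv[0:10][0:argc])
  for (long long i = 0; i < 10; ++i) {
#pragma omp task in_reduction(+ : argc, argv[0:10][0:argc])
    argc += argv[0][0];
  }
  return 0;
}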
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP27:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP28:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i8***, i8**** [[TMP3]], align 8
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8*** [[ARGV]], i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i8***, i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP5]], i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP8]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP9]], i64 9
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP10]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP15:%.*]] = add nuw i64 [[TMP14]], 1
// CHECK1-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP17:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP17]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP15]], align 16
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP15]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP18]]
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP5]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP6]], i64 9
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP7]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP12:%.*]] = add nuw i64 [[TMP11]], 1
// CHECK1-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP14]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP12]], align 16
// CHECK1-NEXT: store i64 [[TMP12]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP12]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP18]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP19:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint i8* [[TMP20]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP23:%.*]] = sub i64 [[TMP21]], [[TMP22]]
// CHECK1-NEXT: [[TMP24:%.*]] = sdiv exact i64 [[TMP23]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP24]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint i8* [[TMP17]] to i64
// CHECK1-NEXT: [[TMP19:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP20:%.*]] = sub i64 [[TMP18]], [[TMP19]]
// CHECK1-NEXT: [[TMP21:%.*]] = sdiv exact i64 [[TMP20]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP21]]
// CHECK1-NEXT: store i8** [[_TMP6]], i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8* [[TMP22]], i8** [[_TMP6]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i32* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i32* [[TMP34]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP35]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP37]], i64 0
// CHECK1-NEXT: [[TMP38:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP38]], i64 0
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = sext i32 [[TMP39]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP40]]
// CHECK1-NEXT: [[TMP41:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP41]], i64 9
// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP42]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP43]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]]
// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP48:%.*]] = add nuw i64 [[TMP47]], 1
// CHECK1-NEXT: [[TMP49:%.*]] = mul nuw i64 [[TMP48]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP49]], i64* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP54]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = load i32, i32* [[TMP55]], align 4
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP58:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP56]], i32 1, i32 2, i8* [[TMP57]])
// CHECK1-NEXT: store i8* [[TMP58]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP59]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP60]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP61]], 9
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i32* [[TMP31]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP32]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP34:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8*, i8** [[TMP34]], i64 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i8*, i8** [[ARRAYIDX8]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, i8* [[TMP35]], i64 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = sext i32 [[TMP36]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP37]]
// CHECK1-NEXT: [[TMP38:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8*, i8** [[TMP38]], i64 9
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[ARRAYIDX11]], align 8
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, i8* [[TMP39]], i64 [[LB_ADD_LEN10]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX9]], i8** [[TMP40]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint i8* [[ARRAYIDX12]] to i64
// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint i8* [[ARRAYIDX9]] to i64
// CHECK1-NEXT: [[TMP43:%.*]] = sub i64 [[TMP41]], [[TMP42]]
// CHECK1-NEXT: [[TMP44:%.*]] = sdiv exact i64 [[TMP43]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP45:%.*]] = add nuw i64 [[TMP44]], 1
// CHECK1-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP46]], i64* [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[TMP52]], align 4
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP55:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], i32 1, i32 2, i8* [[TMP54]])
// CHECK1-NEXT: store i8* [[TMP55]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP57]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP58:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP58]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP62]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP59]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP63]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP60]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP64:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP12:%.*]] = icmp sle i64 [[TMP64]], [[TMP65]]
// CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP61]], [[TMP62]]
// CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP66:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP66]], 1
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP63]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP70:%.*]] = load i8**, i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8** [[TMP70]], i8*** [[TMP69]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = load i32, i32* [[TMP71]], align 4
// CHECK1-NEXT: [[TMP73:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP72]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8* [[TMP73]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP74]], i32 0, i32 0
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP75]], i32 0, i32 0
// CHECK1-NEXT: [[TMP77:%.*]] = load i8*, i8** [[TMP76]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP77]], i8* align 8 [[TMP78]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP74]], i32 0, i32 1
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP79]], i32 0, i32 0
// CHECK1-NEXT: [[TMP81:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP81]], i8** [[TMP80]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i32, i32* [[TMP82]], align 4
// CHECK1-NEXT: [[TMP84:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP83]], i8* [[TMP73]])
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP67:%.*]] = load i8**, i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8** [[TMP67]], i8*** [[TMP66]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = load i32, i32* [[TMP68]], align 4
// CHECK1-NEXT: [[TMP70:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP69]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP71:%.*]] = bitcast i8* [[TMP70]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP71]], i32 0, i32 0
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP72]], i32 0, i32 0
// CHECK1-NEXT: [[TMP74:%.*]] = load i8*, i8** [[TMP73]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP74]], i8* align 8 [[TMP75]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP71]], i32 0, i32 1
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP76]], i32 0, i32 0
// CHECK1-NEXT: [[TMP78:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP78]], i8** [[TMP77]], align 8
// CHECK1-NEXT: [[TMP79:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = load i32, i32* [[TMP79]], align 4
// CHECK1-NEXT: [[TMP81:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP80]], i8* [[TMP70]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP85:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[TMP85]], 1
// CHECK1-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP82]], 1
// CHECK1-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP86:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = load i32, i32* [[TMP86]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP87]])
// CHECK1-NEXT: [[TMP88:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = load i32, i32* [[TMP88]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP89]], i32 1)
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP91:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: [[TMP83:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = load i32, i32* [[TMP83]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP84]])
// CHECK1-NEXT: [[TMP85:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = load i32, i32* [[TMP85]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP86]], i32 1)
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP88:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP88]], i8** [[TMP87]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP89]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP91:%.*]] = inttoptr i64 [[TMP12]] to i8*
// CHECK1-NEXT: store i8* [[TMP91]], i8** [[TMP90]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP92]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP94:%.*]] = inttoptr i64 [[TMP15]] to i8*
// CHECK1-NEXT: store i8* [[TMP94]], i8** [[TMP93]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[TMP95]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP98:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP96]], i32 2, i64 24, i8* [[TMP97]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP98]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP92:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = load i32, i32* [[TMP92]], align 4
// CHECK1-NEXT: [[TMP94:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP95:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP93]], i32 2, i64 24, i8* [[TMP94]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP95]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP99:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP100:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP99]], [[TMP100]]
// CHECK1-NEXT: store i32 [[ADD14]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP15]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP101]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP96]], [[TMP97]]
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP12]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST15:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP102:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP102]] to i32
// CHECK1-NEXT: [[TMP103:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP103]] to i32
// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[CONV]], [[CONV16]]
// CHECK1-NEXT: [[CONV18:%.*]] = trunc i32 [[ADD17]] to i8
// CHECK1-NEXT: store i8 [[CONV18]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT19]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP99]] to i32
// CHECK1-NEXT: [[TMP100:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP100]] to i32
// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]]
// CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK1-NEXT: store i8 [[CONV19]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP101]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done21:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB3]], i32 [[TMP96]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done22:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB3]], i32 [[TMP93]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP105:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP104]] monotonic, align 4
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP15]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY22]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY23:%.*]]
// CHECK1: omp.arraycpy.body23:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST24:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST25:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]] monotonic, align 1
// CHECK1-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP102:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP101]] monotonic, align 4
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP12]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP103]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]]
// CHECK1: omp.arraycpy.body24:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP104:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP104]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP108:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP113:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP108]], i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[TMP109:%.*]] = load i8, i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP109]] to i32
// CHECK1-NEXT: [[TMP110:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP110]] to i32
// CHECK1-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV28]], [[CONV29]]
// CHECK1-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i8
// CHECK1-NEXT: store i8 [[CONV31]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP111:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP112:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP108]], i8 [[TMP111]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP113]] = extractvalue { i8, i1 } [[TMP112]], 0
// CHECK1-NEXT: [[TMP114:%.*]] = extractvalue { i8, i1 } [[TMP112]], 1
// CHECK1-NEXT: br i1 [[TMP114]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP105:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP110:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP105]], i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[TMP106:%.*]] = load i8, i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP106]] to i32
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]]
// CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK1-NEXT: store i8 [[CONV32]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP108:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP109:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP105]], i8 [[TMP108]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP110]] = extractvalue { i8, i1 } [[TMP109]], 0
// CHECK1-NEXT: [[TMP111:%.*]] = extractvalue { i8, i1 } [[TMP109]], 1
// CHECK1-NEXT: br i1 [[TMP111]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY23]]
// CHECK1: omp.arraycpy.done35:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB3]], i32 [[TMP96]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP103]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]]
// CHECK1: omp.arraycpy.done36:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB3]], i32 [[TMP93]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP115:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP115]])
// CHECK1-NEXT: [[TMP116:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP117:%.*]] = load i32, i32* [[TMP116]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP117]])
// CHECK1-NEXT: [[TMP112:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP112]])
// CHECK1-NEXT: [[TMP113:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP114:%.*]] = load i32, i32* [[TMP113]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP114]])
// CHECK1-NEXT: ret void
//
//
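The caller side of the capture change, shared by every test in this commit, can be sketched in C++ against the real libomp entry point (__kmpc_fork_call and kmpc_micro are the actual runtime names; struct anon, outlined_agg, outlined_vars, and spawn are illustrative):

typedef int kmp_int32;
struct ident_t;  // opaque source-location descriptor from the OpenMP runtime
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
extern "C" void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
                                 kmpc_micro microtask, ...);

struct anon { int *argc; char ***argv; };  // aggregate built before the revert

void outlined_agg(kmp_int32 *, kmp_int32 *, anon *);           // one context arg
void outlined_vars(kmp_int32 *, kmp_int32 *, int *, char ***); // one arg per capture

void spawn(ident_t *loc, int *argc, char ***argv) {
  anon ctx = {argc, argv};
  // Reverted scheme: a single %struct.anon* travels through the runtime.
  __kmpc_fork_call(loc, 1, (kmpc_micro)&outlined_agg, &ctx);
  // Restored scheme: the runtime forwards each capture individually.
  __kmpc_fork_call(loc, 2, (kmpc_micro)&outlined_vars, argc, argv);
}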
@ -447,7 +441,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@ -461,7 +455,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@ -474,29 +468,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@ -510,7 +504,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64


@ -55,7 +55,6 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
// CHECK1-NEXT: [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
@ -175,34 +174,24 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8*
// CHECK1-NEXT: [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]])
// CHECK1-NEXT: store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP61]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[DOTTASK_RED_6]], i8*** [[TMP63]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]])
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[TMP64:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP64]])
// CHECK1-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP65]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done8:
// CHECK1-NEXT: [[TMP66:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP66]]
// CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP61]]
//
//
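The .omp_outlined. block a few lines below shows the callee side of the same change; in C++ terms the two prologues amount to this (types mirror the IR: i32* A, i64 VLA, i16* D, i8** task-reduction handles; all names here are illustrative):

struct anon5 { int *a; long long vla; short *d; char **tr1; char **tr2; };

// Reverted scheme: one __context parameter; every capture costs a
// getelementptr plus a load out of the aggregate.
void outlined_agg5(int *gtid, int *btid, anon5 *ctx) {
  int *a = ctx->a;
  long long vla = ctx->vla;
  short *d = ctx->d;  // ...likewise for the two reduction handles
  (void)gtid; (void)btid; (void)a; (void)vla; (void)d;
}

// Restored scheme: five explicit parameters, each spilled to its own
// *_ADDR alloca and reloaded, exactly as the CHECK lines spell out.
void outlined_vars5(int *gtid, int *btid, int *a, long long vla, short *d,
                    char **task_red, char **task_red2) {
  (void)gtid; (void)btid; (void)a; (void)vla; (void)d;
  (void)task_red; (void)task_red2;  // body then proceeds as before
}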
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev
@ -479,71 +468,73 @@ int main(int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i16*, i16** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i8**, i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i16* [[D]], i16** [[D_ADDR]], align 8
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8
// CHECK1-NEXT: store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP4]], i64* [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[TMP6]], i16** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP8]], i8*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP19]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP20:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast %struct.anon.1* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP24]], i8* align 8 [[TMP25]], i64 40, i1 false)
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP21]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP26]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP26]], i32 0, i32 1
// CHECK1-NEXT: [[TMP30:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK1-NEXT: store i8* [[TMP30]], i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 6
// CHECK1-NEXT: store i64 4, i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 9
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP35]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP36:%.*]] = load i64, i64* [[TMP33]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i8* [[TMP20]], i32 1, i64* [[TMP31]], i64* [[TMP32]], i64 [[TMP36]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[TMP2]], i16** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP3]], i8*** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP13]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP14:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP18]], i8* align 8 [[TMP19]], i64 40, i1 false)
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 1
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP3]], align 8
// CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[TMP4]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 6
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 9
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i8* [[TMP14]], i32 1, i64* [[TMP25]], i64* [[TMP26]], i64 [[TMP30]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
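// The interleaved lines above contrast the two argument-passing schemes: the
// reverted codegen packed every capture into one anonymous context struct
// (%struct.anon) and unpacked it with getelementptr/load pairs, while the
// restored codegen passes each capture as a separate outlined-function
// parameter. A rough C analogue of the two signatures, illustrative only and
// not the generated code itself:
//
//   struct anon { int *a; long vla; short *d; void **rd0; void **rd1; };
//   void outlined_agg(int *gtid, int *btid, struct anon *ctx);        // reverted
//   void outlined_flat(int *gtid, int *btid, int *a, long vla, short *d,
//                      void **rd0, void **rd1);                       // restored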
@@ -581,7 +572,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
@@ -596,7 +587,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@@ -625,9 +616,9 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP22]], i32 0, i32 1
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
@@ -635,14 +626,14 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
// CHECK1-NEXT: [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8*
// CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32*
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP22]], i32 0, i32 2
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2
// CHECK1-NEXT: [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2
// CHECK1-NEXT: [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
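// The final two instructions recover the array-section size for the reduction:
// the byte size is formed as length * sizeof(short), then divided back into an
// element count via the null-gep sizeof idiom. In C terms, roughly (names
// hypothetical):
//
//   size_t bytes = vla * 2;                // mul nuw i64 %len, 2
//   size_t elems = bytes / sizeof(short);  // udiv exact ..., ptrtoint(gep null, 1)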


@@ -55,7 +55,6 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
// CHECK1-NEXT: [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
@@ -175,34 +174,24 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8*
// CHECK1-NEXT: [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]])
// CHECK1-NEXT: store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP61]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[DOTTASK_RED_6]], i8*** [[TMP63]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]])
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[TMP64:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP64]])
// CHECK1-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP65]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done8:
// CHECK1-NEXT: [[TMP66:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP66]]
// CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP61]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev
@@ -479,71 +468,73 @@ int main(int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i16*, i16** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i8**, i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i16* [[D]], i16** [[D_ADDR]], align 8
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8
// CHECK1-NEXT: store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP4]], i64* [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[TMP6]], i16** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP8]], i8*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP19]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP20:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast %struct.anon.1* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP24]], i8* align 8 [[TMP25]], i64 40, i1 false)
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP21]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP26]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP26]], i32 0, i32 1
// CHECK1-NEXT: [[TMP30:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK1-NEXT: store i8* [[TMP30]], i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 6
// CHECK1-NEXT: store i64 4, i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP22]], i32 0, i32 9
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP35]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP36:%.*]] = load i64, i64* [[TMP33]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i8* [[TMP20]], i32 1, i64* [[TMP31]], i64* [[TMP32]], i64 [[TMP36]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]])
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: store i16* [[TMP2]], i16** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP3]], i8*** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 4
// CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP13]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP14:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP18]], i8* align 8 [[TMP19]], i64 40, i1 false)
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 1
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP3]], align 8
// CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[TMP4]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 6
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 9
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i8* [[TMP14]], i32 1, i64* [[TMP25]], i64* [[TMP26]], i64 [[TMP30]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
@@ -581,7 +572,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
@@ -596,7 +587,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@@ -625,9 +616,9 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP22]], i32 0, i32 1
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
@@ -635,14 +626,14 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
// CHECK1-NEXT: [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8*
// CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32*
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP22]], i32 0, i32 2
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2
// CHECK1-NEXT: [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2
// CHECK1-NEXT: [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)


@@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple aarch64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple ppc64le-unknown-linux -emit-llvm %s -o - | FileCheck %s
@@ -8,26 +7,6 @@
void bar(void);
// CHECK-LABEL: @foo(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_3:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_4:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_5:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_6:%.*]] = alloca [[STRUCT_ANON_5:%.*]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 4)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_3]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_4]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.5*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.anon.5* [[OMP_OUTLINED_ARG_AGG_6]])
// CHECK-NEXT: ret void
//
void foo(void) {
#pragma omp metadirective when(device = {kind(any)} \
: parallel)
@@ -56,12 +35,46 @@ void foo(void) {
;
}
// CHECK-LABEL: define {{.+}} void @foo()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_1:@.+]] to void
// CHECK-NEXT: @__kmpc_push_num_threads
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_1]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif
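// With nothing captured, the restored host codegen forks with argc == 0 and an
// outlined function of type void (i32*, i32*); the reverted scheme always
// appended a single %struct.anon* aggregate (argc == 1). For orientation, the
// runtime entry these checks exercise, declared as in the OpenMP runtime's
// kmp.h (a sketch here, not normative):
//
//   typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
//   void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...);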


@@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple aarch64-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple ppc64le-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
@@ -9,26 +8,6 @@
void bar();
// CHECK-LABEL: @_Z3foov(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_3:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_4:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_5:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_6:%.*]] = alloca [[STRUCT_ANON_5:%.*]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 4)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_3]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_4]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.5*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.anon.5* [[OMP_OUTLINED_ARG_AGG_6]])
// CHECK-NEXT: ret void
//
void foo() {
#pragma omp metadirective when(device = {kind(any)} \
: parallel)
@@ -57,12 +36,46 @@ void foo() {
;
}
// CHECK-LABEL: define {{.+}} void @_Z3foov()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_1:@.+]] to void
// CHECK-NEXT: @__kmpc_push_num_threads
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_1]](
// CHECK: void @_Z3barv()
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: void @_Z3barv()
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// CHECK: void @_Z3barv()
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: void @_Z3barv()
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif


@@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple aarch64-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple ppc64le-unknown-linux -emit-llvm %s -fexceptions -fcxx-exceptions -o - -fsanitize-address-use-after-scope | FileCheck %s
@@ -9,22 +8,6 @@
void bar();
// CHECK-LABEL: @_Z3foov(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_3:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_4:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_5:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_3]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_4]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]])
// CHECK-NEXT: ret void
//
void foo() {
#pragma omp metadirective when(implementation = {vendor(score(0) \
: llvm)}, \
@@ -54,15 +37,40 @@ void foo() {
;
}
// CHECK-LABEL: void @_Z3foov()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: @_Z3barv
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: @_Z3barv
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: @_Z3barv
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// NO-CHECK: call void @__kmpc_for_static_init
// NO-CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// NO-CHECK: call void @__kmpc_for_static_init
// NO-CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif
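// FileCheck ignores lines whose prefix is not registered by a RUN line, so the
// NO-CHECK lines above record expectations deliberately left unenforced for
// metadirective variants that are not selected; score() only biases selection
// when several context selectors match. An illustrative, hypothetical use of
// the same syntax (not this test's exact source):
//
//   #pragma omp metadirective \
//       when(implementation = {vendor(score(0) : llvm)} : parallel) \
//       default(parallel for)
//   for (int i = 0; i < N; ++i)
//     work(i);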


@@ -91,7 +91,7 @@ void bar() {
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 2, i32* @_ZZ4mainE1a, align 4
// CHECK1-NEXT: store double 3.000000e+00, double* [[B]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooIiET_v() #[[ATTR9:[0-9]+]]
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooIiET_v() #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: ret i32 [[CALL]]
//
//
@@ -109,35 +109,25 @@ void bar() {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[BAR_A:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[BAR_B:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP1]], i8* [[TMP2]])
// CHECK1-NEXT: [[TMP4:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP3]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP4]], %struct.anon* [[TMP5]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP3]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP2]], i64 1)
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP1]], i64 0)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[BAR_A:%.*]] = alloca float, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load float, float* [[BAR_A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP1]] to double
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[BAR_A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
// CHECK1-NEXT: store double [[CONV]], double* addrspacecast (double addrspace(3)* @bar_b to double*), align 8
// CHECK1-NEXT: call void @_Z3bazRf(float* noundef nonnull align 4 dereferenceable(4) [[BAR_A]]) #[[ATTR9]]
// CHECK1-NEXT: call void @_Z3bazRf(float* noundef nonnull align 4 dereferenceable(4) [[BAR_A]]) #[[ATTR7]]
// CHECK1-NEXT: ret void
//
//
@@ -147,11 +137,11 @@ void bar() {
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* null) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR5:[0-9]+]]
// CHECK1-NEXT: ret void
//
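// Restored device-side convention: the wrapper asks the runtime for the array
// of shared-variable pointers and forwards them to the outlined body; in this
// test nothing is captured, so nothing is unpacked. A minimal C sketch of the
// shape -- __kmpc_get_shared_variables matches the DeviceRTL declaration, the
// rest is illustrative:
//
//   void __kmpc_get_shared_variables(void ***global_args);
//
//   void __omp_outlined___wrapper(short unused, int gtid) {
//     int zero = 0;
//     void **args;
//     __kmpc_get_shared_variables(&args);
//     __omp_outlined__(&gtid, &zero);   // no captured variables to pass
//   }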


@@ -33,11 +33,9 @@ void test_ds(){
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l14
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK-NEXT: [[DOTTMP_OUTLINED_AGG_ARG2:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [2 x i8*], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
@@ -48,30 +46,21 @@ void test_ds(){
// CHECK-NEXT: [[B_ON_STACK:%.*]] = bitcast i8* [[B]] to i32*
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: store i32 10, i32* [[A_ON_STACK]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32* [[A_ON_STACK]], i32** [[TMP2]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK-NEXT: [[TMP4:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP3]], i8* [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP5]] to %struct.anon*
// CHECK-NEXT: store [[STRUCT_ANON]] [[TMP6]], %struct.anon* [[TMP7]], align 8
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP5]])
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[TMP4]], i64 8)
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP4]], i64 1)
// CHECK-NEXT: store i32 100, i32* [[B_ON_STACK]], align 4
// CHECK-NEXT: store i32 1000, i32* [[C]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 0
// CHECK-NEXT: store i32* [[B_ON_STACK]], i32** [[TMP8]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 1
// CHECK-NEXT: store i32* [[A_ON_STACK]], i32** [[TMP9]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG2]] to i8*
// CHECK-NEXT: [[TMP11:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 16)
// CHECK-NEXT: [[TMP12:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP10]], i8* [[TMP11]])
// CHECK-NEXT: [[TMP13:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], align 8
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP12]] to %struct.anon.0*
// CHECK-NEXT: store [[STRUCT_ANON_0]] [[TMP13]], %struct.anon.0* [[TMP14]], align 8
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP12]])
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[TMP11]], i64 16)
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 0
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[B_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 1
// CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP9]], i64 2)
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[B]], i64 4)
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[A]], i64 4)
// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
@ -81,18 +70,16 @@ void test_ds(){
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK-NEXT: store i32 1000, i32* [[TMP2]], align 4
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32 1000, i32* [[TMP0]], align 4
// CHECK-NEXT: ret void
//
//
@ -102,38 +89,38 @@ void test_ds(){
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon**
// CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.anon** [[TMP3]] to %struct.anon*
// CHECK-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* [[TMP4]]) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: ret void
//
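For orientation while reading the wrapper checks above and below: after the revert, worker threads recover the captured pointers through __kmpc_get_shared_variables and unpack each slot individually, instead of receiving one aggregate through __kmpc_get_shared_variables_aggregate. A minimal C sketch of the unpacking, assuming only the helper shapes visible in this hunk (names are illustrative):

// Hedged C sketch of what the regenerated __omp_outlined___wrapper above
// does; the real code is IR emitted by clang.
extern void __kmpc_get_shared_variables(void ***args);
extern void __omp_outlined__(int *gtid, int *btid, int *a);
void wrapper_sketch(short parallelism, int gtid) {
  int zero = 0;
  void **shared_args;
  __kmpc_get_shared_variables(&shared_args); // array set up by the launcher
  int *a = (int *)shared_args[0];            // slot 0: pointer to 'a'
  __omp_outlined__(&gtid, &zero, a);         // forward the unpacked capture
}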
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[C1:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[C]], i32** [[C1]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 10000
// CHECK-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10000
// CHECK-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK-NEXT: ret void
//
//
@ -143,14 +130,18 @@ void test_ds(){
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR4]]
// CHECK-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], i32* [[TMP8]]) #[[ATTR4]]
// CHECK-NEXT: ret void
//
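The checks above exercise device data sharing; the sketch below is a hedged reconstruction of the kind of source involved (variable names match the IR, the pragmas and literals are assumptions). The visible effect of the revert is the launch convention: __kmpc_parallel_51 again takes an array of captured pointers plus an explicit count (i8** args, i64 nargs) rather than one aggregate allocated through __kmpc_alloc_shared.

// Hedged sketch of the test source behind the CHECK lines above.
void test_ds() {
#pragma omp target
  {
    int a = 10;                 // team-shared: lowered via __kmpc_alloc_shared
#pragma omp parallel            // one capture  -> [1 x i8*] CAPTURED_VARS_ADDRS
    { a = 1000; }
    int b = 100;                // also team-shared
    int c = 1000;               // stays local to the target region
#pragma omp parallel private(c) // two captures -> [2 x i8*]
    {
      int *c1 = &c;             // matches the C/C1 allocas in __omp_outlined__1
      b = a + 10000;
    }
  }
}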


@ -155,20 +155,20 @@ int main()
// CHECK: define internal void @{{.+}}omp_loop_ref{{.+}}(
// CHECK: [[BODY:%body.addr]] = alloca %class.anon*
// CHECK: [[TMP:%tmp]] = alloca %class.anon*
// CHECK: [[BODY_REF:%body_ref]] = alloca %class.anon.6*
// CHECK: [[REF_TMP:%ref.tmp]] = alloca %class.anon.6
// CHECK: [[TMP8:%tmp.+]] = alloca %class.anon.6*
// CHECK: [[BODY_REF:%body_ref]] = alloca %class.anon.1*
// CHECK: [[REF_TMP:%ref.tmp]] = alloca %class.anon.1
// CHECK: [[TMP8:%tmp.+]] = alloca %class.anon.1*
// CHECK: [[L0:%.+]] = load %class.anon*, %class.anon** [[BODY]]
// CHECK: store %class.anon* [[L0]], %class.anon** [[TMP]]
// CHECK: [[L5:%.+]] = load %class.anon*, %class.anon** [[TMP]]
// CHECK-NOT: [[L6:%.+]] = load %class.anon*, %class.anon** [[TMP]]
// CHECK-NOT: [[L7:%.+]] = load %class.anon*, %class.anon** [[TMP]]
// CHECK: store %class.anon.6* [[REF_TMP]], %class.anon.6** [[BODY_REF]]
// CHECK: [[L47:%.+]] = load %class.anon.6*, %class.anon.6** [[BODY_REF]]
// CHECK: store %class.anon.6* [[L47]], %class.anon.6** [[TMP8]]
// CHECK: [[L48:%.+]] = load %class.anon.6*, %class.anon.6** [[TMP8]]
// CHECK-NOT: [[L49:%.+]] = load %class.anon.6*, %class.anon.6** [[TMP8]]
// CHECK-NOT: [[L50:%.+]] = load %class.anon.6*, %class.anon.6** [[TMP8]]
// CHECK: store %class.anon.1* [[REF_TMP]], %class.anon.1** [[BODY_REF]]
// CHECK: [[L47:%.+]] = load %class.anon.1*, %class.anon.1** [[BODY_REF]]
// CHECK: store %class.anon.1* [[L47]], %class.anon.1** [[TMP8]]
// CHECK: [[L48:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
// CHECK-NOT: [[L49:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
// CHECK-NOT: [[L50:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
// CHECK: ret void
// CHECK: define internal void @{{.+}}xoo{{.+}}(
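In this file the only change is the closure-type suffix: with the aggregate codegen reverted, clang no longer synthesizes a %struct.anon capture type per outlined region, so the lambda closure that was numbered %class.anon.6 comes out as %class.anon.1. A hedged sketch of the shape of code that yields two distinct closure types (names are illustrative; the actual test source is not part of this diff):

// Hedged sketch: two lambdas with different captures produce two
// distinct closure types, %class.anon and %class.anon.N.
template <typename Body>
void omp_loop_ref(int n, Body &&body) {    // body     -> %class.anon*
  auto body_ref = [&](int i) { body(i); }; // body_ref -> %class.anon.N
#pragma omp parallel for
  for (int i = 0; i < n; ++i)
    body_ref(i);
}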


@ -28,21 +28,14 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l21
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK1-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP5]], %struct.anon* [[TMP6]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i64 1)
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP2]], i64 0)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -50,44 +43,34 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3usev() #[[ATTR10:[0-9]+]]
// CHECK1-NEXT: call void @_Z3usev() #[[ATTR8:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3usev
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP1]], i8* [[TMP2]])
// CHECK1-NEXT: [[TMP4:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP3]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP4]], %struct.anon.0* [[TMP5]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP3]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP2]], i64 1)
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP1]], i64 0)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l23
// CHECK1-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: call void @_Z3usev() #[[ATTR10]]
// CHECK1-NEXT: call void @_Z3usev() #[[ATTR8]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -95,52 +78,42 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z4workv() #[[ATTR10]]
// CHECK1-NEXT: call void @_Z4workv() #[[ATTR8]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK1-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] {
// CHECK1-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* null) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l21
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK2-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP5]], %struct.anon* [[TMP6]], align 1
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP4]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i32 1)
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP2]], i32 0)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -148,44 +121,34 @@ int main() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: call void @_Z3usev() #[[ATTR10:[0-9]+]]
// CHECK2-NEXT: call void @_Z3usev() #[[ATTR8:[0-9]+]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3usev
// CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP1]], i8* [[TMP2]])
// CHECK2-NEXT: [[TMP4:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP3]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP4]], %struct.anon.0* [[TMP5]], align 1
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP3]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP2]], i32 1)
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP1]], i32 0)
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l23
// CHECK2-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK2-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: call void @_Z3usev() #[[ATTR10]]
// CHECK2-NEXT: call void @_Z3usev() #[[ATTR8]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -193,30 +156,27 @@ int main() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: call void @_Z4workv() #[[ATTR10]]
// CHECK2-NEXT: call void @_Z4workv() #[[ATTR8]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK2-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] {
// CHECK2-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* null) #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: ret void
//
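CHECK1 and CHECK2 are the 64-bit and 32-bit runs of the same test (note the i64 vs i32 size operands and the 8- vs 4-byte alignments). With zero captures the reverted codegen materializes a [0 x i8*] array and passes nargs = 0, where the aggregate scheme had to heap-allocate a 1-byte struct per launch. A hedged sketch of the test shape (the mangled names _Z3usev/_Z4workv fix the signatures; the pragmas and the l21/l23 kernel lines are inferred from the kernel symbols):

// Hedged sketch of the source behind the _main_l21/_main_l23 kernels.
void work();
void use() {
#pragma omp parallel        // zero captures -> [0 x i8*], nargs = 0
  work();
}
int main() {
#pragma omp target parallel // line 21: SPMD kernel (target_init mode 2)
  use();
#pragma omp target          // line 23: generic kernel (target_init mode 1)
  use();
}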


@ -37,8 +37,7 @@ int main() {
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
@ -46,17 +45,12 @@ int main() {
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR9:[0-9]+]]
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP5]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -67,36 +61,28 @@ int main() {
// CHECK1-SAME: (i32* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32** [[C_ADDR]], i32*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK1-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP5]], %struct.anon.0* [[TMP6]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i64 8)
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast i32** [[C_ADDR]] to i8*
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32**)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: call void @_Z3usePi(i32* noundef [[TMP2]]) #[[ATTR9]]
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR7]]
// CHECK1-NEXT: ret void
//
//
@ -106,32 +92,31 @@ int main() {
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon**
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon** [[TMP3]] to %struct.anon*
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* [[TMP4]]) #[[ATTR5:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK1-NEXT: call void @_Z4workPi(i32* noundef [[TMP3]]) #[[ATTR9]]
// CHECK1-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK1-NEXT: call void @_Z4workPi(i32* noundef [[TMP1]]) #[[ATTR7]]
// CHECK1-NEXT: ret void
//
//
@ -145,7 +130,7 @@ int main() {
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast i32* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: call void @__atomic_load(i64 noundef 4, i8* noundef [[TMP1]], i8* noundef [[TMP2]], i32 noundef 0) #[[ATTR9]]
// CHECK1-NEXT: call void @__atomic_load(i64 noundef 4, i8* noundef [[TMP1]], i8* noundef [[TMP2]], i32 noundef 0) #[[ATTR7]]
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[ATOMIC_TEMP]], align 4
@ -154,7 +139,7 @@ int main() {
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[ATOMIC_TEMP1]] to i8*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 4, i8* noundef [[TMP4]], i8* noundef [[TMP5]], i8* noundef [[TMP6]], i32 noundef 0, i32 noundef 0) #[[ATTR9]]
// CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 4, i8* noundef [[TMP4]], i8* noundef [[TMP5]], i8* noundef [[TMP6]], i32 noundef 0, i32 noundef 0) #[[ATTR7]]
// CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: ret void
@ -166,15 +151,16 @@ int main() {
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR5]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32***
// CHECK1-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[TMP4]], align 8
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32** [[TMP5]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
@ -182,8 +168,7 @@ int main() {
// CHECK2-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
@ -191,17 +176,12 @@ int main() {
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR9:[0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR7:[0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP5]], i32 1)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -212,36 +192,28 @@ int main() {
// CHECK2-SAME: (i32* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32** [[C_ADDR]], i32*** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK2-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP5]], %struct.anon.0* [[TMP6]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP4]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i32 4)
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast i32** [[C_ADDR]] to i8*
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32**)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 1)
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: call void @_Z3usePi(i32* noundef [[TMP2]]) #[[ATTR9]]
// CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 4
// CHECK2-NEXT: call void @_Z3usePi(i32* noundef [[TMP0]]) #[[ATTR7]]
// CHECK2-NEXT: ret void
//
//
@ -251,32 +223,31 @@ int main() {
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon**
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon** [[TMP3]] to %struct.anon*
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* [[TMP4]]) #[[ATTR5:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 4
// CHECK2-NEXT: call void @_Z4workPi(i32* noundef [[TMP3]]) #[[ATTR9]]
// CHECK2-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 4
// CHECK2-NEXT: call void @_Z4workPi(i32* noundef [[TMP1]]) #[[ATTR7]]
// CHECK2-NEXT: ret void
//
//
@ -290,7 +261,7 @@ int main() {
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast i32* [[ATOMIC_TEMP]] to i8*
// CHECK2-NEXT: call void @__atomic_load(i32 noundef 4, i8* noundef [[TMP1]], i8* noundef [[TMP2]], i32 noundef 0) #[[ATTR9]]
// CHECK2-NEXT: call void @__atomic_load(i32 noundef 4, i8* noundef [[TMP1]], i8* noundef [[TMP2]], i32 noundef 0) #[[ATTR7]]
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK2: atomic_cont:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[ATOMIC_TEMP]], align 4
@ -299,7 +270,7 @@ int main() {
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i32* [[ATOMIC_TEMP]] to i8*
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i32* [[ATOMIC_TEMP1]] to i8*
// CHECK2-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i32 noundef 4, i8* noundef [[TMP4]], i8* noundef [[TMP5]], i8* noundef [[TMP6]], i32 noundef 0, i32 noundef 0) #[[ATTR9]]
// CHECK2-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i32 noundef 4, i8* noundef [[TMP4]], i8* noundef [[TMP5]], i8* noundef [[TMP6]], i32 noundef 0, i32 noundef 0) #[[ATTR7]]
// CHECK2-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK2: atomic_exit:
// CHECK2-NEXT: ret void
@ -311,14 +282,15 @@ int main() {
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR5]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32***
// CHECK2-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[TMP4]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32** [[TMP5]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
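Here the captured value is a pointer. The parallel in the target region forwards c itself (an i32*), while the parallel inside use() captures the parameter by reference, so its slot holds an i32** and the outlined body does a double load before calling work. A hedged sketch (signatures follow the mangled names _Z3usePi/_Z4workPi; num_threads(2) is inferred from the i32 2 operand of __kmpc_parallel_51, the atomic from the __atomic_compare_exchange loop):

// Hedged sketch of the source behind these checks.
void work(int *c) {
#pragma omp atomic           // lowered to the __atomic_load/CAS loop above
  ++*c;
}
void use(int *c) {
#pragma omp parallel num_threads(2) // captures &c -> i32** slot
  work(c);
}
int main() {
  int c = 0;
#pragma omp target map(tofrom: c)
  {
    use(&c);
#pragma omp parallel num_threads(2) // captures c -> i32* slot
    use(&c);
  }
}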


@ -77,12 +77,9 @@ int bar(int n){
// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG2:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_3:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG4:%.*]] = alloca [[STRUCT_ANON_1]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
@ -90,32 +87,14 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = call align 16 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK1-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP5]], %struct.anon* [[TMP6]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i64 1)
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG2]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = call align 16 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP7]], i8* [[TMP8]])
// CHECK1-NEXT: [[TMP10:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], align 1
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP9]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP10]], %struct.anon.0* [[TMP11]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP9]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP8]], i64 1)
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.anon.1* [[DOTTMP_OUTLINED_AGG_ARG4]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = call align 16 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP14:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP12]], i8* [[TMP13]])
// CHECK1-NEXT: [[TMP15:%.*]] = load [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_3]], align 1
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8* [[TMP14]] to %struct.anon.1*
// CHECK1-NEXT: store [[STRUCT_ANON_1]] [[TMP15]], %struct.anon.1* [[TMP16]], align 1
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.1*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8* [[TMP14]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP13]], i64 1)
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i64 0)
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i64 0)
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i64 0)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 4
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
@ -124,16 +103,13 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 42, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
@@ -144,26 +120,23 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* null) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 43, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
@@ -174,26 +147,23 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* null) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 44, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
@@ -204,12 +174,12 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.1* null) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
// CHECK1-NEXT: ret void
//
//
@@ -220,8 +190,7 @@ int bar(int n){
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_2]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
// CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
@@ -235,28 +204,22 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast %struct.anon.2* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP4:%.*]] = call align 16 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP3]], i8* [[TMP4]])
// CHECK1-NEXT: [[TMP6:%.*]] = load [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP5]] to %struct.anon.2*
// CHECK1-NEXT: store [[STRUCT_ANON_2]] [[TMP6]], %struct.anon.2* [[TMP7]], align 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1000
// CHECK1-NEXT: [[TMP9:%.*]] = zext i1 [[CMP]] to i32
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP9]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.2*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8* [[TMP5]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP4]], i64 1)
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
// CHECK1-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i64 0)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i16, i16* [[CONV2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP11]] to i32
// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP7]] to i32
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK1-NEXT: store i16 [[CONV5]], i16* [[CONV2]], align 2
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
@@ -265,20 +228,17 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 45, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
@@ -288,12 +248,12 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.2* null) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
// CHECK1-NEXT: ret void
//
//
@@ -301,8 +261,7 @@ int bar(int n){
// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_3]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
@@ -314,18 +273,13 @@ int bar(int n){
// CHECK1-NEXT: [[A_ON_STACK:%.*]] = bitcast i8* [[A1]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[A_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A_ON_STACK]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 16 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.3*
// CHECK1-NEXT: store [[STRUCT_ANON_3]] [[TMP7]], %struct.anon.3* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.3*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ON_STACK]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP5]], i64 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ON_STACK]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[A_ON_STACK]], align 4
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[A1]], i64 4)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
@@ -335,44 +289,42 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = call i64 @__kmpc_warp_active_thread_mask()
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i64 @__kmpc_warp_active_thread_mask()
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK1-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
// CHECK1: omp.critical.loop:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], [[NVPTX_NUM_THREADS]]
// CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], [[NVPTX_NUM_THREADS]]
// CHECK1-NEXT: br i1 [[TMP4]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
// CHECK1: omp.critical.test:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP4]], [[TMP7]]
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP2]], [[TMP5]]
// CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
// CHECK1: omp.critical.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK1-NEXT: br label [[OMP_CRITICAL_SYNC]]
// CHECK1: omp.critical.sync:
// CHECK1-NEXT: call void @__kmpc_syncwarp(i64 [[TMP3]])
// CHECK1-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK1-NEXT: store i32 [[TMP12]], i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: call void @__kmpc_syncwarp(i64 [[TMP1]])
// CHECK1-NEXT: [[TMP10:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[CRITICAL_COUNTER]], align 4
// CHECK1-NEXT: br label [[OMP_CRITICAL_LOOP]]
// CHECK1: omp.critical.exit:
// CHECK1-NEXT: ret void
@@ -384,15 +336,16 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.3**
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3** [[TMP3]] to %struct.anon.3*
// CHECK1-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.3* [[TMP4]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR3]]
// CHECK1-NEXT: ret void
//
//
@@ -400,44 +353,23 @@ int bar(int n){
// CHECK2-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG2:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_3:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG4:%.*]] = alloca [[STRUCT_ANON_1]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP3:%.*]] = call align 4 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP2]], i8* [[TMP3]])
// CHECK2-NEXT: [[TMP5:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP4]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP5]], %struct.anon* [[TMP6]], align 1
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP4]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP3]], i32 1)
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG2]] to i8*
// CHECK2-NEXT: [[TMP8:%.*]] = call align 4 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP7]], i8* [[TMP8]])
// CHECK2-NEXT: [[TMP10:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], align 1
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP9]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP10]], %struct.anon.0* [[TMP11]], align 1
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP9]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP8]], i32 1)
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast %struct.anon.1* [[DOTTMP_OUTLINED_AGG_ARG4]] to i8*
// CHECK2-NEXT: [[TMP13:%.*]] = call align 4 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP14:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP12]], i8* [[TMP13]])
// CHECK2-NEXT: [[TMP15:%.*]] = load [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_3]], align 1
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8* [[TMP14]] to %struct.anon.1*
// CHECK2-NEXT: store [[STRUCT_ANON_1]] [[TMP15]], %struct.anon.1* [[TMP16]], align 1
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.1*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8* [[TMP14]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP13]], i32 1)
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i32 0)
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 0)
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i32 0)
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
@@ -446,16 +378,13 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: store i32 42, i32* [[A]], align 4
// CHECK2-NEXT: ret void
//
@@ -466,26 +395,23 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* null) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: store i32 43, i32* [[A]], align 4
// CHECK2-NEXT: ret void
//
@@ -496,26 +422,23 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* null) #[[ATTR3]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: store i32 44, i32* [[A]], align 4
// CHECK2-NEXT: ret void
//
@@ -526,12 +449,12 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.1* null) #[[ATTR3]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
// CHECK2-NEXT: ret void
//
//
@@ -542,8 +465,7 @@ int bar(int n){
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_2]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
@@ -555,28 +477,22 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast %struct.anon.2* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP4:%.*]] = call align 4 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP3]], i8* [[TMP4]])
// CHECK2-NEXT: [[TMP6:%.*]] = load [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP5]] to %struct.anon.2*
// CHECK2-NEXT: store [[STRUCT_ANON_2]] [[TMP6]], %struct.anon.2* [[TMP7]], align 1
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1000
// CHECK2-NEXT: [[TMP9:%.*]] = zext i1 [[CMP]] to i32
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP9]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.2*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8* [[TMP5]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP4]], i32 1)
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
// CHECK2-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i32 0)
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = sext i16 [[TMP11]] to i32
// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = sext i16 [[TMP7]] to i32
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK2-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 2
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
@@ -585,20 +501,17 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__3
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: store i32 45, i32* [[A]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK2-NEXT: ret void
//
//
@@ -608,12 +521,12 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.2* null) #[[ATTR3]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
// CHECK2-NEXT: ret void
//
//
@@ -621,8 +534,7 @@ int bar(int n){
// CHECK2-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_3]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
@@ -633,18 +545,13 @@ int bar(int n){
// CHECK2-NEXT: [[A_ON_STACK:%.*]] = bitcast i8* [[A1]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[A_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[A_ON_STACK]], i32** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 4 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.3*
// CHECK2-NEXT: store [[STRUCT_ANON_3]] [[TMP7]], %struct.anon.3* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.3*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ON_STACK]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK2-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP5]], i32 1)
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ON_STACK]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[A_ON_STACK]], align 4
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[A1]], i32 4)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
@@ -654,44 +561,42 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__4
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = call i64 @__kmpc_warp_active_thread_mask()
// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = call i64 @__kmpc_warp_active_thread_mask()
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK2-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
// CHECK2: omp.critical.loop:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], [[NVPTX_NUM_THREADS]]
// CHECK2-NEXT: br i1 [[TMP6]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], [[NVPTX_NUM_THREADS]]
// CHECK2-NEXT: br i1 [[TMP4]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
// CHECK2: omp.critical.test:
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP4]], [[TMP7]]
// CHECK2-NEXT: br i1 [[TMP8]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP2]], [[TMP5]]
// CHECK2-NEXT: br i1 [[TMP6]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
// CHECK2: omp.critical.body:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP2]], align 4
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @"_gomp_critical_user_$var")
// CHECK2-NEXT: br label [[OMP_CRITICAL_SYNC]]
// CHECK2: omp.critical.sync:
// CHECK2-NEXT: call void @__kmpc_syncwarp(i64 [[TMP3]])
// CHECK2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK2-NEXT: store i32 [[TMP12]], i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: call void @__kmpc_syncwarp(i64 [[TMP1]])
// CHECK2-NEXT: [[TMP10:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[CRITICAL_COUNTER]], align 4
// CHECK2-NEXT: br label [[OMP_CRITICAL_LOOP]]
// CHECK2: omp.critical.exit:
// CHECK2-NEXT: ret void
@@ -703,14 +608,15 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.3**
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3** [[TMP3]] to %struct.anon.3*
// CHECK2-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.3* [[TMP4]]) #[[ATTR3]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
// CHECK2-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]]
// CHECK2-NEXT: ret void
//

View File

@@ -37,8 +37,7 @@ int bar(int n){
// CHECK-NEXT: entry:
// CHECK-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
@@ -52,21 +51,17 @@ int bar(int n){
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK-NEXT: store i32 [[TMP3]], i32* [[D_ON_STACK]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[TMP4]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: store i32* [[D_ON_STACK]], i32** [[TMP5]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK-NEXT: [[TMP7:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 16)
// CHECK-NEXT: [[TMP8:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP6]], i8* [[TMP7]])
// CHECK-NEXT: [[TMP9:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP8]] to %struct.anon*
// CHECK-NEXT: store [[STRUCT_ANON]] [[TMP9]], %struct.anon* [[TMP10]], align 8
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8* [[TMP8]])
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[TMP7]], i64 16)
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT: [[TMP5:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8*
// CHECK-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i32* [[D_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP8]], i64 2)
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[D]], i64 4)
// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
@@ -76,11 +71,12 @@ int bar(int n){
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -90,79 +86,77 @@ int bar(int n){
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK: omp.dispatch.cond:
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 9
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK: omp.dispatch.body:
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], [[TMP15]]
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]]
// CHECK-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK: omp.dispatch.inc:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK: omp.dispatch.end:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]])
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK-NEXT: ret void
//
//
@@ -172,14 +166,18 @@ int bar(int n){
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon**
// CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.anon** [[TMP3]] to %struct.anon*
// CHECK-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon* [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]**
// CHECK-NEXT: [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: ret void
//


@@ -149,8 +149,7 @@ void unreachable_call() {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[PTR1_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[PTR2_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i32* [[PTR1]], i32** [[PTR1_ADDR]], align 8
// CHECK1-NEXT: store i32** [[PTR2]], i32*** [[PTR2_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[PTR2_ADDR]], align 8
@@ -159,18 +158,14 @@ void unreachable_call() {
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32** [[PTR1_ADDR]], i32*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32** [[TMP0]], i32*** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP6:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 16)
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP5]], i8* [[TMP6]])
// CHECK1-NEXT: [[TMP8:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP8]], %struct.anon* [[TMP9]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP7]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP6]], i64 16)
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32** [[PTR1_ADDR]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32** [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32**, i32**)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP7]], i64 2)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -178,28 +173,27 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32** nonnull align 8 dereferenceable(8) [[PTR1:%.*]], i32** nonnull align 8 dereferenceable(8) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[PTR1_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[PTR2_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[TMP7]], align 4
// CHECK1-NEXT: store i32** [[PTR1]], i32*** [[PTR1_ADDR]], align 8
// CHECK1-NEXT: store i32** [[PTR2]], i32*** [[PTR2_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[PTR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[PTR2_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[TMP4]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39
// CHECK1-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
@@ -212,7 +206,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47
// CHECK1-SAME: (i64 [[AA:%.*]]) #[[ATTR6]] {
// CHECK1-SAME: (i64 [[AA:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
@@ -238,7 +232,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53
// CHECK1-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR6]] {
// CHECK1-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
@@ -318,7 +312,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2TTIxcEixEi
// CHECK1-SAME: (%struct.TT* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR7:[0-9]+]] comdat align 2 {
// CHECK1-SAME: (%struct.TT* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR5:[0-9]+]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
@@ -330,7 +324,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR6]] {
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
@@ -372,7 +366,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108
// CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR6]] {
// CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
@@ -419,36 +413,31 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3baziRd
// CHECK1-SAME: (i32 [[F1:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR7]] {
// CHECK1-SAME: (i32 [[F1:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK1-NEXT: [[F:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[F_ON_STACK:%.*]] = bitcast i8* [[F]] to i32*
// CHECK1-NEXT: store i32 [[F1]], i32* [[F_ON_STACK]], align 4
// CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[F_ON_STACK]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[A_ADDR]], align 8
// CHECK1-NEXT: store double* [[TMP3]], double** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 16)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP7]], %struct.anon.0* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 16)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[F_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load double*, double** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32* [[F_ON_STACK]] to i8*
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast double* [[TMP1]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, double*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP6]], i64 2)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[F_ON_STACK]], align 4
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[F]], i64 4)
// CHECK1-NEXT: ret i32 [[TMP9]]
// CHECK1-NEXT: ret i32 [[TMP7]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142
// CHECK1-SAME: () #[[ATTR6]] {
// CHECK1-SAME: () #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
@@ -464,7 +453,7 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR6]] {
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
@@ -498,44 +487,47 @@ void unreachable_call() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[F:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[F_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca double*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load double*, double** [[TMP3]], align 8
// CHECK1-NEXT: store double* [[TMP4]], double** [[TMP]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load double*, double** [[TMP]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load double, double* [[TMP5]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP6]]
// CHECK1-NEXT: store i32* [[F]], i32** [[F_ADDR]], align 8
// CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[F_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load double*, double** [[A_ADDR]], align 8
// CHECK1-NEXT: store double* [[TMP1]], double** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load double*, double** [[TMP]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load double, double* [[TMP2]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[CONV]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] {
// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to double**
// CHECK1-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP7]], align 8
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], double* [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
@@ -544,8 +536,7 @@ void unreachable_call() {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[PTR1_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[PTR2_ADDR:%.*]] = alloca i32**, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i32* [[PTR1]], i32** [[PTR1_ADDR]], align 4
// CHECK2-NEXT: store i32** [[PTR2]], i32*** [[PTR2_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[PTR2_ADDR]], align 4
@@ -554,18 +545,14 @@ void unreachable_call() {
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32** [[PTR1_ADDR]], i32*** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: store i32** [[TMP0]], i32*** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP6:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 8)
// CHECK2-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP5]], i8* [[TMP6]])
// CHECK2-NEXT: [[TMP8:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP8]], %struct.anon* [[TMP9]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP7]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP6]], i32 8)
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i32** [[PTR1_ADDR]] to i8*
// CHECK2-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i32** [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 2, i32 -1, i8* bitcast (void (i32*, i32*, i32**, i32**)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP7]], i32 2)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@@ -573,28 +560,27 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32** nonnull align 4 dereferenceable(4) [[PTR1:%.*]], i32** nonnull align 4 dereferenceable(4) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[PTR1_ADDR:%.*]] = alloca i32**, align 4
// CHECK2-NEXT: [[PTR2_ADDR:%.*]] = alloca i32**, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP2]], align 4
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[TMP7]], align 4
// CHECK2-NEXT: store i32** [[PTR1]], i32*** [[PTR1_ADDR]], align 4
// CHECK2-NEXT: store i32** [[PTR2]], i32*** [[PTR2_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[PTR1_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[PTR2_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP0]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[TMP4]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l39
// CHECK2-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
@@ -607,7 +593,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l47
// CHECK2-SAME: (i32 [[AA:%.*]]) #[[ATTR6]] {
// CHECK2-SAME: (i32 [[AA:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
@@ -633,7 +619,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l53
// CHECK2-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR6]] {
// CHECK2-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
@@ -712,7 +698,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN2TTIxcEixEi
// CHECK2-SAME: (%struct.TT* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR7:[0-9]+]] comdat align 2 {
// CHECK2-SAME: (%struct.TT* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 [[I:%.*]]) #[[ATTR5:[0-9]+]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
@@ -724,7 +710,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l90
// CHECK2-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR6]] {
// CHECK2-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
@@ -765,7 +751,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l108
// CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR6]] {
// CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
@@ -811,36 +797,31 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3baziRd
// CHECK2-SAME: (i32 [[F1:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR7]] {
// CHECK2-SAME: (i32 [[F1:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR5]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK2-NEXT: [[F:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[F_ON_STACK:%.*]] = bitcast i8* [[F]] to i32*
// CHECK2-NEXT: store i32 [[F1]], i32* [[F_ON_STACK]], align 4
// CHECK2-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[F_ON_STACK]], i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: [[TMP3:%.*]] = load double*, double** [[A_ADDR]], align 4
// CHECK2-NEXT: store double* [[TMP3]], double** [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 8)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP7]], %struct.anon.0* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 8)
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[F_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load double*, double** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i32* [[F_ON_STACK]] to i8*
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast double* [[TMP1]] to i8*
// CHECK2-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, double*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP6]], i32 2)
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[F_ON_STACK]], align 4
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[F]], i32 4)
// CHECK2-NEXT: ret i32 [[TMP9]]
// CHECK2-NEXT: ret i32 [[TMP7]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16unreachable_callv_l142
// CHECK2-SAME: () #[[ATTR6]] {
// CHECK2-SAME: () #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
@@ -856,7 +837,7 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l74
// CHECK2-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR6]] {
// CHECK2-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
@@ -889,43 +870,46 @@ void unreachable_call() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[F:%.*]], double* nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[F_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca double*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load double*, double** [[TMP3]], align 4
// CHECK2-NEXT: store double* [[TMP4]], double** [[TMP]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load double*, double** [[TMP]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load double, double* [[TMP5]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP6]]
// CHECK2-NEXT: store i32* [[F]], i32** [[F_ADDR]], align 4
// CHECK2-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[F_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load double*, double** [[A_ADDR]], align 4
// CHECK2-NEXT: store double* [[TMP1]], double** [[TMP]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load double*, double** [[TMP]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load double, double* [[TMP2]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP3]]
// CHECK2-NEXT: [[CONV:%.*]] = fptosi double [[ADD]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[TMP2]], align 4
// CHECK2-NEXT: store i32 [[CONV]], i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] {
// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR8:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 1
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to double**
// CHECK2-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP7]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], double* [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: ret void
//


@@ -55,8 +55,7 @@ int bar(int n){
// CHECK1-SAME: (i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
@@ -64,16 +63,11 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i16* [[TMP0]], i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -81,22 +75,20 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP2]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16, i16* [[TMP0]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV1]], i16* [[TMP2]], align 2
// CHECK1-NEXT: store i16 [[CONV1]], i16* [[TMP0]], align 2
// CHECK1-NEXT: ret void
//
//
@@ -106,8 +98,7 @@ int bar(int n){
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
@@ -119,20 +110,17 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i16* [[TMP1]], i16** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store [10 x i32]* [[TMP2]], [10 x i32]** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP9:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 24)
// CHECK1-NEXT: [[TMP10:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP8]], i8* [[TMP9]])
// CHECK1-NEXT: [[TMP11:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP11]], %struct.anon.0* [[TMP12]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP10]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP9]], i64 24)
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i16* [[TMP1]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast [10 x i32]* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*, [10 x i32]*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP11]], i64 3)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -140,32 +128,32 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[TMP4]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
// CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK1-NEXT: store i16 [[CONV2]], i16* [[TMP4]], align 2
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP6]], i64 0, i64 2
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i16 [[CONV2]], i16* [[TMP1]], align 2
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 2
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
@@ -174,8 +162,7 @@ int bar(int n){
// CHECK2-SAME: (i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
@@ -183,16 +170,11 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i16* [[TMP0]], i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i32 1)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@@ -200,22 +182,20 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP2]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16, i16* [[TMP0]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV1]], i16* [[TMP2]], align 2
// CHECK2-NEXT: store i16 [[CONV1]], i16* [[TMP0]], align 2
// CHECK2-NEXT: ret void
//
//
@@ -225,8 +205,7 @@ int bar(int n){
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
@@ -238,20 +217,17 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: store i16* [[TMP1]], i16** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK2-NEXT: store [10 x i32]* [[TMP2]], [10 x i32]** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP9:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 12)
// CHECK2-NEXT: [[TMP10:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP8]], i8* [[TMP9]])
// CHECK2-NEXT: [[TMP11:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP11]], %struct.anon.0* [[TMP12]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP10]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP9]], i32 12)
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i16* [[TMP1]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast [10 x i32]* [[TMP2]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*, [10 x i32]*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP11]], i32 3)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -259,32 +235,32 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK2-NEXT: [[TMP6:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[TMP4]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
// CHECK2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK2-NEXT: store i16 [[CONV2]], i16* [[TMP4]], align 2
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP6]], i32 0, i32 2
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: store i16 [[CONV2]], i16* [[TMP1]], align 2
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i32 0, i32 2
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
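
For orientation, the CHECK1/CHECK2 blocks above verify the restored capture-array convention: each captured variable's address is stored into a stack array of i8* that is passed to __kmpc_parallel_51 together with the capture count, in place of the reverted aggregate struct. A minimal sketch of the kind of source these kernels correspond to (hypothetical reconstruction; the test's actual source and RUN lines are not part of this excerpt):

// Hypothetical sketch only -- names mirror the [[A]], [[AA]], [[B]] captures
// in the CHECK lines above, not the verbatim test input.
void capture_array_sketch(void) {
  int a = 0;
  short aa = 0;
  int b[10] = {0};
#pragma omp target parallel map(tofrom : aa)
  aa += 1; // one capture -> [1 x i8*] CAPTURED_VARS_ADDRS, count 1
#pragma omp target parallel map(tofrom : a, aa, b)
  {
    a += 1;    // i32* capture
    aa += 1;   // i16* capture
    b[2] += 1; // [10 x i32]* capture; three captures -> [3 x i8*], count 3
  }
}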

@ -50,8 +50,7 @@ int bar(int n){
// CHECK1-SAME: (i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
@ -59,16 +58,11 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i16* [[TMP0]], i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK1-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -76,22 +70,20 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP2]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16, i16* [[TMP0]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV1]], i16* [[TMP2]], align 2
// CHECK1-NEXT: store i16 [[CONV1]], i16* [[TMP0]], align 2
// CHECK1-NEXT: ret void
//
//
@ -102,8 +94,7 @@ int bar(int n){
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
@ -118,20 +109,17 @@ int bar(int n){
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i16* [[TMP1]], i16** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store [10 x i32]* [[TMP2]], [10 x i32]** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 24)
// CHECK1-NEXT: [[TMP11:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP9]], i8* [[TMP10]])
// CHECK1-NEXT: [[TMP12:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP12]], %struct.anon.0* [[TMP13]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP11]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP10]], i64 24)
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i16* [[TMP1]] to i8*
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [10 x i32]* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*, [10 x i32]*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP12]], i64 3)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -139,32 +127,32 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[TMP4]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
// CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK1-NEXT: store i16 [[CONV2]], i16* [[TMP4]], align 2
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP6]], i64 0, i64 2
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i16 [[CONV2]], i16* [[TMP1]], align 2
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 2
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
@ -173,8 +161,7 @@ int bar(int n){
// CHECK2-SAME: (i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
@ -182,16 +169,11 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i16* [[TMP0]], i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon*
// CHECK2-NEXT: store [[STRUCT_ANON]] [[TMP7]], %struct.anon* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon*)* @__omp_outlined__ to i8*), i8* null, i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 1024, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i32 1)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -199,22 +181,20 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP2]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16, i16* [[TMP0]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV1]], i16* [[TMP2]], align 2
// CHECK2-NEXT: store i16 [[CONV1]], i16* [[TMP0]], align 2
// CHECK2-NEXT: ret void
//
//
@ -225,8 +205,7 @@ int bar(int n){
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
@ -240,20 +219,17 @@ int bar(int n){
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: store i16* [[TMP1]], i16** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK2-NEXT: store [10 x i32]* [[TMP2]], [10 x i32]** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 12)
// CHECK2-NEXT: [[TMP11:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP9]], i8* [[TMP10]])
// CHECK2-NEXT: [[TMP12:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP12]], %struct.anon.0* [[TMP13]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP11]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP10]], i32 12)
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i16* [[TMP1]] to i8*
// CHECK2-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [10 x i32]* [[TMP2]] to i8*
// CHECK2-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast [3 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 [[TMP5]], i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*, [10 x i32]*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP12]], i32 3)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -261,32 +237,32 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK2-NEXT: [[TMP6:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[TMP4]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP4]] to i32
// CHECK2-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1
// CHECK2-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16
// CHECK2-NEXT: store i16 [[CONV2]], i16* [[TMP4]], align 2
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP6]], i32 0, i32 2
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: store i16 [[CONV2]], i16* [[TMP1]], align 2
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i32 0, i32 2
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
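
Relative to the previous file, the kernels above additionally lower num_threads clauses into the fourth argument of __kmpc_parallel_51: the constant 1024 and the runtime value [[TMP5]], with -1 standing for no bound. A hedged sketch of directives with that shape (assumed, not the test's verbatim source):

// Hypothetical sketch: the num_threads expressions below surface as the
// i32 1024 / i32 [[TMP5]] arguments in the __kmpc_parallel_51 calls above.
void num_threads_sketch(int n) {
  int a = 0, b[10] = {0};
  short aa = 0;
#pragma omp target parallel num_threads(1024) map(tofrom : aa)
  aa += 1;
#pragma omp target parallel num_threads(n) map(tofrom : a, aa, b)
  {
    a += 1;
    aa += 1;
    b[2] += 1;
  }
}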

@ -53,7 +53,7 @@ int bar(int n){
// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
@ -63,12 +63,13 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK1-NEXT: store i8 [[TMP3]], i8* [[TMP2]], align 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i8*
// CHECK1-NEXT: store i8 [[TMP2]], i8* [[CONV1]], align 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -76,20 +77,16 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[TMP1]], align 1
// CHECK1-NEXT: store i8 [[TMP2]], i8* [[A]], align 1
// CHECK1-NEXT: store i8 49, i8* [[A]], align 1
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i8*
// CHECK1-NEXT: store i8 49, i8* [[CONV]], align 1
// CHECK1-NEXT: ret void
//
//
@ -97,7 +94,7 @@ int bar(int n){
// CHECK1-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 2
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
@ -107,12 +104,13 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[TMP2]], align 2
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -120,20 +118,16 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[AA]], align 2
// CHECK1-NEXT: store i16 1, i16* [[AA]], align 2
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: store i16 1, i16* [[CONV]], align 2
// CHECK1-NEXT: ret void
//
//
@ -141,7 +135,7 @@ int bar(int n){
// CHECK1-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 2
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
@ -151,12 +145,13 @@ int bar(int n){
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[TMP2]], align 2
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP3]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@ -164,78 +159,58 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_2]], align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i16* [[AA]], i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.2* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.2*
// CHECK1-NEXT: store [[STRUCT_ANON_2]] [[TMP7]], %struct.anon.2* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.2*)* @__omp_outlined__3 to i8*), i8* null, i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i16* [[CONV]] to i8*
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP4]], i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_3]], align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i16* [[TMP2]], i16** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK1-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.3*
// CHECK1-NEXT: store [[STRUCT_ANON_3]] [[TMP7]], %struct.anon.3* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.3*)* @__omp_outlined__4 to i8*), i8* null, i8* [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i64 8)
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__4 to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 8
// CHECK1-NEXT: store i16 1, i16* [[TMP2]], align 2
// CHECK1-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 8
// CHECK1-NEXT: store i16 1, i16* [[TMP0]], align 2
// CHECK1-NEXT: ret void
//
//
@ -243,7 +218,7 @@ int bar(int n){
// CHECK2-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
@ -253,12 +228,13 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK2-NEXT: store i8 [[TMP3]], i8* [[TMP2]], align 1
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i32* [[A_CASTED]] to i8*
// CHECK2-NEXT: store i8 [[TMP2]], i8* [[CONV1]], align 1
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -266,20 +242,16 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[TMP1]], align 1
// CHECK2-NEXT: store i8 [[TMP2]], i8* [[A]], align 1
// CHECK2-NEXT: store i8 49, i8* [[A]], align 1
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i32* [[A_ADDR]] to i8*
// CHECK2-NEXT: store i8 49, i8* [[CONV]], align 1
// CHECK2-NEXT: ret void
//
//
@ -287,7 +259,7 @@ int bar(int n){
// CHECK2-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 2
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
@ -297,12 +269,13 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[TMP2]], align 2
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2]]
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]]) #[[ATTR2]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -310,20 +283,16 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[AA]], align 2
// CHECK2-NEXT: store i16 1, i16* [[AA]], align 2
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK2-NEXT: store i16 1, i16* [[CONV]], align 2
// CHECK2-NEXT: ret void
//
//
@ -331,7 +300,7 @@ int bar(int n){
// CHECK2-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 2
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
@ -341,12 +310,13 @@ int bar(int n){
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[TMP2]], align 2
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2]]
// CHECK2-NEXT: call void @__omp_outlined__2(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP3]]) #[[ATTR2]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@ -354,77 +324,57 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_2]], align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i16* [[AA]], i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.2* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.2*
// CHECK2-NEXT: store [[STRUCT_ANON_2]] [[TMP7]], %struct.anon.2* [[TMP8]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.2*)* @__omp_outlined__3 to i8*), i8* null, i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast i16* [[CONV]] to i8*
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[TMP0]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__3 to i8*), i8* null, i8** [[TMP4]], i32 1)
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__3
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_3]], align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i16* [[TMP2]], i16** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.3* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP5:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP4]], i8* [[TMP5]])
// CHECK2-NEXT: [[TMP7:%.*]] = load [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to %struct.anon.3*
// CHECK2-NEXT: store [[STRUCT_ANON_3]] [[TMP7]], %struct.anon.3* [[TMP8]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.3*)* @__omp_outlined__4 to i8*), i8* null, i8* [[TMP6]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP5]], i32 4)
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast i16* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i16*)* @__omp_outlined__4 to i8*), i8* null, i8** [[TMP5]], i32 1)
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__4
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[AA:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 4
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i16*, i16** [[TMP1]], align 4
// CHECK2-NEXT: store i16 1, i16* [[TMP2]], align 2
// CHECK2-NEXT: store i16* [[AA]], i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[AA_ADDR]], align 4
// CHECK2-NEXT: store i16 1, i16* [[TMP0]], align 2
// CHECK2-NEXT: ret void
//

View File

@@ -35,7 +35,6 @@ int bar(int n){
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l16
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
@@ -45,7 +44,7 @@ int bar(int n){
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]]) #[[ATTR5:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -53,97 +52,86 @@ int bar(int n){
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[I:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[I_ON_STACK:%.*]] = bitcast i8* [[I]] to i32*
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[I_ON_STACK]], i32** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[TMP12:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP10]], i8* [[TMP11]])
// CHECK1-NEXT: [[TMP13:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP12]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP13]], %struct.anon.0* [[TMP14]], align 8
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP12]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP11]], i64 8)
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i32* [[I_ON_STACK]] to i8*
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP10]], i64 1)
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[I]], i64 4)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
@@ -153,22 +141,22 @@ int bar(int n){
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR5]]
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l16
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
@@ -178,7 +166,7 @@ int bar(int n){
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]]) #[[ATTR5:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@@ -186,97 +174,86 @@ int bar(int n){
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[I:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[I_ON_STACK:%.*]] = bitcast i8* [[I]] to i32*
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[I_ON_STACK]], i32** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP11:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[TMP12:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP10]], i8* [[TMP11]])
// CHECK2-NEXT: [[TMP13:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP12]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP13]], %struct.anon.0* [[TMP14]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8* [[TMP12]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP11]], i32 4)
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i32* [[I_ON_STACK]] to i8*
// CHECK2-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP10]], i32 1)
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[I]], i32 4)
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP2]], align 4
// CHECK2-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
@@ -286,14 +263,15 @@ int bar(int n){
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: call void @__kmpc_get_shared_variables_aggregate(i8** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.anon.0**
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast %struct.anon.0** [[TMP3]] to %struct.anon.0*
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[TMP4]]) #[[ATTR4]]
// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR5]]
// CHECK2-NEXT: ret void
//

View File

@@ -35,7 +35,8 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
@@ -49,17 +50,17 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[ARGC_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[ARGC_CASTED]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i64 [[TMP4]], i32* [[TMP0]], i64 [[TMP6]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
@@ -67,284 +68,272 @@ int main(int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK1-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 8
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP12]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK1-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP18]], [[ADD]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = zext i32 [[TMP20]] to i64
// CHECK1-NEXT: store i64 [[TMP21]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
// CHECK1-NEXT: store i64 [[TMP23]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: store i64 [[TMP27]], i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: store i32 [[TMP29]], i32* [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 5
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: store i32 [[TMP32]], i32* [[TMP31]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK1-NEXT: [[TMP34:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 40)
// CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP33]], i8* [[TMP34]])
// CHECK1-NEXT: [[TMP36:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 1
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8* [[TMP35]] to %struct.anon.0*
// CHECK1-NEXT: store [[STRUCT_ANON_0]] [[TMP36]], %struct.anon.0* [[TMP37]], align 1
// CHECK1-NEXT: [[TMP38:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP38]], align 4
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP39]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP35]])
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[TMP34]], i64 40)
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV8:%.*]] = bitcast i64* [[ARGC_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[CONV8]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, i64* [[ARGC_CASTED]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP20]], i32* [[CONV9]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP15]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP17]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK1-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP19]] to i8*
// CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
// CHECK1-NEXT: [[TMP31:%.*]] = inttoptr i64 [[TMP21]] to i8*
// CHECK1-NEXT: store i8* [[TMP31]], i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP33]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i64, i64, i64, i32*, i64)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP34]], i64 5)
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
// CHECK1-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
// CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
// CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP41]], [[TMP42]]
// CHECK1-NEXT: br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
// CHECK1: cond.true14:
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: br label [[COND_END16:%.*]]
// CHECK1: cond.false15:
// CHECK1-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP44]], [[TMP45]]
// CHECK1-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP46]], [[TMP47]]
// CHECK1-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK1: cond.true11:
// CHECK1-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: br label [[COND_END13:%.*]]
// CHECK1: cond.false12:
// CHECK1-NEXT: [[TMP49:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END13]]
// CHECK1: cond.end13:
// CHECK1-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP48]], [[COND_TRUE11]] ], [ [[TMP49]], [[COND_FALSE12]] ]
// CHECK1-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP50]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[COND_END16]]
// CHECK1: cond.end16:
// CHECK1-NEXT: [[COND17:%.*]] = phi i32 [ [[TMP43]], [[COND_TRUE14]] ], [ [[TMP44]], [[COND_FALSE15]] ]
// CHECK1-NEXT: store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP52]])
// CHECK1-NEXT: [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
// CHECK1-NEXT: store i64 [[TMP4]], i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 8
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 5
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 8
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP12]], 0
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP13]]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP15]] to i32
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP16]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP5]] to i32
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP19]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP17]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP9]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP21]] to i32
// CHECK1-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP20]], [[CONV6]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP11]] to i32
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP10]], [[CONV8]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 8
// CHECK1-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP22]] to i32
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV10:%.*]] = trunc i64 [[TMP12]] to i32
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV8]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV10]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
// CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP12:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP29]], 1
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[I5]]) #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: [[CALL11:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[TMP8]]) #[[ATTR7]]
// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[CALL]], [[CALL11]]
// CHECK1-NEXT: [[CALL13:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[ARGC]]) #[[ATTR7]]
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[ADD12]], [[CALL13]]
// CHECK1-NEXT: store i32 [[ADD14]], i32* [[TMP8]], align 4
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I7]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[I7]]) #[[ATTR5:[0-9]+]]
// CHECK1-NEXT: [[CALL13:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[TMP0]]) #[[ATTR5]]
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CALL]], [[CALL13]]
// CHECK1-NEXT: [[CALL15:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[CONV]]) #[[ATTR5]]
// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[ADD14]], [[CALL15]]
// CHECK1-NEXT: store i32 [[ADD16]], i32* [[TMP0]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP30]], 1
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK1-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
// CHECK1-NEXT: store i32 [[ADD16]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
// CHECK1-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK1-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK1-NEXT: store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP36]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP26]])
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-NEXT: ret void
@@ -356,7 +345,8 @@ int main(int argc, char **argv) {
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[ARGC_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
@@ -368,17 +358,15 @@ int main(int argc, char **argv) {
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP5]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[TMP6]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[ARGC_CASTED]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARGC_CASTED]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32 [[TMP4]], i32* [[TMP0]], i32 [[TMP6]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
@@ -386,13 +374,13 @@ int main(int argc, char **argv) {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
@@ -402,146 +390,138 @@ int main(int argc, char **argv) {
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTTMP_OUTLINED_AGG_ARG:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK2-NEXT: [[ARGC_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[ARGC]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP12]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK2-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP18]], [[ADD]]
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK2-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP23]], i32* [[TMP22]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: store i32 [[TMP25]], i32* [[TMP24]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK2-NEXT: store i32 [[TMP27]], i32* [[TMP26]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
// CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP28]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: store i32 [[TMP30]], i32* [[TMP29]], align 4
// CHECK2-NEXT: [[TMP31:%.*]] = bitcast %struct.anon.0* [[DOTTMP_OUTLINED_AGG_ARG]] to i8*
// CHECK2-NEXT: [[TMP32:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 20)
// CHECK2-NEXT: [[TMP33:%.*]] = call i8* @__kmpc_alloc_aggregate_arg(i8* [[TMP31]], i8* [[TMP32]])
// CHECK2-NEXT: [[TMP34:%.*]] = load [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], align 4
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8* [[TMP33]] to %struct.anon.0*
// CHECK2-NEXT: store [[STRUCT_ANON_0]] [[TMP34]], %struct.anon.0* [[TMP35]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP37]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8* [[TMP33]])
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[TMP32]], i32 20)
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP16]], i32* [[ARGC_CASTED]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARGC_CASTED]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = inttoptr i32 [[TMP14]] to i8*
// CHECK2-NEXT: store i8* [[TMP21]], i8** [[TMP20]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP23:%.*]] = inttoptr i32 [[TMP15]] to i8*
// CHECK2-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP25:%.*]] = inttoptr i32 [[TMP17]] to i8*
// CHECK2-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK2-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
// CHECK2-NEXT: [[TMP29:%.*]] = inttoptr i32 [[TMP19]] to i8*
// CHECK2-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast [5 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB4]], i32 [[TMP31]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP32]], i32 5)
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
// CHECK2-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
// CHECK2-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
// CHECK2-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP42]], [[TMP43]]
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP37]], [[TMP38]]
// CHECK2-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP44]], [[TMP45]]
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP39]], [[TMP40]]
// CHECK2-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK2: cond.true11:
// CHECK2-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: br label [[COND_END13:%.*]]
// CHECK2: cond.false12:
// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END13]]
// CHECK2: cond.end13:
// CHECK2-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP46]], [[COND_TRUE11]] ], [ [[TMP47]], [[COND_FALSE12]] ]
// CHECK2-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP41]], [[COND_TRUE11]] ], [ [[TMP42]], [[COND_FALSE12]] ]
// CHECK2-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP48]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP49:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP50:%.*]] = load i32, i32* [[TMP49]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP50]])
// CHECK2-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP45]])
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURED_OMP_PREVIOUS_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
@@ -554,110 +534,100 @@ int main(int argc, char **argv) {
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 2
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[ARGC]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 3
// CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP12]], 0
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP13]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_LB]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP19]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP17]])
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP9]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURED_OMP_PREVIOUS_UB]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP22]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP29]], 1
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I4]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[I4]]) #[[ATTR7:[0-9]+]]
// CHECK2-NEXT: [[CALL8:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[TMP8]]) #[[ATTR7]]
// CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[I4]]) #[[ATTR5:[0-9]+]]
// CHECK2-NEXT: [[CALL8:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[TMP0]]) #[[ATTR5]]
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[CALL]], [[CALL8]]
// CHECK2-NEXT: [[CALL10:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[ARGC]]) #[[ATTR7]]
// CHECK2-NEXT: [[CALL10:%.*]] = call noundef i32 @_Z3fooPi(i32* noundef [[ARGC_ADDR]]) #[[ATTR5]]
// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[ADD9]], [[CALL10]]
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[TMP8]], align 4
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[TMP0]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP30]], 1
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK2-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK2-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP36]])
// CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP26]])
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: ret void

View File

@@ -80,7 +80,6 @@ int main (int argc, char **argv) {
// CHECK1-SAME: (i64 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
@@ -94,11 +93,9 @@ int main (int argc, char **argv) {
// CHECK1-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[ARGC_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ON_STACK]], i32** [[TMP3]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[ARGC_ON_STACK]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i64 4)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
@@ -107,18 +104,16 @@ int main (int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
@@ -126,7 +121,6 @@ int main (int argc, char **argv) {
// CHECK1-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
@@ -139,11 +133,9 @@ int main (int argc, char **argv) {
// CHECK1-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i8***
// CHECK1-NEXT: store i8** [[TMP1]], i8*** [[ARGC_ON_STACK]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i8*** [[ARGC_ON_STACK]], i8**** [[TMP3]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3]]
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8*** [[ARGC_ON_STACK]]) #[[ATTR3]]
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i64 8)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
@@ -152,18 +144,16 @@ int main (int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8
// CHECK1-NEXT: store i8** null, i8*** [[TMP2]], align 8
// CHECK1-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** null, i8*** [[TMP0]], align 8
// CHECK1-NEXT: ret void
//
//
@@ -171,7 +161,6 @@ int main (int argc, char **argv) {
// CHECK2-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
@@ -184,11 +173,9 @@ int main (int argc, char **argv) {
// CHECK2-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[ARGC_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[ARGC_ON_STACK]], i32** [[TMP3]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[ARGC_ON_STACK]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i32 4)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
@@ -197,18 +184,16 @@ int main (int argc, char **argv) {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK2-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
@@ -216,7 +201,6 @@ int main (int argc, char **argv) {
// CHECK2-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 4
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 4
@@ -229,11 +213,9 @@ int main (int argc, char **argv) {
// CHECK2-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i8***
// CHECK2-NEXT: store i8** [[TMP1]], i8*** [[ARGC_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK2-NEXT: store i8*** [[ARGC_ON_STACK]], i8**** [[TMP3]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3]]
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8*** [[ARGC_ON_STACK]]) #[[ATTR3]]
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i32 4)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
@@ -242,18 +224,16 @@ int main (int argc, char **argv) {
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 4
// CHECK2-NEXT: store i8** null, i8*** [[TMP2]], align 4
// CHECK2-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 4
// CHECK2-NEXT: store i8** null, i8*** [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
@@ -263,7 +243,6 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
@@ -281,11 +260,9 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC3]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[ARGC_ON_STACK]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[ARGC_ON_STACK]], i32** [[TMP3]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[ARGC_ON_STACK]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[ARGC3]], i64 4)
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK3-NEXT: ret void
@@ -294,18 +271,16 @@ int main (int argc, char **argv) {
//
//
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK3-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK3-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
//
//
@@ -315,7 +290,6 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
@@ -332,11 +306,9 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC2]] to i8***
// CHECK3-NEXT: store i8** [[TMP1]], i8*** [[ARGC_ON_STACK]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store i8*** [[ARGC_ON_STACK]], i8**** [[TMP3]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3]]
// CHECK3-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8*** [[ARGC_ON_STACK]]) #[[ATTR3]]
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[ARGC2]], i64 8)
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK3-NEXT: ret void
@@ -345,18 +317,16 @@ int main (int argc, char **argv) {
//
//
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8
// CHECK3-NEXT: store i8** null, i8*** [[TMP2]], align 8
// CHECK3-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
// CHECK3-NEXT: store i8** null, i8*** [[TMP0]], align 8
// CHECK3-NEXT: ret void
//
//
@@ -366,7 +336,6 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
@@ -381,11 +350,9 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[ARGC_ON_STACK]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store i32* [[ARGC_ON_STACK]], i32** [[TMP3]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK4-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3:[0-9]+]]
// CHECK4-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[ARGC_ON_STACK]]) #[[ATTR3:[0-9]+]]
// CHECK4-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i32 4)
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK4-NEXT: ret void
@@ -394,18 +361,16 @@ int main (int argc, char **argv) {
//
//
// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 4
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
// CHECK4-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK4-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[TMP0]], align 4
// CHECK4-NEXT: ret void
//
//
@@ -415,7 +380,6 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 4
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
@@ -430,11 +394,9 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[ARGC_ON_STACK:%.*]] = bitcast i8* [[ARGC1]] to i8***
// CHECK4-NEXT: store i8** [[TMP1]], i8*** [[ARGC_ON_STACK]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store i8*** [[ARGC_ON_STACK]], i8**** [[TMP3]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK4-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR3]]
// CHECK4-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8*** [[ARGC_ON_STACK]]) #[[ATTR3]]
// CHECK4-NEXT: call void @__kmpc_free_shared(i8* [[ARGC1]], i32 4)
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK4-NEXT: ret void
@@ -443,17 +405,15 @@ int main (int argc, char **argv) {
//
//
// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 4
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 4
// CHECK4-NEXT: store i8** null, i8*** [[TMP2]], align 4
// CHECK4-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 4
// CHECK4-NEXT: store i8** null, i8*** [[TMP0]], align 4
// CHECK4-NEXT: ret void
//
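
Note: the CHECK3/CHECK4 kernels above globalize the captured argc with __kmpc_alloc_shared and, with the aggregate context reverted, the inner parallel region zeroes it through a plain pointer argument. A minimal sketch of a source shape consistent with that IR (hypothetical, not the verbatim test body; the real RUN lines and declarations live outside this hunk):

void sketch(long long a, long long b, int argc) {
#pragma omp target teams map(tofrom: argc)  // kernel: __kmpc_target_init/_deinit
  {
#pragma omp parallel  // after the revert, argc arrives as an i32* parameter
    argc = 0;         // matches the final "store i32 0" in the outlined body
  }
}

The i8*** variants in the same hunks are the analogous pattern with a char **argc capture.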

File diff suppressed because it is too large


@@ -49,38 +49,34 @@ int main() {
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @"?main@Test@@SAXXZ"()
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[T:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: invoke void @"?foo@@YAXXZ"()
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
// CHECK1: catch.dispatch:
// CHECK1-NEXT: [[TMP1:%.*]] = catchswitch within none [label %catch] unwind label [[TERMINATE:%.*]]
// CHECK1-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch] unwind label [[TERMINATE:%.*]]
// CHECK1: catch:
// CHECK1-NEXT: [[TMP2:%.*]] = catchpad within [[TMP1]] [%rtti.TypeDescriptor2* @"??_R0H@8", i32 0, i32* %t]
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.var) [ "funclet"(token [[TMP2]]) ]
// CHECK1-NEXT: invoke void @"?bar@@YAXXZ"() [ "funclet"(token [[TMP2]]) ]
// CHECK1-NEXT: [[TMP1:%.*]] = catchpad within [[TMP0]] [%rtti.TypeDescriptor2* @"??_R0H@8", i32 0, i32* %t]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], [8 x i32]* @.gomp_critical_user_.var) [ "funclet"(token [[TMP1]]) ]
// CHECK1-NEXT: invoke void @"?bar@@YAXXZ"() [ "funclet"(token [[TMP1]]) ]
// CHECK1-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE2:%.*]]
// CHECK1: invoke.cont1:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.var) [ "funclet"(token [[TMP2]]) ]
// CHECK1-NEXT: catchret from [[TMP2]] to label [[CATCHRET_DEST:%.*]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], [8 x i32]* @.gomp_critical_user_.var) [ "funclet"(token [[TMP1]]) ]
// CHECK1-NEXT: catchret from [[TMP1]] to label [[CATCHRET_DEST:%.*]]
// CHECK1: catchret.dest:
// CHECK1-NEXT: br label [[TRY_CONT:%.*]]
// CHECK1: try.cont:
@@ -88,51 +84,49 @@ int main() {
// CHECK1: invoke.cont:
// CHECK1-NEXT: br label [[TRY_CONT]]
// CHECK1: terminate:
// CHECK1-NEXT: [[TMP5:%.*]] = cleanuppad within none []
// CHECK1-NEXT: call void @"?terminate@@YAXXZ"() #[[ATTR7:[0-9]+]] [ "funclet"(token [[TMP5]]) ]
// CHECK1-NEXT: [[TMP4:%.*]] = cleanuppad within none []
// CHECK1-NEXT: call void @"?terminate@@YAXXZ"() #[[ATTR7:[0-9]+]] [ "funclet"(token [[TMP4]]) ]
// CHECK1-NEXT: unreachable
// CHECK1: terminate2:
// CHECK1-NEXT: [[TMP6:%.*]] = cleanuppad within [[TMP2]] []
// CHECK1-NEXT: call void @"?terminate@@YAXXZ"() #[[ATTR7]] [ "funclet"(token [[TMP6]]) ]
// CHECK1-NEXT: [[TMP5:%.*]] = cleanuppad within [[TMP1]] []
// CHECK1-NEXT: call void @"?terminate@@YAXXZ"() #[[ATTR7]] [ "funclet"(token [[TMP5]]) ]
// CHECK1-NEXT: unreachable
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[J:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[J_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[LOCAL_J:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COPYPRIVATE_DID_IT:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COPYPRIVATE_CPR_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32* [[J]], i32** [[J_ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[J_ADDR]], align 8
// CHECK1-NEXT: store i32 3, i32* [[LOCAL_J]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: store i32 4, i32* [[LOCAL_J]], align 4
// CHECK1-NEXT: call void @__kmpc_end_single(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_end_single(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i32* [[LOCAL_J]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4
// CHECK1-NEXT: call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.copyprivate.copy_func, i32 [[TMP10]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[LOCAL_J]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[LOCAL_J]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4
// CHECK1-NEXT: call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.copyprivate.copy_func, i32 [[TMP8]])
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[LOCAL_J]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
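
The .omp_outlined..1 body above is the standard single/copyprivate sequence: __kmpc_single guards one thread's update, __kmpc_end_single releases it, and __kmpc_copyprivate broadcasts the private copy before every thread stores the result back through the captured pointer. A plausible source shape, with names borrowed from the value names above (hypothetical, not the verbatim test):

void run() {
  int j;
#pragma omp parallel
  {
    int local_j = 3;              // [[LOCAL_J]]
#pragma omp single copyprivate(local_j)
    local_j = 4;                  // executed by one thread, then broadcast
    j = local_j;                  // every thread writes the broadcast value to [[J]]
  }
}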


@@ -54,11 +54,14 @@ int main() {
// CHECK-DAG: !DISubprogram(name: "__captured_stmt"
// CHECK-DAG-SAME: flags: DIFlagArtificial
// CHECK-DAG: !DISubprogram(name: ".omp_outlined."
// CHECK-DAG: !DISubprogram(name: ".omp_outlined._debug__"
// CHECK-DAG-SAME: flags: DIFlagArtificial
// CHECK-DAG: !DISubprogram(linkageName: ".omp_task_entry."
// CHECK-DAG-SAME: flags: DIFlagArtificial
// CHECK-DAG: !DISubprogram(name: ".omp_outlined."
// CHECK-DAG-SAME: flags: DIFlagArtificial
// CHECK-DAG: !DISubprogram(name: ".omp_outlined..1"
// CHECK-DAG-SAME: flags: DIFlagArtificial
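
These CHECK-DAG lines pin down which artificial DISubprograms the frontend must emit once the revert restores per-capture outlining, including the .omp_outlined._debug__ wrapper that only appears when debug info is enabled. A hypothetical source shape that yields such a set under -fopenmp with debug info (the actual RUN line and test body sit outside this hunk):

int main() {
  int a = 0;
#pragma omp parallel   // outlined as .omp_outlined., plus its ._debug__ twin
  a += 1;
#pragma omp task       // task thunk emitted as .omp_task_entry.
  a += 2;
#pragma omp parallel   // second region, outlined as .omp_outlined..1
  a += 3;
  return a;
}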


@@ -78,9 +78,6 @@ int main (int argc, char **argv) {
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
@@ -90,56 +87,45 @@ int main (int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[VLA]], i32** [[TMP4]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP5]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[VLA]], i32** [[TMP7]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(i8** noundef [[TMP8]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]])
// CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(i8** noundef [[TMP3]])
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP9]])
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP10]]
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP4]])
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP5]]
//
//
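
The CHECK1 main above allocates a VLA and, after the revert, each __kmpc_fork_call passes an explicit argument count plus one trailing vararg per capture (the i64 bound, the i32* data, and so on) instead of count 1 with a single %struct.anon* aggregate. A rough source shape matching these check lines (hypothetical names; the tmain helper and the exact clauses are elided here):

int global;
template <class T> void foo(T x) {}

int main(int argc, char **argv) {
  int a[argc];                     // VLA: forwarded as an i64 bound plus an i32* pointer
#pragma omp parallel               // fork_call(..., i32 2, ..., i64 bound, i32* a)
  foo(a[1]), a[1] = global;
#pragma omp parallel private(a)    // only the bound travels: fork_call(..., i32 1, ..., i64 bound)
#pragma omp parallel               // nested region re-captures the privatized VLA
  foo(a[1]), a[1] = global;
  return 0;
}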
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP5]])
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP2]])
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @global, align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: unreachable
//
//
@@ -159,121 +145,105 @@ int main (int argc, char **argv) {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP2]], align 16
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[VLA]], i32** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: store i32* [[GLOBAL]], i32** [[TMP6]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP7]])
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16
// CHECK1-NEXT: store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP7]])
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP3]])
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP9:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP10]]) #[[ATTR6]]
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]]
// CHECK1-NEXT: unreachable
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP6]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP5]])
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP2]])
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @global, align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR6]]
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6]]
// CHECK1-NEXT: unreachable
//
//
@@ -281,7 +251,6 @@ int main (int argc, char **argv) {
// CHECK1-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR3]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
// CHECK1-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0
@@ -289,43 +258,38 @@ int main (int argc, char **argv) {
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
// CHECK1-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i8*** [[ARGC_ADDR]], i8**** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP5]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VAR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[TMP2]], align 8
// CHECK1-NEXT: invoke void @_Z3fooIPPcEvT_(i8** noundef [[TMP5]])
// CHECK1-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8
// CHECK1-NEXT: invoke void @_Z3fooIPPcEvT_(i8** noundef [[TMP2]])
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[VAR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = mul nsw i64 0, [[TMP4]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP7]]
// CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]]
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP8:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP8]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP9]]) #[[ATTR6]]
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]]
// CHECK1-NEXT: unreachable
//
//
@@ -345,9 +309,6 @@ int main (int argc, char **argv) {
// CHECK2-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META18:![0-9]+]], metadata !DIExpression()), !dbg [[DBG19:![0-9]+]]
@@ -361,69 +322,59 @@ int main (int argc, char **argv) {
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG23]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG26:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META27:![0-9]+]], metadata !DIExpression()), !dbg [[DBG31:![0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG32:![0-9]+]]
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP3]], align 8, !dbg [[DBG32]]
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG32]]
// CHECK2-NEXT: store i32* [[VLA]], i32** [[TMP4]], align 8, !dbg [[DBG32]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG32]]
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 0, !dbg [[DBG33:![0-9]+]]
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP5]], align 8, !dbg [[DBG33]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]]), !dbg [[DBG33]]
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 0, !dbg [[DBG34:![0-9]+]]
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8, !dbg [[DBG34]]
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 1, !dbg [[DBG34]]
// CHECK2-NEXT: store i32* [[VLA]], i32** [[TMP7]], align 8, !dbg [[DBG34]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]]), !dbg [[DBG34]]
// CHECK2-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG35:![0-9]+]]
// CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(i8** noundef [[TMP8]]), !dbg [[DBG36:![0-9]+]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG32:![0-9]+]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]]), !dbg [[DBG33:![0-9]+]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG34:![0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG35:![0-9]+]]
// CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(i8** noundef [[TMP3]]), !dbg [[DBG36:![0-9]+]]
// CHECK2-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG37:![0-9]+]]
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG38:![0-9]+]]
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP9]]), !dbg [[DBG38]]
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG38]]
// CHECK2-NEXT: ret i32 [[TMP10]], !dbg [[DBG38]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG38:![0-9]+]]
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG38]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG38]]
// CHECK2-NEXT: ret i32 [[TMP5]], !dbg [[DBG38]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG39:![0-9]+]] {
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG39:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META50:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META47:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51]]
// CHECK2-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon** [[__CONTEXT_ADDR]], metadata [[META53:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG54:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0, !dbg [[DBG54]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG54]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1, !dbg [[DBG54]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG54]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG55:![0-9]+]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG55]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP5]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG56:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META50:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META51:![0-9]+]], metadata !DIExpression()), !dbg [[DBG52:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG53:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG53]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG54:![0-9]+]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG54]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP2]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG53]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG57:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG58:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG59:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG57]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG55:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG56:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG57:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG55]]
// CHECK2: terminate.lpad:
// CHECK2-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG56]]
// CHECK2-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG56]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR7:[0-9]+]], !dbg [[DBG56]]
// CHECK2-NEXT: unreachable, !dbg [[DBG56]]
// CHECK2-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG53]]
// CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG53]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7:[0-9]+]], !dbg [[DBG53]]
// CHECK2-NEXT: unreachable, !dbg [[DBG53]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
// CHECK2-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat !dbg [[DBG60:![0-9]+]] {
// CHECK2-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat !dbg [[DBG58:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META65:![0-9]+]], metadata !DIExpression()), !dbg [[DBG66:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG67:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META63:![0-9]+]], metadata !DIExpression()), !dbg [[DBG64:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG65:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
@@ -433,204 +384,331 @@ int main (int argc, char **argv) {
// CHECK2-NEXT: unreachable
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] !dbg [[DBG70:![0-9]+]] {
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG66:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG68:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META69:![0-9]+]], metadata !DIExpression()), !dbg [[DBG68]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG68]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META71:![0-9]+]], metadata !DIExpression()), !dbg [[DBG68]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG72:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG72]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG72]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG72]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG72]]
// CHECK2-NEXT: call void @.omp_outlined._debug__(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG72]]
// CHECK2-NEXT: ret void, !dbg [[DBG72]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG75:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META78:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
// CHECK2-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon.0** [[__CONTEXT_ADDR]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG81:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0, !dbg [[DBG81]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG81]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG81]]
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG81]]
// CHECK2-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP2]], align 16, !dbg [[DBG81]]
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG81]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]]
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG85:![0-9]+]]
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8, !dbg [[DBG85]]
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG85]]
// CHECK2-NEXT: store i32* [[VLA]], i32** [[TMP5]], align 8, !dbg [[DBG85]]
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2, !dbg [[DBG85]]
// CHECK2-NEXT: store i32* [[GLOBAL]], i32** [[TMP6]], align 8, !dbg [[DBG85]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG85]]
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG86:![0-9]+]]
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP7]]), !dbg [[DBG86]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META81:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG82:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
// CHECK2-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG82]]
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG82]]
// CHECK2-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16, !dbg [[DBG82]]
// CHECK2-NEXT: store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG82]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA1]], metadata [[META85:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]]), !dbg [[DBG82]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG86:![0-9]+]]
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP2]]), !dbg [[DBG86]]
// CHECK2-NEXT: ret void, !dbg [[DBG88:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG89:![0-9]+]] {
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.2
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG89:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META96:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG93:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META98:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97]]
// CHECK2-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon.1** [[__CONTEXT_ADDR]], metadata [[META99:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG100:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8, !dbg [[DBG100]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG101:![0-9]+]]
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG101]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP7]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG102:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META94:![0-9]+]], metadata !DIExpression()), !dbg [[DBG93]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG93]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META96:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97:![0-9]+]]
// CHECK2-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META98:![0-9]+]], metadata !DIExpression()), !dbg [[DBG99:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG100:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG100]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG101:![0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG101]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP3]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG100]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4, !dbg [[DBG103:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG104:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG105:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG103]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG102:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG103:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG104:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG102]]
// CHECK2: terminate.lpad:
// CHECK2-NEXT: [[TMP9:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG102]]
// CHECK2-NEXT: [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0, !dbg [[DBG102]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP10]]) #[[ATTR7]], !dbg [[DBG102]]
// CHECK2-NEXT: unreachable, !dbg [[DBG102]]
// CHECK2-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG100]]
// CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG100]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG100]]
// CHECK2-NEXT: unreachable, !dbg [[DBG100]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] !dbg [[DBG106:![0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] !dbg [[DBG105:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META113:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META106:![0-9]+]], metadata !DIExpression()), !dbg [[DBG107:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]]
// CHECK2-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon.2** [[__CONTEXT_ADDR]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG117:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0, !dbg [[DBG117]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG117]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1, !dbg [[DBG117]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG117]]
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG118:![0-9]+]]
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP5]], align 8, !dbg [[DBG118]]
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG118]]
// CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP6]], align 8, !dbg [[DBG118]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG118]]
// CHECK2-NEXT: ret void, !dbg [[DBG119:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META108:![0-9]+]], metadata !DIExpression()), !dbg [[DBG107]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META109:![0-9]+]], metadata !DIExpression()), !dbg [[DBG107]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META110:![0-9]+]], metadata !DIExpression()), !dbg [[DBG107]]
// CHECK2-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META111:![0-9]+]], metadata !DIExpression()), !dbg [[DBG107]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG112:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG112]]
// CHECK2-NEXT: call void @.omp_outlined._debug__.2(i32* [[TMP3]], i32* [[TMP4]], i64 [[TMP0]], i32* [[TMP5]], i32* [[TMP6]]) #[[ATTR6]], !dbg [[DBG112]]
// CHECK2-NEXT: ret void, !dbg [[DBG112]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG120:![0-9]+]] {
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG113:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META114:![0-9]+]], metadata !DIExpression()), !dbg [[DBG115:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META129:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128]]
// CHECK2-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon.3** [[__CONTEXT_ADDR]], metadata [[META130:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG131:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0, !dbg [[DBG131]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG131]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[TMP0]], i32 0, i32 1, !dbg [[DBG131]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG131]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG132:![0-9]+]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG132]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP5]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG133:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG115]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META117:![0-9]+]], metadata !DIExpression()), !dbg [[DBG115]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG118:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG118]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG118]]
// CHECK2-NEXT: call void @.omp_outlined._debug__.1(i32* [[TMP1]], i32* [[TMP2]], i64 [[TMP0]]) #[[ATTR6]], !dbg [[DBG118]]
// CHECK2-NEXT: ret void, !dbg [[DBG118]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.5
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG119:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META120:![0-9]+]], metadata !DIExpression()), !dbg [[DBG121:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META122:![0-9]+]], metadata !DIExpression()), !dbg [[DBG121]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG121]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG126:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG126]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]]), !dbg [[DBG126]]
// CHECK2-NEXT: ret void, !dbg [[DBG127:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.6
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG128:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META129:![0-9]+]], metadata !DIExpression()), !dbg [[DBG130:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG130]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG130]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META133:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG135:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG135]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG136:![0-9]+]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG136]]
// CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 noundef [[TMP2]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG135]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG134:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG135:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG136:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG134]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG137:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG138:![0-9]+]]
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG139:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG137]]
// CHECK2: terminate.lpad:
// CHECK2-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG133]]
// CHECK2-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG133]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR7]], !dbg [[DBG133]]
// CHECK2-NEXT: unreachable, !dbg [[DBG133]]
// CHECK2-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG135]]
// CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG135]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7]], !dbg [[DBG135]]
// CHECK2-NEXT: unreachable, !dbg [[DBG135]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG140:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG142:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG142]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG142]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG142]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG146:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: call void @.omp_outlined._debug__.6(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG146]]
// CHECK2-NEXT: ret void, !dbg [[DBG146]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG147:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META148:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META151:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149]]
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META152:![0-9]+]], metadata !DIExpression()), !dbg [[DBG149]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG153:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG153]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG153]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG153]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG153]]
// CHECK2-NEXT: call void @.omp_outlined._debug__.5(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG153]]
// CHECK2-NEXT: ret void, !dbg [[DBG153]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
// CHECK2-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG137:![0-9]+]] {
// CHECK2-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG154:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK2-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
// CHECK2-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG143:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG144:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG144]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG144]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG144]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG144]]
// CHECK2-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG145:![0-9]+]]
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG146:![0-9]+]]
// CHECK2-NEXT: store i8*** [[ARGC_ADDR]], i8**** [[TMP4]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG146]]
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP5]], align 8, !dbg [[DBG146]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG146]]
// CHECK2-NEXT: ret i32 0, !dbg [[DBG147:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META159:![0-9]+]], metadata !DIExpression()), !dbg [[DBG160:![0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG161:![0-9]+]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG161]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG161]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG161]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG161]]
// CHECK2-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG162:![0-9]+]]
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]]), !dbg [[DBG163:![0-9]+]]
// CHECK2-NEXT: ret i32 0, !dbg [[DBG164:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG148:![0-9]+]] {
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.9
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG165:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VAR:%.*]] = alloca double*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META155:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META169:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156]]
// CHECK2-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata %struct.anon.4** [[__CONTEXT_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156]]
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG159:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0, !dbg [[DBG159]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8, !dbg [[DBG159]]
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1, !dbg [[DBG159]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8, !dbg [[DBG159]]
// CHECK2-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[TMP2]], align 8, !dbg [[DBG160:![0-9]+]]
// CHECK2-NEXT: invoke void @_Z3fooIPPcEvT_(i8** noundef [[TMP5]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG162:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170]]
// CHECK2-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META174:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG175:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG175]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8, !dbg [[DBG176:![0-9]+]]
// CHECK2-NEXT: invoke void @_Z3fooIPPcEvT_(i8** noundef [[TMP2]])
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG178:![0-9]+]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170:![0-9]+]]
// CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG171:![0-9]+]]
// CHECK2-NEXT: [[TMP7:%.*]] = mul nsw i64 0, [[TMP4]], !dbg [[DBG171]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP7]], !dbg [[DBG171]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0, !dbg [[DBG171]]
// CHECK2-NEXT: ret void, !dbg [[DBG172:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG186:![0-9]+]]
// CHECK2-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG187:![0-9]+]]
// CHECK2-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG187]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG187]]
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0, !dbg [[DBG187]]
// CHECK2-NEXT: ret void, !dbg [[DBG188:![0-9]+]]
// CHECK2: terminate.lpad:
// CHECK2-NEXT: [[TMP8:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG162]]
// CHECK2-NEXT: [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP8]], 0, !dbg [[DBG162]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP9]]) #[[ATTR7]], !dbg [[DBG162]]
// CHECK2-NEXT: unreachable, !dbg [[DBG162]]
// CHECK2-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null, !dbg [[DBG178]]
// CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG178]]
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG178]]
// CHECK2-NEXT: unreachable, !dbg [[DBG178]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
// CHECK2-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG173:![0-9]+]] {
// CHECK2-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG189:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8
// CHECK2-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META176:![0-9]+]], metadata !DIExpression()), !dbg [[DBG177:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG178:![0-9]+]]
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META192:![0-9]+]], metadata !DIExpression()), !dbg [[DBG193:![0-9]+]]
// CHECK2-NEXT: ret void, !dbg [[DBG194:![0-9]+]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG195:![0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META196:![0-9]+]], metadata !DIExpression()), !dbg [[DBG197:![0-9]+]]
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG197]]
// CHECK2-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG197]]
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG197]]
// CHECK2-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG201:![0-9]+]]
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG201]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG201]]
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG201]]
// CHECK2-NEXT: [[TMP4:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG201]]
// CHECK2-NEXT: call void @.omp_outlined._debug__.9(i32* [[TMP2]], i32* [[TMP3]], i8*** [[TMP4]], i64 [[TMP1]]) #[[ATTR6]], !dbg [[DBG201]]
// CHECK2-NEXT: ret void, !dbg [[DBG201]]
//
//
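For orientation, the CHECK2 functions diffed above are the outlined bodies of nested parallel regions over a VLA, built with debug info enabled. A minimal sketch of source with this shape, reconstructed from the mangled names and dbg metadata rather than quoted from the test file (the exact regions, clause lists, and variable names are assumptions), compiled with -fopenmp -g:

int global;

template <typename T>
void foo(T arg) {}            // instantiates to _Z3fooIiEvT_ here; the
                              // test's tmain also yields _Z3fooIPPcEvT_

int main(int argc, char **argv) {
  int a[argc];                // VLA: the i64 [[VLA]] bound in the checks
#pragma omp parallel          // outer region: captures the bound and a
  {
    foo(a[1]);
    a[1] = global;            // direct load of @global in this region
  }
#pragma omp parallel          // region with its own locals, then nested
  {
    int global;
    int b[argc];
#pragma omp parallel          // inner region: captures bound, b, global
    {
      foo(b[1]);
      b[1] = global;          // load through the captured i32* here
    }
  }
  return 0;
}

The two sides of the diff differ only in how those captures reach the outlined function: the reverted scheme packed them into a single %struct.anon context aggregate, while the restored scheme passes each capture as its own __kmpc_fork_call argument and, under debug info, routes .omp_outlined..N through a .omp_outlined._debug__.N wrapper with the natural parameter list.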
// CHECK3-LABEL: define {{[^@]+}}@main
@@ -687,8 +765,6 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4
// CHECK3-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[LOADGEP_VLA]], i64 1
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4
// CHECK3-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]]
// CHECK3: omp.par.region.parallel.after:
// CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK3: omp.par.pre_finalize:
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
@@ -754,8 +830,6 @@ int main (int argc, char **argv) {
// CHECK3-NEXT: [[TMP5:%.*]] = mul nsw i64 0, [[TMP2]]
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP4]], i64 [[TMP5]]
// CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0
// CHECK3-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]]
// CHECK3: omp.par.region.parallel.after:
// CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK3: omp.par.pre_finalize:
// CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
@@ -829,9 +903,7 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG35]]
// CHECK4-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[LOADGEP_VLA]], i64 1, !dbg [[DBG35]]
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG35]]
// CHECK4-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]], !dbg [[DBG35]]
// CHECK4: omp.par.region.parallel.after:
// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG35]]
// CHECK4: omp.par.pre_finalize:
// CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG35]]
// CHECK4: omp.par.outlined.exit.exitStub:
@@ -899,9 +971,7 @@ int main (int argc, char **argv) {
// CHECK4-NEXT: [[TMP5:%.*]] = mul nsw i64 0, [[TMP2]], !dbg [[DBG66]]
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP4]], i64 [[TMP5]], !dbg [[DBG66]]
// CHECK4-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG66]]
// CHECK4-NEXT: br label [[OMP_PAR_REGION_PARALLEL_AFTER:%.*]], !dbg [[DBG67:![0-9]+]]
// CHECK4: omp.par.region.parallel.after:
// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]]
// CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG67:![0-9]+]]
// CHECK4: omp.par.pre_finalize:
// CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG67]]
// CHECK4: omp.par.outlined.exit.exitStub:

File diff suppressed because it is too large

View File

@@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
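The hunks below come from a C copyin test. A minimal sketch of the function behind the first hunk, assumed from the CHECK lines rather than quoted from the test source, compiled with -fopenmp:

int x;
#pragma omp threadprivate(x)

void test_omp_parallel_copyin(int *a) {
  x = 1;                         // master's copy, stored before the fork
#pragma omp parallel copyin(x)
#pragma omp for
  for (int i = 0; i < 100; i++)  // 0..99 matches the static-init bounds
    a[i] = i + x;                // threadprivate x reloaded each iteration
}

The copyin(x) clause is what produces the copyin.not.master pointer compare (each worker copies the master's x unless its own threadlocal address is already the master's) followed by the implicit __kmpc_barrier before the worksharing loop.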
@@ -55,526 +54,479 @@ void test_omp_parallel_master_copyin(int *a) {
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_copyin
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 1, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32** [[A_ADDR]], i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP2]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: ret void
//
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 1, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32** [[A_ADDR]], i32* [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP6:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i32* [[TMP5]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP6]], [[TMP7]]
// CHECK-NEXT: br i1 [[TMP8]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: store i32 [[TMP9]], i32* [[TMP5]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP14]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP22]]
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
// CHECK-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP29]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_copyin
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 2, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32** [[A_ADDR]], i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP2]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 2, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32** [[A_ADDR]], i32* [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP6:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i32* [[TMP5]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP6]], [[TMP7]]
// CHECK-NEXT: br i1 [[TMP8]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: store i32 [[TMP9]], i32* [[TMP5]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP14]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP22]]
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_simd_copyin
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 3, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32** [[A_ADDR]], i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP2]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 3, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32** [[A_ADDR]], i32* [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP6:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i32* [[TMP5]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP6]], [[TMP7]]
// CHECK-NEXT: br i1 [[TMP8]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: store i32 [[TMP9]], i32* [[TMP5]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP14]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]]
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[TMP21:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP22]]
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP27]])
// CHECK-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
// CHECK-NEXT: br i1 [[TMP29]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK: .omp.final.then:
// CHECK-NEXT: store i32 100, i32* [[I]], align 4
// CHECK-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK: .omp.final.done:
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, !llvm.access.group
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP0]], align 8, !llvm.access.group
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], i32* [[ARRAYIDX]], align 4, !llvm.access.group
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK: .omp.final.then:
// CHECK-NEXT: store i32 100, i32* [[I]], align 4
// CHECK-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK: .omp.final.done:
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_sections_copyin
// CHECK-SAME: (i32* noundef [[A:%.*]], i32* noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 4, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32** [[A_ADDR]], i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: store i32** [[B_ADDR]], i32*** [[TMP2]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK-NEXT: [[TMP4:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32* [[TMP4]], i32** [[TMP3]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noundef [[A:%.*]], i32* noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 4, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32**, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32** [[A_ADDR]], i32** [[B_ADDR]], i32* [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 2
// CHECK-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP8:%.*]] = ptrtoint i32* [[TMP6]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = ptrtoint i32* [[TMP7]] to i64
// CHECK-NEXT: [[TMP10:%.*]] = icmp ne i64 [[TMP8]], [[TMP9]]
// CHECK-NEXT: br i1 [[TMP10]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK-NEXT: store i32 [[TMP11]], i32* [[TMP7]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP13]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP15]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = icmp slt i32 [[TMP16]], 1
// CHECK-NEXT: [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP16]], i32 1
// CHECK-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 [[TMP19]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: switch i32 [[TMP22]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK-NEXT: ]
// CHECK: .omp.sections.case:
// CHECK-NEXT: [[TMP23:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK-NEXT: [[TMP25:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK-NEXT: store i32 [[TMP24]], i32* [[TMP25]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.case1:
// CHECK-NEXT: [[TMP26:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK-NEXT: [[TMP28:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT: store i32 [[TMP27]], i32* [[TMP28]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.exit:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: [[TMP30:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB4]], i32 [[TMP31]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
// CHECK-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[TMP6]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT: store i32 [[TMP7]], i32* [[TMP3]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]])
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = icmp slt i32 [[TMP12]], 1
// CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 1
// CHECK-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: switch i32 [[TMP18]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK-NEXT: ]
// CHECK: .omp.sections.case:
// CHECK-NEXT: [[TMP19:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK-NEXT: store i32 [[TMP20]], i32* [[TMP21]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.case1:
// CHECK-NEXT: [[TMP22:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK-NEXT: store i32 [[TMP23]], i32* [[TMP24]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.exit:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP27]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_master_copyin
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 5, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK-NEXT: store i32** [[A_ADDR]], i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP2]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK-NEXT: ret void
//
// CHECK-SAME: (i32* noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: store i32 5, i32* [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32** [[A_ADDR]], i32* [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP6:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK-NEXT: [[TMP7:%.*]] = ptrtoint i32* [[TMP5]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP6]], [[TMP7]]
// CHECK-NEXT: br i1 [[TMP8]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: store i32 [[TMP9]], i32* [[TMP5]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]])
// CHECK-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]])
// CHECK-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK-NEXT: br i1 [[TMP15]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK: omp_if.then:
// CHECK-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP16]], 100
// CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
// CHECK: for.end:
// CHECK-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]])
// CHECK-NEXT: br label [[OMP_IF_END]]
// CHECK: omp_if.end:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_ZTW1x
// CHECK-SAME: () #[[ATTR5:[0-9]+]] comdat {
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: ret i32* [[TMP1]]
//
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP10]])
// CHECK-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK-NEXT: br i1 [[TMP12]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK: omp_if.then:
// CHECK-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP13]], 100
// CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @x)
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP16]]
// CHECK-NEXT: [[TMP17:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP17]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND]]
// CHECK: for.end:
// CHECK-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP10]])
// CHECK-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK: omp_if.end:
// CHECK-NEXT: ret void
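// NOTE: the C/C++ source producing the CHECK lines above is not included in
// this rendering. The following is a reconstruction inferred from the IR, not
// the verbatim test file. Assumed details: extern "C" linkage (the symbols are
// unmangled), a 100-iteration loop (matching the 0..99 bounds), two sections
// (matching the upper bound of 1), and TLS-based threadprivate (matching the
// @_ZTW1x wrapper).

int x;
#pragma omp threadprivate(x)

extern "C" void test_omp_parallel_for_copyin(int *a) {
  x = 2;                                  // matches "store i32 2" before the fork
#pragma omp parallel for copyin(x)        // copyin -> copyin.not.master + __kmpc_barrier
  for (int i = 0; i < 100; ++i)
    a[i] = i + x;
}

extern "C" void test_omp_parallel_for_simd_copyin(int *a) {
  x = 3;
#pragma omp parallel for simd copyin(x)   // simd adds the !llvm.access.group metadata
  for (int i = 0; i < 100; ++i)
    a[i] = i + x;
}

extern "C" void test_omp_parallel_sections_copyin(int *a, int *b) {
  x = 4;
#pragma omp parallel sections copyin(x)   // two sections -> sections UB of 1
  {
#pragma omp section
    *a = x;
#pragma omp section
    *b = x;
  }
}

extern "C" void test_omp_parallel_master_copyin(int *a) {
  x = 5;
#pragma omp parallel master copyin(x)     // guarded by __kmpc_master/__kmpc_end_master
  for (int i = 0; i < 100; ++i)
    a[i] = i + x;
}

// Under this reading, the paired CHECK blocks above differ only in how a and x
// reach the outlined function: the reverted scheme passed one %struct.anon*
// aggregate, while the restored scheme passes separate i32**/i32* arguments to
// __kmpc_fork_call. The copyin sequence itself (ptrtoint compare,
// copyin.not.master, barrier) is identical in both.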

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -38,143 +38,128 @@ int main() {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], align 4
// CHECK1-NEXT: [[A1:%.*]] = alloca [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_7:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A]], i32 0, i32 1
// CHECK1-NEXT: store i8 0, i8* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 9
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 1
// CHECK1-NEXT: store i8 0, i8* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP13]], 5
// CHECK1-NEXT: br i1 [[CMP2]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP11]], 5
// CHECK1-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: store i32 0, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP16:%.*]] = icmp sle i32 [[TMP15]], [[TMP14]]
// CHECK1-NEXT: br i1 [[TMP16]], label [[LP_COND_THEN:%.*]], label [[LP_COND_EXIT:%.*]]
// CHECK1-NEXT: store i32 0, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP12]]
// CHECK1-NEXT: br i1 [[TMP14]], label [[LP_COND_THEN:%.*]], label [[LP_COND_EXIT:%.*]]
// CHECK1: lp_cond_then:
// CHECK1-NEXT: store i32 [[TMP14]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP17]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP12]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP15]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT]]
// CHECK1: lp_cond_exit:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 10)
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[I]], i32** [[TMP19]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP22:%.*]] = icmp sle i32 [[TMP21]], [[TMP20]]
// CHECK1-NEXT: br i1 [[TMP22]], label [[LP_COND_THEN3:%.*]], label [[LP_COND_EXIT4:%.*]]
// CHECK1: lp_cond_then3:
// CHECK1-NEXT: store i32 [[TMP20]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP23]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT4]]
// CHECK1: lp_cond_exit4:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP24]] monotonic, align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP28:%.*]] = icmp sle i32 [[TMP27]], [[TMP26]]
// CHECK1-NEXT: br i1 [[TMP28]], label [[LP_COND_THEN5:%.*]], label [[LP_COND_EXIT6:%.*]]
// CHECK1: lp_cond_then5:
// CHECK1-NEXT: store i32 [[TMP26]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP29]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT6]]
// CHECK1: lp_cond_exit6:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 10)
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_7]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_7]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[I]], i32** [[TMP31]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_7]])
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A]], i32 0, i32 1
// CHECK1-NEXT: [[TMP33:%.*]] = load i8, i8* [[TMP32]], align 4
// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i8 [[TMP33]], 0
// CHECK1-NEXT: br i1 [[TMP34]], label [[LPC_THEN:%.*]], label [[LPC_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[TMP2]], i32* [[I]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP18:%.*]] = icmp sle i32 [[TMP17]], [[TMP16]]
// CHECK1-NEXT: br i1 [[TMP18]], label [[LP_COND_THEN4:%.*]], label [[LP_COND_EXIT5:%.*]]
// CHECK1: lp_cond_then4:
// CHECK1-NEXT: store i32 [[TMP16]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP19]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT5]]
// CHECK1: lp_cond_exit5:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP20]] monotonic, align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP24:%.*]] = icmp sle i32 [[TMP23]], [[TMP22]]
// CHECK1-NEXT: br i1 [[TMP24]], label [[LP_COND_THEN6:%.*]], label [[LP_COND_EXIT7:%.*]]
// CHECK1: lp_cond_then6:
// CHECK1-NEXT: store i32 [[TMP22]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP25]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT7]]
// CHECK1: lp_cond_exit7:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP2]], i32* [[I]])
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL]], %struct.lasprivate.conditional* [[A1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i8, i8* [[TMP26]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i8 [[TMP27]], 0
// CHECK1-NEXT: br i1 [[TMP28]], label [[LPC_THEN:%.*]], label [[LPC_DONE:%.*]]
// CHECK1: lpc.then:
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP37:%.*]] = icmp sle i32 [[TMP36]], [[TMP35]]
// CHECK1-NEXT: br i1 [[TMP37]], label [[LP_COND_THEN8:%.*]], label [[LP_COND_EXIT9:%.*]]
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP31:%.*]] = icmp sle i32 [[TMP30]], [[TMP29]]
// CHECK1-NEXT: br i1 [[TMP31]], label [[LP_COND_THEN8:%.*]], label [[LP_COND_EXIT9:%.*]]
// CHECK1: lp_cond_then8:
// CHECK1-NEXT: store i32 [[TMP35]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP38]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP29]], i32* @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP32]], i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT9]]
// CHECK1: lp_cond_exit9:
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: br label [[LPC_DONE]]
// CHECK1: lpc.done:
// CHECK1-NEXT: br label [[IF_END]]
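A minimal sketch of the conditional-lastprivate protocol the hunk above checks (names such as pl_cond_update are hypothetical, and std::mutex stands in for the __kmpc_critical/__kmpc_end_critical guard; the real bookkeeping lives in the pl_cond globals matched by the regexes):

#include <mutex>

static std::mutex pl_cond_mutex; // stands in for the critical section
static int pl_cond_iv = -1;      // @.pl_cond...: highest iteration that assigned 'a'
static int pl_cond_val = 0;      // @pl_cond...: the value 'a' took in that iteration

// Called when the conditionally-lastprivate 'a' is assigned in iteration 'iv';
// keeps the assignment from the numerically largest iteration (the icmp sle above).
void pl_cond_update(int iv, int a_private) {
  std::lock_guard<std::mutex> lock(pl_cond_mutex);
  if (pl_cond_iv <= iv) {
    pl_cond_iv = iv;
    pl_cond_val = a_private;
  }
}

// Run once by the thread that saw the lastprivate-conditional flag set
// (lpc.then): publish the tracked value back to the original variable.
void pl_cond_finalize(int *a_orig) { *a_orig = pl_cond_val; }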
@@ -183,70 +168,69 @@ int main() {
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP33]], 1
// CHECK1-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP6]])
// CHECK1-NEXT: br i1 [[TMP41]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP4]])
// CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
// CHECK1: .omp.lastprivate.then:
// CHECK1-NEXT: [[TMP42:%.*]] = load i32, i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP42]], i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP43]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP36]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP37]], i32* [[TMP0]], align 4
// CHECK1-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
// CHECK1: .omp.lastprivate.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i32* [[A]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP11]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP12]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP7]], i32 1, i64 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: store i32 [[ADD1]], i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP10]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP15]] monotonic, align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP12]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
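The reduction epilogue checked above follows the usual libomp contract; a hedged sketch, assuming __kmpc_reduce_nowait returns 1 when the thread should combine its private copy into the original directly (followed by __kmpc_end_reduce_nowait) and 2 when it should use an atomic update, as the two switch cases suggest:

// ret: result of __kmpc_reduce_nowait; a_private: this thread's local copy.
void reduce_epilogue(int ret, int *a_orig, int a_private) {
  switch (ret) {
  case 1: // .omp.reduction.case1: non-atomic combine, then __kmpc_end_reduce_nowait
    *a_orig += a_private;
    break;
  case 2: // .omp.reduction.case2: lock-free combine
    __atomic_fetch_add(a_orig, a_private, __ATOMIC_RELAXED); // atomicrmw add monotonic
    break;
  default: // .omp.reduction.default: nothing for this thread to do
    break;
  }
}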
@@ -277,23 +261,22 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP5]] monotonic, align 4
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP2]] to %struct.lasprivate.conditional*
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], %struct.lasprivate.conditional* [[TMP7]], i32 0, i32 1
// CHECK1-NEXT: store atomic volatile i8 1, i8* [[TMP8]] unordered, align 1
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP2]] monotonic, align 4
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP0]] to %struct.lasprivate.conditional*
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], %struct.lasprivate.conditional* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: store atomic volatile i8 1, i8* [[TMP5]] unordered, align 1
// CHECK1-NEXT: ret void
//
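Seen end to end, the diff above contrasts the two capture ABIs at every fork point: the removed lines pack all captures into one %struct.anon context passed to the outlined function, while the restored lines forward each capture as its own trailing pointer argument of the variadic __kmpc_fork_call. A declaration-level sketch for a single captured int (names hypothetical, not from the test file):

struct anon { int *a; };                                   // %struct.anon context
void outlined_aggregate(int *gtid, int *btid, anon *ctx);  // removed: one context arg
void outlined_direct(int *gtid, int *btid, int *a);        // restored: per-capture args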

View File

@@ -93,22 +93,17 @@ int main() {
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[PVAR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[LVAR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TEST]], i32 0, i32 0
// CHECK1-NEXT: store float* [[F]], float** [[PVAR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[LVAR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store float** [[PVAR]], float*** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i64* [[LVAR]], i64** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64*)* @.omp_outlined. to void (i32*, i32*, ...)*), float** [[PVAR]], i64* [[LVAR]])
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP0]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
@@ -122,11 +117,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], float** noundef nonnull align 8 dereferenceable(8) [[PVAR:%.*]], i64* noundef nonnull align 8 dereferenceable(8) [[LVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[PVAR_ADDR:%.*]] = alloca float**, align 8
// CHECK1-NEXT: [[LVAR_ADDR:%.*]] = alloca i64*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca float*, align 8
@@ -136,90 +132,88 @@ int main() {
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[PVAR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[LVAR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[PVAR2:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[LVAR3:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i64*, i64** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load float*, float** [[TMP2]], align 8
// CHECK1-NEXT: store float* [[TMP5]], float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 8
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT: store float** [[PVAR]], float*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT: store i64* [[LVAR]], i64** [[LVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64*, i64** [[LVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[TMP0]], align 8
// CHECK1-NEXT: store float* [[TMP2]], float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP16]], 3
// CHECK1-NEXT: [[IDX_EXT:%.*]] = sext i32 [[MUL3]] to i64
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDX_EXT]]
// CHECK1-NEXT: store float* [[ADD_PTR]], float** [[PVAR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[TMP18]], 3
// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[MUL4]] to i64
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP17]], [[CONV]]
// CHECK1-NEXT: store i64 [[ADD5]], i64* [[LVAR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load float*, float** [[PVAR]], align 8
// CHECK1-NEXT: [[ADD_PTR6:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 3
// CHECK1-NEXT: store float* [[ADD_PTR6]], float** [[PVAR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, i64* [[LVAR]], align 8
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP20]], 3
// CHECK1-NEXT: store i64 [[ADD7]], i64* [[LVAR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load float*, float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP13]], 3
// CHECK1-NEXT: [[IDX_EXT:%.*]] = sext i32 [[MUL5]] to i64
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDX_EXT]]
// CHECK1-NEXT: store float* [[ADD_PTR]], float** [[PVAR2]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[MUL6]] to i64
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK1-NEXT: store i64 [[ADD7]], i64* [[LVAR3]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load float*, float** [[PVAR2]], align 8
// CHECK1-NEXT: [[ADD_PTR8:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 3
// CHECK1-NEXT: store float* [[ADD_PTR8]], float** [[PVAR2]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[LVAR3]], align 8
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP17]], 3
// CHECK1-NEXT: store i64 [[ADD9]], i64* [[LVAR3]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
// CHECK1-NEXT: br i1 [[TMP20]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1: .omp.linear.pu:
// CHECK1-NEXT: [[TMP24:%.*]] = load float*, float** [[PVAR]], align 8
// CHECK1-NEXT: store float* [[TMP24]], float** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[LVAR]], align 8
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP4]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = load float*, float** [[PVAR2]], align 8
// CHECK1-NEXT: store float* [[TMP21]], float** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, i64* [[LVAR3]], align 8
// CHECK1-NEXT: store i64 [[TMP22]], i64* [[TMP1]], align 8
// CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1: .omp.linear.pu.done:
// CHECK1-NEXT: ret void
@@ -231,16 +225,11 @@ int main() {
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[PVAR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[LVAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TEST]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[F]], i32** [[PVAR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[LVAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32** [[PVAR]], i32*** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[LVAR]], i32** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32** [[PVAR]], i32* [[LVAR]])
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: ret i32 0
//
@@ -277,11 +266,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[PVAR:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[LVAR:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[PVAR_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[LVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32*, align 8
@@ -291,89 +281,87 @@ int main() {
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[PVAR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[LVAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[PVAR2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[LVAR3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT: store i32** [[PVAR]], i32*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT: store i32* [[LVAR]], i32** [[LVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32**, i32*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[LVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP9]], 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK1-NEXT: [[IDX_EXT:%.*]] = sext i32 [[MUL3]] to i64
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP15]], i64 [[IDX_EXT]]
// CHECK1-NEXT: store i32* [[ADD_PTR]], i32** [[PVAR]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP17]], [[MUL4]]
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[LVAR]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32*, i32** [[PVAR]], align 8
// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i32 1
// CHECK1-NEXT: store i32* [[INCDEC_PTR]], i32** [[PVAR]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[LVAR]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[LVAR]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK1-NEXT: [[IDX_EXT:%.*]] = sext i32 [[MUL5]] to i64
// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP12]], i64 [[IDX_EXT]]
// CHECK1-NEXT: store i32* [[ADD_PTR]], i32** [[PVAR2]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP14]], [[MUL6]]
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[LVAR3]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32*, i32** [[PVAR2]], align 8
// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 1
// CHECK1-NEXT: store i32* [[INCDEC_PTR]], i32** [[PVAR2]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[LVAR3]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[LVAR3]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
// CHECK1-NEXT: br i1 [[TMP20]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1: .omp.linear.pu:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[PVAR]], align 8
// CHECK1-NEXT: store i32* [[TMP24]], i32** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[LVAR]], align 4
// CHECK1-NEXT: store i32 [[TMP25]], i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32*, i32** [[PVAR2]], align 8
// CHECK1-NEXT: store i32* [[TMP21]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[LVAR3]], align 4
// CHECK1-NEXT: store i32 [[TMP22]], i32* [[TMP1]], align 4
// CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1: .omp.linear.pu.done:
// CHECK1-NEXT: ret void
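The outlined bodies above implement the usual linear-clause recipe: capture the start value on entry, recompute private = start + iv*step on every iteration, and have the last-iteration thread write the final value back in .omp.linear.pu. A compact sketch for the i32* case with unit step (names hypothetical):

void linear_region(int **pvar, int *lvar, int lb, int ub) {
  int *pvar_start = *pvar;        // %.linear.start
  int  lvar_start = *lvar;        // %.linear.start1
  int *pvar_priv = pvar_start;
  int  lvar_priv = lvar_start;
  for (int iv = lb; iv <= ub; ++iv) {
    pvar_priv = pvar_start + iv;  // private copy = start + iv*step
    lvar_priv = lvar_start + iv;
    ++pvar_priv;                  // loop body: pvar++, lvar++
    ++lvar_priv;
  }
  *pvar = pvar_priv;              // .omp.linear.pu: write-back, done only by
  *lvar = lvar_priv;              // the thread with .omp.is_last set
}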
@@ -429,11 +417,11 @@ int main() {
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK3-NEXT: [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
@@ -442,77 +430,75 @@ int main() {
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[G1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP13]], 5
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[MUL2]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[G]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[G]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 5
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[G]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP15]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP11]], 5
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[MUL3]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[G1]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[G1]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 5
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[G1]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G1]], i32** [[TMP13]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK3-NEXT: br i1 [[TMP18]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK3: .omp.linear.pu:
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[G]], align 4
// CHECK3-NEXT: store i32 [[TMP19]], i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[G1]], align 4
// CHECK3-NEXT: store i32 [[TMP17]], i32* [[TMP0]], align 4
// CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK3: .omp.linear.pu.done:
// CHECK3-NEXT: ret void
@@ -534,22 +520,19 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store i32* @g, i32** [[TMP0]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @g)
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
@@ -558,57 +541,55 @@ int main() {
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]])
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP13]], 5
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[MUL2]]
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[G]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[G]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 5
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[G]], align 4
// CHECK4-NEXT: store i32 1, i32* [[G]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP11]], 5
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[MUL3]]
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[G1]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[G1]], align 4
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 5
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[G1]], align 4
// CHECK4-NEXT: store i32 1, i32* [[G1]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1
@@ -620,33 +601,33 @@ int main() {
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP15:%.*]] = load volatile i32, i32* [[G]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP15]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP16]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP17]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP20]](i8* noundef [[TMP18]])
// CHECK4-NEXT: [[TMP13:%.*]] = load volatile i32, i32* [[G1]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP13]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP14]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8* [[TMP17]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP18]](i8* noundef [[TMP16]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK4-NEXT: br i1 [[TMP23]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK4-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4: .omp.linear.pu:
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[G]], align 4
// CHECK4-NEXT: store i32 [[TMP24]], i32* [[TMP2]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[G1]], align 4
// CHECK4-NEXT: store i32 [[TMP22]], i32* [[TMP0]], align 4
// CHECK4-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4: .omp.linear.pu.done:
// CHECK4-NEXT: ret void


@@ -40,297 +40,291 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i8** [[TMP2]], i8*** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8** [[TMP0]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP27:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP28:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP5]], i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP7]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP8]], i64 9
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP9]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = add nuw i64 [[TMP13]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP16:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP14]], align 16
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP17]]
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP5]], i64 9
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1
// CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP13:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP13]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint i8* [[TMP19]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP23]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = ptrtoint i8* [[TMP16]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP19:%.*]] = sub i64 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: [[TMP20:%.*]] = sdiv exact i64 [[TMP19]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP20]]
// CHECK1-NEXT: store i8** [[_TMP6]], i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[_TMP6]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i32* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP33]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP34]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP36]], i64 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i64 0
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = sext i32 [[TMP38]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP39]]
// CHECK1-NEXT: [[TMP40:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP40]], i64 9
// CHECK1-NEXT: [[TMP41:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP41]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP45:%.*]] = sub i64 [[TMP43]], [[TMP44]]
// CHECK1-NEXT: [[TMP46:%.*]] = sdiv exact i64 [[TMP45]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP47:%.*]] = add nuw i64 [[TMP46]], 1
// CHECK1-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32, i32* [[TMP54]], align 4
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP57:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP55]], i32 1, i32 2, i8* [[TMP56]])
// CHECK1-NEXT: store i8* [[TMP57]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i32, i32* [[TMP58]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP59]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP60:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP60]], 9
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i32* [[TMP30]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8*, i8** [[TMP33]], i64 0
// CHECK1-NEXT: [[TMP34:%.*]] = load i8*, i8** [[ARRAYIDX8]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, i8* [[TMP34]], i64 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP36]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8*, i8** [[TMP37]], i64 9
// CHECK1-NEXT: [[TMP38:%.*]] = load i8*, i8** [[ARRAYIDX11]], align 8
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, i8* [[TMP38]], i64 [[LB_ADD_LEN10]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX9]], i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint i8* [[ARRAYIDX12]] to i64
// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint i8* [[ARRAYIDX9]] to i64
// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1
// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP45]], i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_7]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP54:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, i8* [[TMP53]])
// CHECK1-NEXT: store i8* [[TMP54]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = load i32, i32* [[TMP55]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP57:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP57]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP61]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP58]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP62]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP59]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP12:%.*]] = icmp sle i64 [[TMP63]], [[TMP64]]
// CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1-NEXT: [[TMP60:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP60]], [[TMP61]]
// CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP65:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP65]], 1
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP62]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP66]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP69:%.*]] = load i8**, i8*** [[_TMP4]], align 8
// CHECK1-NEXT: store i8** [[TMP69]], i8*** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = load i32, i32* [[TMP70]], align 4
// CHECK1-NEXT: [[TMP72:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP71]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP73:%.*]] = bitcast i8* [[TMP72]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP73]], i32 0, i32 0
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP74]], i32 0, i32 0
// CHECK1-NEXT: [[TMP76:%.*]] = load i8*, i8** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP76]], i8* align 8 [[TMP77]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP73]], i32 0, i32 1
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP78]], i32 0, i32 0
// CHECK1-NEXT: [[TMP80:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP80]], i8** [[TMP79]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load i32, i32* [[TMP81]], align 4
// CHECK1-NEXT: [[TMP83:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP82]], i8* [[TMP72]])
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP63]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP66:%.*]] = load i8**, i8*** [[_TMP5]], align 8
// CHECK1-NEXT: store i8** [[TMP66]], i8*** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = load i32, i32* [[TMP67]], align 4
// CHECK1-NEXT: [[TMP69:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP68]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP70:%.*]] = bitcast i8* [[TMP69]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP70]], i32 0, i32 0
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP71]], i32 0, i32 0
// CHECK1-NEXT: [[TMP73:%.*]] = load i8*, i8** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP73]], i8* align 8 [[TMP74]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP70]], i32 0, i32 1
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP75]], i32 0, i32 0
// CHECK1-NEXT: [[TMP77:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP77]], i8** [[TMP76]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP79:%.*]] = load i32, i32* [[TMP78]], align 4
// CHECK1-NEXT: [[TMP80:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP79]], i8* [[TMP69]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP84:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[TMP84]], 1
// CHECK1-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP81]], 1
// CHECK1-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP85:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = load i32, i32* [[TMP85]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP86]])
// CHECK1-NEXT: [[TMP87:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = load i32, i32* [[TMP87]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP88]], i32 1)
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP90:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: [[TMP82:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i32, i32* [[TMP82]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP83]])
// CHECK1-NEXT: [[TMP84:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[TMP84]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP85]], i32 1)
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP87:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP87]], i8** [[TMP86]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP88]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP90:%.*]] = inttoptr i64 [[TMP11]] to i8*
// CHECK1-NEXT: store i8* [[TMP90]], i8** [[TMP89]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP91]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP93:%.*]] = inttoptr i64 [[TMP14]] to i8*
// CHECK1-NEXT: store i8* [[TMP93]], i8** [[TMP92]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = load i32, i32* [[TMP94]], align 4
// CHECK1-NEXT: [[TMP96:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP97:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP95]], i32 2, i64 24, i8* [[TMP96]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP97]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP91:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = load i32, i32* [[TMP91]], align 4
// CHECK1-NEXT: [[TMP93:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP94:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP92]], i32 2, i64 24, i8* [[TMP93]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP94]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP98:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP99:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP98]], [[TMP99]]
// CHECK1-NEXT: store i32 [[ADD14]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP100]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP95:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP95]], [[TMP96]]
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP97]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST15:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP101:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP101]] to i32
// CHECK1-NEXT: [[TMP102:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP102]] to i32
// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[CONV]], [[CONV16]]
// CHECK1-NEXT: [[CONV18:%.*]] = trunc i32 [[ADD17]] to i8
// CHECK1-NEXT: store i8 [[CONV18]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT19]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST15]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP98:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP98]] to i32
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP99]] to i32
// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]]
// CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
// CHECK1-NEXT: store i8 [[CONV19]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP100]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done21:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP95]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP97]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done22:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP92]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP103:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP104:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP103]] monotonic, align 4
// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP105]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY22]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY23:%.*]]
// CHECK1: omp.arraycpy.body23:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST24:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST25:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP106:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP106]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]] monotonic, align 1
// CHECK1-NEXT: [[TMP100:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP101:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP100]] monotonic, align 4
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP102]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]]
// CHECK1: omp.arraycpy.body24:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP103:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP103]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP107:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP112:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP107]], i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[TMP108:%.*]] = load i8, i8* [[_TMP27]], align 1
// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP108]] to i32
// CHECK1-NEXT: [[TMP109:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP109]] to i32
// CHECK1-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV28]], [[CONV29]]
// CHECK1-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i8
// CHECK1-NEXT: store i8 [[CONV31]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP110:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP111:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP107]], i8 [[TMP110]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP112]] = extractvalue { i8, i1 } [[TMP111]], 0
// CHECK1-NEXT: [[TMP113:%.*]] = extractvalue { i8, i1 } [[TMP111]], 1
// CHECK1-NEXT: br i1 [[TMP113]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP104:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP109:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP104]], i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[TMP105:%.*]] = load i8, i8* [[_TMP28]], align 1
// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP105]] to i32
// CHECK1-NEXT: [[TMP106:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1
// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP106]] to i32
// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]]
// CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8
// CHECK1-NEXT: store i8 [[CONV32]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP108:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP104]], i8 [[TMP107]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP109]] = extractvalue { i8, i1 } [[TMP108]], 0
// CHECK1-NEXT: [[TMP110:%.*]] = extractvalue { i8, i1 } [[TMP108]], 1
// CHECK1-NEXT: br i1 [[TMP110]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST24]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP105]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY23]]
// CHECK1: omp.arraycpy.done35:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP102]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]]
// CHECK1: omp.arraycpy.done36:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP114:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP114]])
// CHECK1-NEXT: [[TMP111:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP111]])
// CHECK1-NEXT: ret void
//
//
@@ -442,7 +436,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@@ -456,7 +450,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@@ -469,29 +463,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@@ -505,7 +499,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64
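The atomic_cont/cmpxchg sequence above is clang's byte-wise atomic fallback for combining a char array-section reduction, and the kmp_task_t/privates plumbing belongs to the task-reduction runtime. A minimal sketch of the kind of source that drives these checks; the identifiers and the exact section bounds are assumptions, not taken from the diff:

// Hypothetical reduction-with-task-modifier test case (names assumed).
int main(int argc, char **argv) {
#pragma omp parallel for reduction(task, + : argc, argv[0][0 : argc])
  for (int i = 0; i < 10; ++i) {
    // Reduction copies are managed by the runtime; threads fetch their
    // private views through __kmpc_task_reduction_get_th_data.
  }
  return 0;
}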


@@ -25,20 +25,17 @@ void foo(float *c) {
// CHECK1-SAME: (float* noundef [[C:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store float** [[C_ADDR]], float*** [[TMP0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**)* @.omp_outlined. to void (i32*, i32*, ...)*), float** [[C_ADDR]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], float** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
@@ -48,58 +45,56 @@ void foo(float *c) {
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[TMP2]], align 8
// CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP3]], i64 16) ]
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load float*, float** [[TMP0]], align 8
// CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 16) ]
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 10, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
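The "align"(float* ..., i64 16) assume bundle and the !llvm.access.group loop metadata above are what an aligned clause and simd produce on this worksharing loop. A sketch of the presumed source shape; the empty loop body is an assumption:

void foo(float *c) {
#pragma omp parallel for simd aligned(c : 16) // emits the llvm.assume bundle
  for (int i = 0; i < 10; ++i)                // ten iterations: UB checked against 9
    ;
}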


@@ -70,43 +70,35 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined..1(i32* [[TMP3]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @.omp_outlined..1(i32* [[TMP2]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z9gtid_testv()
// CHECK1-NEXT: ret void
//
@@ -115,32 +107,29 @@ int main() {
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 1
// CHECK1-NEXT: [[DOTTHREADID_TEMP_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined..3(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_1]]) #[[ATTR2]]
// CHECK1-NEXT: call void @.omp_outlined..3(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @Arg, align 4
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_3]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR4]], align 4
// CHECK1-NEXT: call void @.omp_outlined..4(i32* [[DOTTHREADID_TEMP_3]], i32* [[DOTBOUND_ZERO_ADDR4]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_2]]) #[[ATTR2]]
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR2]], align 4
// CHECK1-NEXT: call void @.omp_outlined..4(i32* [[DOTTHREADID_TEMP_1]], i32* [[DOTBOUND_ZERO_ADDR2]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
@@ -150,43 +139,34 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn4v()
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn5v()
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn6v()
// CHECK1-NEXT: ret void
//
@@ -195,32 +175,29 @@ int main() {
// CHECK1-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_5:%.*]], align 1
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_6:%.*]], align 1
// CHECK1-NEXT: [[DOTTHREADID_TEMP_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..5 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined..6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon.5* [[OMP_OUTLINED_ARG_AGG_1]]) #[[ATTR2]]
// CHECK1-NEXT: call void @.omp_outlined..6(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG_ADDR]], align 4
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.6*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), %struct.anon.6* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_3]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR4]], align 4
// CHECK1-NEXT: call void @.omp_outlined..7(i32* [[DOTTHREADID_TEMP_3]], i32* [[DOTBOUND_ZERO_ADDR4]], %struct.anon.6* [[OMP_OUTLINED_ARG_AGG_2]]) #[[ATTR2]]
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR2]], align 4
// CHECK1-NEXT: call void @.omp_outlined..7(i32* [[DOTTHREADID_TEMP_1]], i32* [[DOTBOUND_ZERO_ADDR2]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
@@ -228,43 +205,34 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn1v()
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.5* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.5*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.5* [[__CONTEXT]], %struct.anon.5** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.5*, %struct.anon.5** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn2v()
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.6* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.6*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.6* [[__CONTEXT]], %struct.anon.6** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.6*, %struct.anon.6** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: call void @_Z3fn3v()
// CHECK1-NEXT: ret void
//
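Judging by the _Z9gtid_testv and _Z3fn[1-6]v callees and the serialized-versus-forked call sites, this file exercises the parallel if clause. A hedged reconstruction of its structure; the declarations and bodies are assumptions, not copied from the diff:

int Arg;
void fn1(); void fn2(); void fn3(); // called from a template twin of main
void fn4(); void fn5(); void fn6();

void gtid_test() {
#pragma omp parallel             // outer region: __kmpc_fork_call
#pragma omp parallel if (false)  // inner region: serialized, calls gtid_test
  gtid_test();
}

int main() {
#pragma omp parallel if (true)   // forked: .omp_outlined..2 -> fn4
  fn4();
#pragma omp parallel if (false)  // serialized: .omp_outlined..3 -> fn5
  fn5();
#pragma omp parallel if (Arg)    // runtime branch: omp_if.then / omp_if.else
  fn6();
  return 0;
}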


@@ -18,45 +18,37 @@ void foo() {
// CHECK-LABEL: define {{[^@]+}}@_Z3foov
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA3:![0-9]+]]
// CHECK-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
// CHECK-NEXT: ret void
//
//
// CHECK: Function Attrs: noinline norecurse nounwind
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: ret void
//
//
// CHECK: Function Attrs: norecurse nounwind
// CHECK: Function Attrs: alwaysinline norecurse nounwind
// CHECK-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: ret void
//
//
@@ -64,44 +56,36 @@ void foo() {
// CHECK-NOINLINE-LABEL: define {{[^@]+}}@_Z3foov
// CHECK-NOINLINE-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NOINLINE-NEXT: entry:
// CHECK-NOINLINE-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NOINLINE-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK-NOINLINE-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NOINLINE-NEXT: [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NOINLINE-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NOINLINE-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK-NOINLINE-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA3:![0-9]+]]
// CHECK-NOINLINE-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK-NOINLINE-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]) #[[ATTR2:[0-9]+]]
// CHECK-NOINLINE-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2:[0-9]+]]
// CHECK-NOINLINE-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK-NOINLINE-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
// CHECK-NOINLINE-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
// CHECK-NOINLINE-NEXT: ret void
//
//
// CHECK-NOINLINE: Function Attrs: noinline norecurse nounwind
// CHECK-NOINLINE-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK-NOINLINE-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NOINLINE-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NOINLINE-NEXT: entry:
// CHECK-NOINLINE-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NOINLINE-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NOINLINE-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NOINLINE-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8, !tbaa [[TBAA7:![0-9]+]]
// CHECK-NOINLINE-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NOINLINE-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NOINLINE-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NOINLINE-NEXT: ret void
//
//
// CHECK-NOINLINE: Function Attrs: noinline norecurse nounwind
// CHECK-NOINLINE: Function Attrs: alwaysinline norecurse nounwind
// CHECK-NOINLINE-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK-NOINLINE-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK-NOINLINE-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NOINLINE-NEXT: entry:
// CHECK-NOINLINE-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NOINLINE-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NOINLINE-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NOINLINE-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NOINLINE-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NOINLINE-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8, !tbaa [[TBAA7]]
// CHECK-NOINLINE-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NOINLINE-NEXT: ret void
//
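The notable change in this file is in the attribute groups: after the revert the forked .omp_outlined..1 is tagged alwaysinline norecurse nounwind, where the aggregate scheme had left it noinline. A plausible source with one compile-time-serialized region and one real fork (assumed; the diff shows only the generated checks):

void foo() {
#pragma omp parallel if (0) // constant-false if: __kmpc_serialized_parallel
  {}
#pragma omp parallel        // real fork: __kmpc_fork_call
  {}
}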


@@ -296,39 +296,35 @@ void parallel_master_allocate() {
// CHECK1-LABEL: define {{[^@]+}}@_Z15parallel_masterv
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: invoke void @_Z3foov()
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: unreachable
//
//
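The mangled name _Z15parallel_masterv and the parallel_master_allocate hunk context identify this as the parallel master codegen test; the invoke/terminate.lpad pair above comes from calling a potentially-throwing function inside the master region. A sketch of the basic case, with foo assumed to be an ordinary external function:

void foo(); // may throw, hence the invoke + terminate landing pad

void parallel_master() {
#pragma omp parallel master
  foo();
}
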
@@ -343,32 +339,28 @@ void parallel_master_allocate() {
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK5-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK5-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK5-NEXT: store i32 [[INC]], i32* [[A]], align 4
// CHECK5-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: ret void
@@ -378,35 +370,30 @@ void parallel_master_allocate() {
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK9-NEXT: store i32* [[A]], i32** [[TMP0]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK9-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK9-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK9-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK9: omp_if.then:
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK9-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK9-NEXT: store i32 [[INC]], i32* [[TMP2]], align 4
// CHECK9-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[INC:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK9-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: br label [[OMP_IF_END]]
// CHECK9: omp_if.end:
// CHECK9-NEXT: ret void
@@ -416,38 +403,35 @@ void parallel_master_allocate() {
// CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: store i32 [[TMP1]], i32* [[TMP0]], align 4
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK13-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK13-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK13-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK13-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK13-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK13-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK13: omp_if.then:
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK13-NEXT: store i32 [[INC]], i32* [[A]], align 4
// CHECK13-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK13-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK13-NEXT: store i32 [[INC]], i32* [[CONV]], align 4
// CHECK13-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK13-NEXT: br label [[OMP_IF_END]]
// CHECK13: omp_if.end:
// CHECK13-NEXT: ret void
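This CHECK13 block shows the restored calling convention for a firstprivate scalar: the int is packed into an i64 at the call site (A_CASTED) and recovered in the outlined body through an i64-to-i32 bitcast of the parameter slot, rather than being copied into a %struct.anon field. Presumed source, with the function name inferred from the CHECK pattern:

void parallel_master_firstprivate() { // name assumed
  int a = 0;
#pragma omp parallel master firstprivate(a) // a passed by value, packed in i64
  a++;
}
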
@@ -457,17 +441,18 @@ void parallel_master_allocate() {
// CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK17-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK17-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[Y_CASTED1:%.*]] = alloca i64, align 8
// CHECK17-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[A]])
// CHECK17-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK17-NEXT: store %struct.St* [[A]], %struct.St** [[TMP0]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[TMP3]], align 4
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[Y_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[Y_CASTED1]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[Y_CASTED1]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.St*, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.St* [[A]], i64 [[TMP1]], i64 [[TMP3]])
// CHECK17-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR4:[0-9]+]]
// CHECK17-NEXT: ret void
//
@ -483,53 +468,49 @@ void parallel_master_allocate() {
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.St* noundef nonnull align 4 dereferenceable(8) [[A:%.*]], i64 noundef [[Y:%.*]], i64 noundef [[Y1:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK17-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[Y1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[Y_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A4:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: [[TMP2:%.*]] = load %struct.St*, %struct.St** [[TMP1]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 8
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[Y]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK17-NEXT: store i32 [[TMP6]], i32* [[Y1]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = bitcast %struct.St* [[A]] to i8*
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast %struct.St* [[TMP2]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 8, i1 false)
// CHECK17-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK17-NEXT: br i1 [[TMP12]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK17-NEXT: store %struct.St* [[A]], %struct.St** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8
// CHECK17-NEXT: store i64 [[Y1]], i64* [[Y_ADDR2]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.St*, %struct.St** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[Y_ADDR2]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = bitcast %struct.St* [[A4]] to i8*
// CHECK17-NEXT: [[TMP2:%.*]] = bitcast %struct.St* [[TMP0]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP1]], i8* align 4 [[TMP2]], i64 8, i1 false)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK17-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK17-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 0
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[A2]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[A2]], align 4
// CHECK17-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 1
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK17-NEXT: store i32 [[ADD3]], i32* [[B]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[Y]], align 4
// CHECK17-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK17-NEXT: store i32 [[INC]], i32* [[Y]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK17-NEXT: store i32 [[INC4]], i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK17-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A4]], i32 0, i32 0
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[A5]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[A5]], align 4
// CHECK17-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A4]], i32 0, i32 1
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[ADD6]], i32* [[B]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK17-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK17-NEXT: store i32 [[INC]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: [[INC7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK17-NEXT: store i32 [[INC7]], i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR4]]
// CHECK17-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[A4]]) #[[ATTR4]]
// CHECK17-NEXT: ret void
//
//
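The CHECK17 blocks above belong to the default(firstprivate) case. The mangled names in the checks pin the source shape down fairly well: _ZN2StC1Ev/_ZN2StD1Ev are St's constructor and destructor, _ZZ36parallel_master_default_firstprivatevE1y is a function-local static y, and _ZN2St1yE is a static data member St::y; the field names a and b are suggested by the value names on the getelementptr results. A sketch consistent with those symbols:

struct St {
  int a;
  int b;
  static int y;  // @_ZN2St1yE in the checks
  St();          // @_ZN2StC1Ev
  ~St();         // @_ZN2StD1Ev
};

void parallel_master_default_firstprivate() {
  St a;
  static int y;  // @_ZZ36parallel_master_default_firstprivatevE1y
#pragma omp parallel master default(firstprivate)
  {
    a.a += 1;  // the ADD on field 0 of the memcpy'd private copy
    a.b += 1;  // the ADD on field 1
    y++;       // the INC on the captured local static
    St::y++;   // the INC stored straight back to @_ZN2St1yE
  }
}

The exact statements inside the region are inferred from the checks rather than quoted from the test; what the diff itself shows is only the capture mechanics changing (one %struct.anon argument before the revert, a %struct.St* plus two widened i64 captures after it).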
@ -569,38 +550,35 @@ void parallel_master_allocate() {
// CHECK21-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK21-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: store i32 [[TMP1]], i32* [[TMP0]], align 4
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK21-NEXT: store i32 [[TMP2]], i32* [[A]], align 4
// CHECK21-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK21-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK21-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK21-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK21-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK21: omp_if.then:
// CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK21-NEXT: store i32 [[INC]], i32* [[A]], align 4
// CHECK21-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK21-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK21-NEXT: store i32 [[INC]], i32* [[CONV]], align 4
// CHECK21-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT: br label [[OMP_IF_END]]
// CHECK21: omp_if.end:
// CHECK21-NEXT: ret void
@ -609,44 +587,40 @@ void parallel_master_allocate() {
// CHECK25-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
// CHECK25-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK25-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK25-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK25-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32*
// CHECK25-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK25-NEXT: [[TMP6:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP5]]
// CHECK25-NEXT: br i1 [[TMP6]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK25-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
// CHECK25-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK25-NEXT: [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP4]]
// CHECK25-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK25: copyin.not.master:
// CHECK25-NEXT: [[TMP7:%.*]] = load i32, i32* @a, align 4
// CHECK25-NEXT: store i32 [[TMP7]], i32* [[TMP4]], align 4
// CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK25-NEXT: store i32 [[TMP6]], i32* [[TMP3]], align 4
// CHECK25-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK25: copyin.not.master.end:
// CHECK25-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK25-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK25-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK25-NEXT: br i1 [[TMP9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK25-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK25-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK25-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK25-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK25: omp_if.then:
// CHECK25-NEXT: [[TMP10:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK25-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK25-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK25-NEXT: store i32 [[INC]], i32* [[TMP11]], align 4
// CHECK25-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK25-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK25-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK25-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK25-NEXT: store i32 [[INC]], i32* [[TMP10]], align 4
// CHECK25-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK25-NEXT: br label [[OMP_IF_END]]
// CHECK25: omp_if.end:
// CHECK25-NEXT: ret void
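The CHECK25 block above and the CHECK29 block below both cover parallel_master_copyin (the mangled label _Z22parallel_master_copyinv appears in both); they differ only in how the threadprivate variable is lowered. CHECK25 goes through the runtime cache (__kmpc_threadprivate_cached), the lowering typically produced by a -fnoopenmp-use-tls RUN line, while CHECK29 uses native TLS via llvm.threadlocal.address. A sketch of the source both are checking:

int a;
#pragma omp threadprivate(a)

void parallel_master_copyin() {
  // copyin broadcasts the master thread's value of 'a' into the other
  // threads' threadprivate copies (the copyin.not.master blocks), a
  // barrier synchronizes, then only the master thread increments.
#pragma omp parallel master copyin(a)
  a++;
}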
@ -655,50 +629,45 @@ void parallel_master_allocate() {
// CHECK29-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
// CHECK29-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK29-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK29-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: store i32* [[TMP1]], i32** [[TMP0]], align 8
// CHECK29-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK29-NEXT: [[TMP0:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
// CHECK29-NEXT: ret void
//
//
// CHECK29-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK29-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK29-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK29-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK29-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK29-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK29-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK29-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK29-NEXT: [[TMP3:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK29-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK29-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4]], [[TMP5]]
// CHECK29-NEXT: br i1 [[TMP6]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK29-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK29-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK29-NEXT: [[TMP1:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP0]] to i64
// CHECK29-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK29-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
// CHECK29-NEXT: br i1 [[TMP4]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK29: copyin.not.master:
// CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK29-NEXT: store i32 [[TMP7]], i32* [[TMP3]], align 4
// CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK29-NEXT: store i32 [[TMP5]], i32* [[TMP1]], align 4
// CHECK29-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK29: copyin.not.master.end:
// CHECK29-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK29-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]])
// CHECK29-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK29-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]])
// CHECK29-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK29-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]])
// CHECK29-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK29-NEXT: br i1 [[TMP13]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK29-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]])
// CHECK29-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK29-NEXT: br i1 [[TMP11]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK29: omp_if.then:
// CHECK29-NEXT: [[TMP14:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK29-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK29-NEXT: store i32 [[INC]], i32* [[TMP14]], align 4
// CHECK29-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]])
// CHECK29-NEXT: [[TMP12:%.*]] = call align 4 i32* @llvm.threadlocal.address.p0i32(i32* align 4 @a)
// CHECK29-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK29-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK29-NEXT: store i32 [[INC]], i32* [[TMP12]], align 4
// CHECK29-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]])
// CHECK29-NEXT: br label [[OMP_IF_END]]
// CHECK29: omp_if.end:
// CHECK29-NEXT: ret void
@ -40,252 +40,246 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i8** [[TMP2]], i8*** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8** [[TMP0]])
// CHECK1-NEXT: ret i32 0
//
//
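This file's CHECK1 block is the heaviest case in the section: main forks a parallel master region carrying a task reduction over argc and an array section of argv. The clause text is not in this hunk, but the checks constrain it well: __kmpc_taskred_modifier_init is fed two kmp_taskred_input_t entries, the reduction VLA is sized from &argv[0][0] through &argv[9][argc-1], and the region ends in a two-element __kmpc_reduce_nowait. A sketch consistent with that (the in_reduction task matches the __kmpc_omp_task_alloc/.omp_task_entry. checks):

int main(int argc, char **argv) {
  // Two reduction objects: the scalar argc and the array section
  // argv[0:10][0:argc], which is what the i64 9 index and the
  // LB_ADD_LEN = argc - 1 arithmetic in the checks walk out.
#pragma omp parallel master reduction(task, + : argc, argv[0:10][0:argc])
  {
#pragma omp task in_reduction(+ : argc, argv[0:10][0:argc])
    ;
  }
  return 0;
}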
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP23:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP24:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP5]], i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP7]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP8]], i64 9
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP9]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = add nuw i64 [[TMP13]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP16:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP14]], align 16
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP17]]
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP5]], i64 9
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1
// CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP13:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP13]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint i8* [[TMP19]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP23]]
// CHECK1-NEXT: store i8** [[_TMP4]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = ptrtoint i8* [[TMP16]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP19:%.*]] = sub i64 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: [[TMP20:%.*]] = sdiv exact i64 [[TMP19]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP20]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i32* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP33]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP34]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i8*, i8** [[TMP36]], i64 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i8*, i8** [[ARRAYIDX6]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i64 0
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = sext i32 [[TMP38]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN8:%.*]] = add nsw i64 -1, [[TMP39]]
// CHECK1-NEXT: [[TMP40:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8*, i8** [[TMP40]], i64 9
// CHECK1-NEXT: [[TMP41:%.*]] = load i8*, i8** [[ARRAYIDX9]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8, i8* [[TMP41]], i64 [[LB_ADD_LEN8]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX7]], i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint i8* [[ARRAYIDX10]] to i64
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[ARRAYIDX7]] to i64
// CHECK1-NEXT: [[TMP45:%.*]] = sub i64 [[TMP43]], [[TMP44]]
// CHECK1-NEXT: [[TMP46:%.*]] = sdiv exact i64 [[TMP45]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP47:%.*]] = add nuw i64 [[TMP46]], 1
// CHECK1-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32, i32* [[TMP54]], align 4
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP57:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP55]], i32 0, i32 2, i8* [[TMP56]])
// CHECK1-NEXT: store i8* [[TMP57]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i32, i32* [[TMP58]], align 4
// CHECK1-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP59]])
// CHECK1-NEXT: [[TMP61:%.*]] = icmp ne i32 [[TMP60]], 0
// CHECK1-NEXT: br i1 [[TMP61]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i32* [[TMP30]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP33]], i64 0
// CHECK1-NEXT: [[TMP34:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP34]], i64 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP37]], i64 9
// CHECK1-NEXT: [[TMP38:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP38]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1
// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP45]], i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP54:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]], i32 0, i32 2, i8* [[TMP53]])
// CHECK1-NEXT: store i8* [[TMP54]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = load i32, i32* [[TMP55]], align 4
// CHECK1-NEXT: [[TMP57:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP56]])
// CHECK1-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
// CHECK1-NEXT: br i1 [[TMP58]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP63]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP65:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP65]], i8*** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i32, i32* [[TMP66]], align 4
// CHECK1-NEXT: [[TMP68:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP67]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP69:%.*]] = bitcast i8* [[TMP68]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP69]], i32 0, i32 0
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP70]], i32 0, i32 0
// CHECK1-NEXT: [[TMP72:%.*]] = load i8*, i8** [[TMP71]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP72]], i8* align 8 [[TMP73]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP69]], i32 0, i32 1
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP74]], i32 0, i32 0
// CHECK1-NEXT: [[TMP76:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP76]], i8** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = load i32, i32* [[TMP77]], align 4
// CHECK1-NEXT: [[TMP79:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP78]], i8* [[TMP68]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP59]])
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP62:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP62]], i8*** [[TMP61]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = load i32, i32* [[TMP63]], align 4
// CHECK1-NEXT: [[TMP65:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP64]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP66:%.*]] = bitcast i8* [[TMP65]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP66]], i32 0, i32 0
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP67]], i32 0, i32 0
// CHECK1-NEXT: [[TMP69:%.*]] = load i8*, i8** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP69]], i8* align 8 [[TMP70]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP66]], i32 0, i32 1
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 0
// CHECK1-NEXT: [[TMP73:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP73]], i8** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = load i32, i32* [[TMP74]], align 4
// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP75]], i8* [[TMP65]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP56]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP80:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load i32, i32* [[TMP80]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP81]], i32 0)
// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP83:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: [[TMP77:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = load i32, i32* [[TMP77]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP78]], i32 0)
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP80:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP80]], i8** [[TMP79]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP81]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP83:%.*]] = inttoptr i64 [[TMP11]] to i8*
// CHECK1-NEXT: store i8* [[TMP83]], i8** [[TMP82]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP84]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP86:%.*]] = inttoptr i64 [[TMP14]] to i8*
// CHECK1-NEXT: store i8* [[TMP86]], i8** [[TMP85]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = load i32, i32* [[TMP87]], align 4
// CHECK1-NEXT: [[TMP89:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP90:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP88]], i32 2, i64 24, i8* [[TMP89]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP90]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP84:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[TMP84]], align 4
// CHECK1-NEXT: [[TMP86:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP87:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP85]], i32 2, i64 24, i8* [[TMP86]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP87]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP91:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP92:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP91]], [[TMP92]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP93]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP88:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP88]], [[TMP89]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP90]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP94:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP94]] to i32
// CHECK1-NEXT: [[TMP95:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV12:%.*]] = sext i8 [[TMP95]] to i32
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV]], [[CONV12]]
// CHECK1-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
// CHECK1-NEXT: store i8 [[CONV14]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP91:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP91]] to i32
// CHECK1-NEXT: [[TMP92:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP92]] to i32
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]]
// CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8
// CHECK1-NEXT: store i8 [[CONV15]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP93]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done17:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP88]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP90]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done18:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP85]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP96]] monotonic, align 4
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE31:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]]
// CHECK1: omp.arraycpy.body19:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT29:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT28:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV22:%.*]] = sext i8 [[TMP99]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]] monotonic, align 1
// CHECK1-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP94:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP93]] monotonic, align 4
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP95]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]]
// CHECK1: omp.arraycpy.body20:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP96:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP96]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP100:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY19]] ], [ [[TMP105:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP100]], i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[TMP101:%.*]] = load i8, i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[CONV24:%.*]] = sext i8 [[TMP101]] to i32
// CHECK1-NEXT: [[TMP102:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP102]] to i32
// CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[CONV24]], [[CONV25]]
// CHECK1-NEXT: [[CONV27:%.*]] = trunc i32 [[ADD26]] to i8
// CHECK1-NEXT: store i8 [[CONV27]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP103:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP104:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i8 [[TMP100]], i8 [[TMP103]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP105]] = extractvalue { i8, i1 } [[TMP104]], 0
// CHECK1-NEXT: [[TMP106:%.*]] = extractvalue { i8, i1 } [[TMP104]], 1
// CHECK1-NEXT: br i1 [[TMP106]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP97:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP102:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP97]], i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[TMP98:%.*]] = load i8, i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP98]] to i32
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP99]] to i32
// CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]]
// CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8
// CHECK1-NEXT: store i8 [[CONV28]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP100:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP101:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP97]], i8 [[TMP100]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP102]] = extractvalue { i8, i1 } [[TMP101]], 0
// CHECK1-NEXT: [[TMP103:%.*]] = extractvalue { i8, i1 } [[TMP101]], 1
// CHECK1-NEXT: br i1 [[TMP103]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT28]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE30:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT28]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE30]], label [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_BODY19]]
// CHECK1: omp.arraycpy.done31:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP95]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]]
// CHECK1: omp.arraycpy.done32:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP107:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP107]])
// CHECK1-NEXT: [[TMP104:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP104]])
// CHECK1-NEXT: ret void
//
//
@ -397,7 +391,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@ -411,7 +405,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@ -424,29 +418,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@ -460,7 +454,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64


@ -57,112 +57,108 @@ struct S {
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_5:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED7:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED10:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_9:%.*]] = alloca [[STRUCT_ANON_7:%.*]], align 1
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[TMP2]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]])
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[TMP5]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_2]])
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32*
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP6]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_3]], align 1
// CHECK1-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_4]], align 1
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]], i32 0, i32 1
// CHECK1-NEXT: store i8*** [[ARGV_ADDR]], i8**** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]], i32 0, i32 2
// CHECK1-NEXT: [[TMP12:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_3]], align 1
// CHECK1-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP12]] to i1
// CHECK1-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[TOBOOL6]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL7]], i8* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]], i32 0, i32 3
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: store i32 [[TMP14]], i32* [[TMP13]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_3]], align 1
// CHECK1-NEXT: [[TOBOOL8:%.*]] = trunc i8 [[TMP15]] to i1
// CHECK1-NEXT: br i1 [[TOBOOL8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_4]], align 1
// CHECK1-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK1-NEXT: [[CONV8:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED7]] to i8*
// CHECK1-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TOBOOL6]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL9]], i8* [[CONV8]], align 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED7]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[CONV11:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED10]] to i32*
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[CONV11]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED10]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_4]], align 1
// CHECK1-NEXT: [[TOBOOL12:%.*]] = trunc i8 [[TMP13]] to i1
// CHECK1-NEXT: br i1 [[TOBOOL12]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8***, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8*** [[ARGV_ADDR]], i64 [[TMP10]], i64 [[TMP12]])
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_5]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i32* [[ARGC_ADDR]], i8*** [[ARGV_ADDR]], i64 [[TMP10]], i64 [[TMP12]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.7*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %struct.anon.7* [[OMP_OUTLINED_ARG_AGG_9]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP16]]
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP14]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP9]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 4
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP11]] to i32*
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 9
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP17]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP8]], i32 1, i64* [[TMP13]], i64* [[TMP14]], i64 [[TMP18]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 33, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 4
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP8]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 9
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP14]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP12]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP5]], i32 1, i64* [[TMP10]], i64* [[TMP11]], i64 [[TMP15]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
@ -181,7 +177,7 @@ struct S {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
@ -194,7 +190,7 @@ struct S {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 8
@ -221,8 +217,8 @@ struct S {
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP21]] to i32
// CHECK1-NEXT: store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
@ -245,51 +241,47 @@ struct S {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.3*)* @.omp_task_entry..4 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to %struct.kmp_task_t_with_privates.3*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], %struct.kmp_task_t_with_privates.3* [[TMP9]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP10]], i32 0, i32 9
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP15]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP8]], i32 1, i64* [[TMP11]], i64* [[TMP12]], i64 [[TMP16]], i32 1, i32 1, i64 [[TMP17]], i8* null)
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.1*)* @.omp_task_entry..4 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates.1*
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 9
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP12]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP5]], i32 1, i64* [[TMP8]], i64* [[TMP9]], i64 [[TMP13]], i32 1, i32 1, i64 [[TMP14]], i8* null)
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry..4
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.3* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.1* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
@ -301,21 +293,21 @@ struct S {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.3*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.1*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.3* [[TMP1]], %struct.kmp_task_t_with_privates.3** [[DOTADDR1]], align 8
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.1* [[TMP1]], %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.3*, %struct.kmp_task_t_with_privates.3** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], %struct.kmp_task_t_with_privates.3* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.1*, %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.2*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.3* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.1* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
@ -341,8 +333,8 @@ struct S {
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTST__ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !28
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: store %struct.anon.2* [[TMP8]], %struct.anon.2** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !28
// CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP21]] to i32
// CHECK1-NEXT: store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !28
@ -365,109 +357,104 @@ struct S {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGV:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_5:%.*]], align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i8***, i8**** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i8, i8* [[TMP5]], align 8
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 3
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8*** [[ARGV]], i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i8***, i8**** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i8*** [[TMP4]], i8**** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP18]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP21]] to i64
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, i8* [[TMP20]], i64 [[IDXPROM6]]
// CHECK1-NEXT: [[TMP22:%.*]] = load i8, i8* [[ARRAYIDX7]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP23]], 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i8*** [[TMP1]], i8**** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV3]], align 4
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_6]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP11]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP14]] to i64
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, i8* [[TMP13]], i64 [[IDXPROM8]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, i8* [[ARRAYIDX9]], align 1
// CHECK1-NEXT: [[CONV10:%.*]] = sext i8 [[TMP15]] to i32
// CHECK1-NEXT: store i32 [[CONV10]], i32* [[DOTCAPTURE_EXPR_7]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP16]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[CONV9:%.*]] = sext i32 [[DIV]] to i64
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP24]], [[TMP25]]
// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB11]], 1
// CHECK1-NEXT: [[DIV12:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[CONV13:%.*]] = zext i32 [[DIV12]] to i64
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV9]], [[CONV13]]
// CHECK1-NEXT: [[SUB14:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK1-NEXT: store i64 [[SUB14]], i64* [[DOTCAPTURE_EXPR_8]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 1, i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.6*)* @.omp_task_entry..7 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8* [[TMP26]] to %struct.kmp_task_t_with_privates.6*
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_6:%.*]], %struct.kmp_task_t_with_privates.6* [[TMP27]], i32 0, i32 0
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP28]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = load i8*, i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast %struct.anon.5* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP30]], i8* align 8 [[TMP31]], i64 16, i1 false)
// CHECK1-NEXT: [[TMP32:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TOBOOL15:%.*]] = trunc i8 [[TMP32]] to i1
// CHECK1-NEXT: [[TMP33:%.*]] = sext i1 [[TOBOOL15]] to i32
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP28]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP28]], i32 0, i32 6
// CHECK1-NEXT: [[TMP36:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_8]], align 8
// CHECK1-NEXT: store i64 [[TMP36]], i64* [[TMP35]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP28]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP28]], i32 0, i32 9
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP39]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP40:%.*]] = load i64, i64* [[TMP37]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = zext i32 [[TMP15]] to i64
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i8* [[TMP26]], i32 [[TMP33]], i64* [[TMP34]], i64* [[TMP35]], i64 [[TMP40]], i32 1, i32 2, i64 [[TMP41]], i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK1-NEXT: [[CONV12:%.*]] = sext i32 [[DIV]] to i64
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_7]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_6]], align 4
// CHECK1-NEXT: [[SUB13:%.*]] = sub i32 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: [[SUB14:%.*]] = sub i32 [[SUB13]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB14]], 1
// CHECK1-NEXT: [[DIV15:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[CONV16:%.*]] = zext i32 [[DIV15]] to i64
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV12]], [[CONV16]]
// CHECK1-NEXT: [[SUB17:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK1-NEXT: store i64 [[SUB17]], i64* [[DOTCAPTURE_EXPR_11]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 1, i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.3*)* @.omp_task_entry..7 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.kmp_task_t_with_privates.3*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], %struct.kmp_task_t_with_privates.3* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = load i8*, i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.anon.2* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP23]], i8* align 8 [[TMP24]], i64 16, i1 false)
// CHECK1-NEXT: [[TMP25:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP25]] to i1
// CHECK1-NEXT: [[TMP26:%.*]] = sext i1 [[TOBOOL]] to i32
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP21]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP21]], i32 0, i32 6
// CHECK1-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_11]], align 8
// CHECK1-NEXT: store i64 [[TMP29]], i64* [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP21]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP21]], i32 0, i32 9
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP32]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP33:%.*]] = load i64, i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i8* [[TMP19]], i32 [[TMP26]], i64* [[TMP27]], i64* [[TMP28]], i64 [[TMP33]], i32 1, i32 2, i64 [[TMP34]], i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry..7
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.6* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.3* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
@ -479,7 +466,7 @@ struct S {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.5*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3_I:%.*]] = alloca i32, align 4
@ -490,17 +477,17 @@ struct S {
// CHECK1-NEXT: [[J15_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.6*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.3*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.6* [[TMP1]], %struct.kmp_task_t_with_privates.6** [[DOTADDR1]], align 8
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.3* [[TMP1]], %struct.kmp_task_t_with_privates.3** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.6*, %struct.kmp_task_t_with_privates.6** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_6:%.*]], %struct.kmp_task_t_with_privates.6* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.3*, %struct.kmp_task_t_with_privates.3** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], %struct.kmp_task_t_with_privates.3* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.5*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.6* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.2*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.3* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
@ -526,26 +513,26 @@ struct S {
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTST__ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !40
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: store %struct.anon.5* [[TMP8]], %struct.anon.5** [[__CONTEXT_ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.5*, %struct.anon.5** [[__CONTEXT_ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON_5:%.*]], %struct.anon.5* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: store %struct.anon.2* [[TMP8]], %struct.anon.2** [[__CONTEXT_ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = load i32*, i32** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
// CHECK1-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !40
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP25:%.*]] = load i32*, i32** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK1-NEXT: store i32 [[TMP26]], i32* [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP28:%.*]] = load i8***, i8**** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = load i8**, i8*** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
// CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP32]] to i64
// CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP29]], i64 [[IDXPROM_I]]
// CHECK1-NEXT: [[TMP33:%.*]] = load i8*, i8** [[ARRAYIDX_I]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK1-NEXT: [[IDXPROM4_I:%.*]] = sext i32 [[TMP36]] to i64
@ -577,9 +564,9 @@ struct S {
// CHECK1: taskloop.if.then.i:
// CHECK1-NEXT: [[TMP45:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !40
// CHECK1-NEXT: store i64 [[TMP45]], i64* [[DOTOMP_IV_I]], align 8, !noalias !40
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP47:%.*]] = load i32*, i32** [[TMP46]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_ANON_5]], %struct.anon.5* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP49:%.*]] = load i8***, i8**** [[TMP48]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]]
// CHECK1: omp.inner.for.cond.i:
@ -628,47 +615,44 @@ struct S {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.7* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.7*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_8:%.*]], align 1
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.7* [[__CONTEXT]], %struct.anon.7** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.7*, %struct.anon.7** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.9*)* @.omp_task_entry..10 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates.9*
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], %struct.kmp_task_t_with_privates.9* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP7]], i32 0, i32 9
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP12]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* [[TMP5]], i32 1, i64* [[TMP8]], i64* [[TMP9]], i64 [[TMP13]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP4:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.5*)* @.omp_task_entry..10 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.kmp_task_t_with_privates.5*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 6
// CHECK1-NEXT: store i64 9, i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 9
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP11]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[TMP9]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP4]], i32 1, i64* [[TMP7]], i64* [[TMP8]], i64 [[TMP12]], i32 1, i32 0, i64 0, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry..10
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.9* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.5* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
@ -680,22 +664,22 @@ struct S {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.4*, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[CLEANUP_DEST_SLOT_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.9*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.5*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.9* [[TMP1]], %struct.kmp_task_t_with_privates.9** [[DOTADDR1]], align 8
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.5* [[TMP1]], %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.9*, %struct.kmp_task_t_with_privates.9** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], %struct.kmp_task_t_with_privates.9* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.5*, %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.8*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.9* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.4*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.5* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
@ -721,8 +705,8 @@ struct S {
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTST__ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !52
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: store %struct.anon.8* [[TMP8]], %struct.anon.8** [[__CONTEXT_ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.8*, %struct.anon.8** [[__CONTEXT_ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: store %struct.anon.4* [[TMP8]], %struct.anon.4** [[__CONTEXT_ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !52
// CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP21]] to i32
// CHECK1-NEXT: store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !52
@ -790,7 +774,7 @@ struct S {
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_10:%.*]], align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[C]], i32* [[C_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
@ -798,96 +782,89 @@ struct S {
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_10]], %struct.anon.10* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_10]], %struct.anon.10* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[C_ADDR]], i32** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_10]], %struct.anon.10* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK1-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP1]] to i1
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK1-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL2]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL3]], i8* [[TMP3]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.10*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.anon.10* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: store i8 [[FROMBOOL3]], i8* [[CONV]], align 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S*, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S* [[THIS1]], i32* [[C_ADDR]], i64 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.10* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S* noundef [[THIS:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.10*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_11:%.*]], align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_6:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.10* [[__CONTEXT]], %struct.anon.10** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.10*, %struct.anon.10** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_10:%.*]], %struct.anon.10* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.S*, %struct.S** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_10]], %struct.anon.10* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_10]], %struct.anon.10* [[TMP0]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = load i8, i8* [[TMP5]], align 8
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK1-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_11]], %struct.anon.11* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store %struct.S* [[TMP2]], %struct.S** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_11]], %struct.anon.11* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP12]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP13]] to i1
// CHECK1-NEXT: store i32* [[TMP]], i32** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP15]], 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], %struct.anon.6* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], %struct.anon.6* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP7]], align 8
// CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK1-NEXT: store i32* [[TMP]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], 0
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = select i1 [[TOBOOL1]], i32 2, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = or i32 [[TMP16]], 1
// CHECK1-NEXT: [[TMP18:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 [[TMP17]], i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.12*)* @.omp_task_entry..13 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8* [[TMP18]] to %struct.kmp_task_t_with_privates.12*
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_12:%.*]], %struct.kmp_task_t_with_privates.12* [[TMP19]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.anon.11* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP22]], i8* align 8 [[TMP23]], i64 16, i1 false)
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP20]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP20]], i32 0, i32 6
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[TMP26]] to i64
// CHECK1-NEXT: store i64 [[CONV]], i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP20]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP20]], i32 0, i32 9
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i8* [[TMP18]], i32 1, i64* [[TMP24]], i64* [[TMP25]], i64 [[TMP30]], i32 1, i32 2, i64 4, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]])
// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = or i32 [[TMP11]], 1
// CHECK1-NEXT: [[TMP13:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 [[TMP12]], i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.7*)* @.omp_task_entry..13 to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.kmp_task_t_with_privates.7*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], %struct.kmp_task_t_with_privates.7* [[TMP14]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP15]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast %struct.anon.6* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP17]], i8* align 8 [[TMP18]], i64 16, i1 false)
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP15]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, i64* [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP15]], i32 0, i32 6
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK1-NEXT: [[CONV5:%.*]] = sext i32 [[TMP21]] to i64
// CHECK1-NEXT: store i64 [[CONV5]], i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP15]], i32 0, i32 7
// CHECK1-NEXT: store i64 1, i64* [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP15]], i32 0, i32 9
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP24]], i8 0, i64 8, i1 false)
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[TMP22]], align 8
// CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i8* [[TMP13]], i32 1, i64* [[TMP19]], i64* [[TMP20]], i64 [[TMP25]], i32 1, i32 2, i64 4, i8* null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry..13
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.12* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.7* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
@ -899,7 +876,7 @@ struct S {
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.11*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.6*, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP1_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4
@ -910,17 +887,17 @@ struct S {
// CHECK1-NEXT: [[TMP6_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.12*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.7*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.12* [[TMP1]], %struct.kmp_task_t_with_privates.12** [[DOTADDR1]], align 8
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates.7* [[TMP1]], %struct.kmp_task_t_with_privates.7** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.12*, %struct.kmp_task_t_with_privates.12** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_12:%.*]], %struct.kmp_task_t_with_privates.12* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.7*, %struct.kmp_task_t_with_privates.7** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], %struct.kmp_task_t_with_privates.7* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.11*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.12* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.6*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates.7* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
@ -946,12 +923,12 @@ struct S {
// CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTST__ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !64
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: store %struct.anon.11* [[TMP8]], %struct.anon.11** [[__CONTEXT_ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.11*, %struct.anon.11** [[__CONTEXT_ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON_11:%.*]], %struct.anon.11* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: store %struct.anon.6* [[TMP8]], %struct.anon.6** [[__CONTEXT_ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: [[TMP20:%.*]] = load %struct.anon.6*, %struct.anon.6** [[__CONTEXT_ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON_6:%.*]], %struct.anon.6* [[TMP20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = load %struct.S*, %struct.S** [[TMP21]], align 8
// CHECK1-NEXT: store i32* [[TMP_I]], i32** [[TMP1_I]], align 8, !noalias !64
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_11]], %struct.anon.11* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], %struct.anon.6* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK1-NEXT: store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64
@ -969,7 +946,7 @@ struct S {
// CHECK1-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !64
// CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP29]] to i32
// CHECK1-NEXT: store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !64
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_11]], %struct.anon.11* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], %struct.anon.6* [[TMP20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]]
// CHECK1: omp.inner.for.cond.i:

File diff suppressed because it is too large

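The suppressed file and the hunks on either side of it all record the same mechanical change. With the aggregate-capture codegen reverted, clang again passes each captured variable to __kmpc_fork_call as its own trailing argument instead of packing the captures into a single %struct.anon.N context object; that is why the OMP_OUTLINED_ARG_AGG_ allocas, the %struct.anon.N* __context parameters, and the per-field getelementptr/load sequences vanish throughout these CHECK lines. A minimal sketch of the kind of source these tests compile (a hypothetical reduction, not the actual test file):

    // A parallel region that implicitly captures 'this' and 'c'.
    // Names and the pragma form are illustrative only.
    struct S {
      int a;
      void foo(int c) {
    #pragma omp parallel        // body outlined into @.omp_outlined..N
        a += c;                 // captures: this (%struct.S*), c (i32*)
      }
    };

Under the reverted scheme the fork call took a single context pointer, __kmpc_fork_call(..., i32 1, ..., %struct.anon.N* %omp.outlined.arg.agg.); after the revert the capture count and each capture travel separately, e.g. __kmpc_fork_call(..., i32 2, ..., %struct.S* %this1, i32* %c.addr), which is the shape the updated CHECK lines above and below expect.
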
@ -177,7 +177,6 @@ int main() {
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN2SSC1ERi(%struct.SS* noundef nonnull align 8 dereferenceable(16) [[SS]], i32* noundef nonnull align 4 dereferenceable(4) @_ZZ4mainE5sivar)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]])
@ -189,7 +188,7 @@ int main() {
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float noundef 2.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]], float noundef 3.000000e+00)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4:[0-9]+]]
@ -245,11 +244,10 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
@ -257,8 +255,6 @@ int main() {
// CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
@ -270,20 +266,20 @@ int main() {
// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast %struct.S* [[VAR]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.S* [[VAR]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP1]], i8* align 4 [[TMP2]], i64 4, i1 false)
// CHECK1-NEXT: store i32 3, i32* [[SIVAR]], align 4
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN2]], i64 2
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN2]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP4]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN2]]
@ -306,37 +302,36 @@ int main() {
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_1:%.*]], align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[SST:%.*]] = alloca [[STRUCT_SST:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 128
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.1], align 128
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_1]], align 128
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 128
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: call void @_ZN3SSTIiEC1Ev(%struct.SST* noundef nonnull align 4 dereferenceable(4) [[SST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 128
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP0]], i8* align 128 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.1* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
@ -346,7 +341,6 @@ int main() {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
@ -360,18 +354,16 @@ int main() {
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4
@ -379,23 +371,21 @@ int main() {
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[TMP1]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP3]], -1
// CHECK1-NEXT: store i32 [[DEC]], i32* [[B]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP7]], 1
// CHECK1-NEXT: store i32 [[DIV]], i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 1
// CHECK1-NEXT: store i32 [[DIV]], i32* [[TMP4]], align 4
// CHECK1-NEXT: ret void
//
//
@ -434,12 +424,12 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
@ -454,81 +444,78 @@ int main() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 128
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.1], align 128
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_1:%.*]], align 128
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 128
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 128
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK1: arrayctor.loop:
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.1* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 128
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[T_VAR]], align 128
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX]], align 128
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.S.1* [[ARRAYIDX1]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast %struct.S.1* [[VAR]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP2]], i8* align 128 [[TMP3]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN2:%.*]] = getelementptr inbounds [2 x %struct.S.1], [2 x %struct.S.1]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAY_BEGIN2]], i64 2
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX]], align 128
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast %struct.S.0* [[ARRAYIDX1]] to i8*
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP1]], i8* align 128 [[TMP2]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN2:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN2]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.1* [ [[TMP4]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_1]], %struct.S.1* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.1* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN2]]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN2]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE3:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done3:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_1:%.*]], %struct.S.1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
@@ -537,64 +524,59 @@ int main() {
// CHECK1-SAME: (%struct.SST* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
// CHECK1-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SST:%.*]], %struct.SST* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store %struct.SST* [[THIS1]], %struct.SST** [[TMP0]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.SST* [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SST* noundef [[THIS:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.SST*, %struct.SST** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[DOTA__VOID_ADDR:%.*]] = call i8* @__kmpc_alloc(i32 [[TMP4]], i64 4, i8* inttoptr (i64 2 to i8*))
// CHECK1-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[DOTA__VOID_ADDR:%.*]] = call i8* @__kmpc_alloc(i32 [[TMP2]], i64 4, i8* inttoptr (i64 2 to i8*))
// CHECK1-NEXT: [[DOTA__ADDR:%.*]] = bitcast i8* [[DOTA__VOID_ADDR]] to i32*
// CHECK1-NEXT: store i32* [[DOTA__ADDR]], i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i32* [[DOTA__ADDR]] to i8*
// CHECK1-NEXT: call void @__kmpc_free(i32 [[TMP4]], i8* [[TMP7]], i8* inttoptr (i64 2 to i8*))
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[DOTA__ADDR]] to i8*
// CHECK1-NEXT: call void @__kmpc_free(i32 [[TMP2]], i8* [[TMP5]], i8* inttoptr (i64 2 to i8*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_1:%.*]], %struct.S.1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK1-SAME: (%struct.S.1* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.1*, align 8
// CHECK1-NEXT: store %struct.S.1* [[THIS]], %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.1*, %struct.S.1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
@@ -628,7 +610,6 @@ int main() {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
@@ -642,18 +623,16 @@ int main() {
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK3-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4
@@ -662,22 +641,20 @@ int main() {
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[TMP1]], align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[A]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[C]], i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store %struct.SS* [[TMP2]], %struct.SS** [[TMP3]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[TMP5]], i32** [[TMP4]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[B]], i32** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
// CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: store i32* [[TMP8]], i32** [[TMP7]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store %struct.SS* [[TMP0]], %struct.SS** [[TMP1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[TMP3]], i32** [[TMP2]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[B]], i32** [[TMP4]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: store i32* [[TMP6]], i32** [[TMP5]], align 8
// CHECK3-NEXT: call void @_ZZN2SSC1ERiENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]])
// CHECK3-NEXT: ret void
//
@@ -686,7 +663,6 @@ int main() {
// CHECK3-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(32) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK3-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
@@ -706,18 +682,16 @@ int main() {
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK3-NEXT: store %struct.SS* [[TMP1]], %struct.SS** [[TMP11]], align 8
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[TMP1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4
@@ -725,46 +699,41 @@ int main() {
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[TMP1]], align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[A]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[C]], i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP3]], -1
// CHECK3-NEXT: store i32 [[DEC]], i32* [[B]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP7]], 1
// CHECK3-NEXT: store i32 [[DIV]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 1
// CHECK3-NEXT: store i32 [[DIV]], i32* [[TMP4]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
// CHECK3-NEXT: [[G:%.*]] = alloca i32, align 128
// CHECK3-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
// CHECK3-NEXT: store i32 1, i32* [[G]], align 128
// CHECK3-NEXT: store i32 2, i32* [[SIVAR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[TMP2]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.3* noundef nonnull align 8 dereferenceable(16) [[REF_TMP]])
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP0]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[TMP1]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.1* noundef nonnull align 8 dereferenceable(16) [[REF_TMP]])
// CHECK3-NEXT: ret void
//
//
@@ -799,27 +768,23 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK4-NEXT: [[G:%.*]] = alloca i32, align 128
// CHECK4-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>, align 128
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: store i32 1, i32* [[G]], align 128
// CHECK4-NEXT: store i32 20, i32* [[SIVAR]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]], i32 0, i32 0
@@ -833,18 +798,18 @@ int main() {
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* [[G]], align 128
// CHECK4-NEXT: store volatile i32 [[TMP1]], i32* [[BLOCK_CAPTURED]], align 128
// CHECK4-NEXT: [[TMP0:%.*]] = load volatile i32, i32* [[G]], align 128
// CHECK4-NEXT: store volatile i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 128
// CHECK4-NEXT: [[BLOCK_CAPTURED1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[BLOCK_CAPTURED1]], align 32
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP3]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP6:%.*]] = load i8*, i8** [[TMP4]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP7]](i8* noundef [[TMP5]])
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[BLOCK_CAPTURED1]], align 32
// CHECK4-NEXT: [[TMP2:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, [92 x i8], i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP2]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP5:%.*]] = load i8*, i8** [[TMP3]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP6]](i8* noundef [[TMP4]])
// CHECK4-NEXT: ret void
//
//
@@ -868,7 +833,6 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
@@ -882,18 +846,16 @@ int main() {
// CHECK4-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4
@@ -902,10 +864,8 @@ int main() {
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[TMP1]], align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 0
@@ -919,23 +879,23 @@ int main() {
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.4 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED_THIS_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: store %struct.SS* [[TMP2]], %struct.SS** [[BLOCK_CAPTURED_THIS_ADDR]], align 8
// CHECK4-NEXT: store %struct.SS* [[TMP0]], %struct.SS** [[BLOCK_CAPTURED_THIS_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP3]], i32** [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[BLOCK_CAPTURED2]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[BLOCK_CAPTURED2]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: store i32* [[TMP5]], i32** [[BLOCK_CAPTURED3]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP6]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP7]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP10]](i8* noundef [[TMP8]])
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: store i32* [[TMP3]], i32** [[BLOCK_CAPTURED3]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP4]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP5]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP8]](i8* noundef [[TMP6]])
// CHECK4-NEXT: ret void
//
//
@@ -944,7 +904,6 @@ int main() {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>*, align 8
// CHECK4-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>** [[BLOCK_ADDR]], align 8
@@ -964,18 +923,16 @@ int main() {
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 1
// CHECK4-NEXT: store i32 [[DIV]], i32* [[TMP3]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[TMP5]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.SS* [[THIS]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] {
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4
@@ -983,22 +940,20 @@ int main() {
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[TMP1]], align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP5]], -1
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP1]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP3]], -1
// CHECK4-NEXT: store i32 [[DEC]], i32* [[B]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP7]], 1
// CHECK4-NEXT: store i32 [[DIV]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 1
// CHECK4-NEXT: store i32 [[DIV]], i32* [[TMP4]], align 4
// CHECK4-NEXT: ret void
//
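The check updates above are easiest to read with the two calling conventions side by side. The reverted scheme packs every capture into one %struct.anon aggregate (the OMP_OUTLINED_ARG_AGG_ alloca) and always passes a single context pointer to __kmpc_fork_call with an argument count of 1, even for empty capture sets; the restored scheme forwards each captured variable as its own trailing argument and passes the real capture count (0, 1, or 2 in these tests). What follows is a minimal C sketch of what the generated code amounts to, not the code Clang emits: the names anon_ctx, outlined_agg, and outlined_direct are illustrative, and plain direct calls stand in for the varargs runtime entry point.

#include <stdio.h>

/* Reverted scheme: captures live in one context struct, mirroring the
 * %struct.anon / OMP_OUTLINED_ARG_AGG_ alloca in the removed checks. */
struct anon_ctx {            /* illustrative name, not what Clang emits */
  int *argc;
  char **argv;
};

static void outlined_agg(int *gtid, int *btid, struct anon_ctx *ctx) {
  (void)gtid; (void)btid;
  /* The IR performs this unpacking with getelementptr + load. */
  printf("aggregate capture: argc = %d, argv[0] = %s\n",
         *ctx->argc, ctx->argv[0]);
}

/* Restored scheme: one trailing argument per capture, so the fork call
 * passes the capture count (2 here) instead of a constant 1. */
static void outlined_direct(int *gtid, int *btid, int *argc, char **argv) {
  (void)gtid; (void)btid;
  printf("direct capture: argc = %d, argv[0] = %s\n", *argc, argv[0]);
}

int main(int argc, char **argv) {
  int gtid = 0, btid = 0;
  struct anon_ctx ctx = { &argc, argv };

  /* __kmpc_fork_call(loc, 1, outlined, %struct.anon* agg) amounts to: */
  outlined_agg(&gtid, &btid, &ctx);

  /* __kmpc_fork_call(loc, 2, outlined, i32* argc, i8** argv) amounts to: */
  outlined_direct(&gtid, &btid, &argc, argv);
  return 0;
}

Either way the outlined body sees the same values; the difference these tests exercise is confined to the prologue (context-struct unpacking versus per-argument allocas such as THIS_ADDR) and to the argument count handed to the runtime.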

File diff suppressed because it is too large

View File

@@ -40,243 +40,237 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i8** [[TMP2]], i8*** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8** [[TMP0]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP23:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP24:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP5]], i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP7]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP8]], i64 9
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP9]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = add nuw i64 [[TMP13]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP16:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP14]], align 16
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP17]]
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP5]], i64 9
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1
// CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP13:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP13]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint i8* [[TMP19]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP23]]
// CHECK1-NEXT: store i8** [[_TMP4]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = ptrtoint i8* [[TMP16]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP19:%.*]] = sub i64 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: [[TMP20:%.*]] = sdiv exact i64 [[TMP19]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP20]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i32* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP33]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP34]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i8*, i8** [[TMP36]], i64 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i8*, i8** [[ARRAYIDX6]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i64 0
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = sext i32 [[TMP38]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN8:%.*]] = add nsw i64 -1, [[TMP39]]
// CHECK1-NEXT: [[TMP40:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8*, i8** [[TMP40]], i64 9
// CHECK1-NEXT: [[TMP41:%.*]] = load i8*, i8** [[ARRAYIDX9]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8, i8* [[TMP41]], i64 [[LB_ADD_LEN8]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX7]], i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint i8* [[ARRAYIDX10]] to i64
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[ARRAYIDX7]] to i64
// CHECK1-NEXT: [[TMP45:%.*]] = sub i64 [[TMP43]], [[TMP44]]
// CHECK1-NEXT: [[TMP46:%.*]] = sdiv exact i64 [[TMP45]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP47:%.*]] = add nuw i64 [[TMP46]], 1
// CHECK1-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32, i32* [[TMP54]], align 4
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP57:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP55]], i32 0, i32 2, i8* [[TMP56]])
// CHECK1-NEXT: store i8* [[TMP57]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP61:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP61]], i8*** [[TMP60]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = load i32, i32* [[TMP62]], align 4
// CHECK1-NEXT: [[TMP64:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP63]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP65:%.*]] = bitcast i8* [[TMP64]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP65]], i32 0, i32 0
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP66]], i32 0, i32 0
// CHECK1-NEXT: [[TMP68:%.*]] = load i8*, i8** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP68]], i8* align 8 [[TMP69]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP65]], i32 0, i32 1
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP70]], i32 0, i32 0
// CHECK1-NEXT: [[TMP72:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP72]], i8** [[TMP71]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i32* [[TMP30]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP33]], i64 0
// CHECK1-NEXT: [[TMP34:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP34]], i64 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP37]], i64 9
// CHECK1-NEXT: [[TMP38:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP38]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1
// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP45]], i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP54:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]], i32 0, i32 2, i8* [[TMP53]])
// CHECK1-NEXT: store i8* [[TMP54]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP55]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP56]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP58:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP58]], i8*** [[TMP57]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP59]], align 4
// CHECK1-NEXT: [[TMP61:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP60]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8* [[TMP61]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP62]], i32 0, i32 0
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP63]], i32 0, i32 0
// CHECK1-NEXT: [[TMP65:%.*]] = load i8*, i8** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP65]], i8* align 8 [[TMP66]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP62]], i32 0, i32 1
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP67]], i32 0, i32 0
// CHECK1-NEXT: [[TMP69:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP69]], i8** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = load i32, i32* [[TMP70]], align 4
// CHECK1-NEXT: [[TMP72:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP71]], i8* [[TMP61]])
// CHECK1-NEXT: [[TMP73:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = load i32, i32* [[TMP73]], align 4
// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP74]], i8* [[TMP64]])
// CHECK1-NEXT: [[TMP76:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = load i32, i32* [[TMP76]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP77]], i32 0)
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP79:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP74]], i32 0)
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP76:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP76]], i8** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP77]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP79:%.*]] = inttoptr i64 [[TMP11]] to i8*
// CHECK1-NEXT: store i8* [[TMP79]], i8** [[TMP78]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP80]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP82:%.*]] = inttoptr i64 [[TMP14]] to i8*
// CHECK1-NEXT: store i8* [[TMP82]], i8** [[TMP81]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = load i32, i32* [[TMP83]], align 4
// CHECK1-NEXT: [[TMP85:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP86:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP84]], i32 2, i64 24, i8* [[TMP85]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP86]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP80:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = load i32, i32* [[TMP80]], align 4
// CHECK1-NEXT: [[TMP82:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP83:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP81]], i32 2, i64 24, i8* [[TMP82]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP83]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP87:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP88:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP87]], [[TMP88]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP89]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP84:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP84]], [[TMP85]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP86]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP90:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP90]] to i32
// CHECK1-NEXT: [[TMP91:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV12:%.*]] = sext i8 [[TMP91]] to i32
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV]], [[CONV12]]
// CHECK1-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
// CHECK1-NEXT: store i8 [[CONV14]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP87:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP87]] to i32
// CHECK1-NEXT: [[TMP88:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP88]] to i32
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]]
// CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8
// CHECK1-NEXT: store i8 [[CONV15]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP89]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done17:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP84]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP86]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done18:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP81]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP92:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP93:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP92]] monotonic, align 4
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP94]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE31:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]]
// CHECK1: omp.arraycpy.body19:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT29:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT28:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP95:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV22:%.*]] = sext i8 [[TMP95]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]] monotonic, align 1
// CHECK1-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP90:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP89]] monotonic, align 4
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP91]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]]
// CHECK1: omp.arraycpy.body20:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP92:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP92]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP96:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY19]] ], [ [[TMP101:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP96]], i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[TMP97:%.*]] = load i8, i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[CONV24:%.*]] = sext i8 [[TMP97]] to i32
// CHECK1-NEXT: [[TMP98:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP98]] to i32
// CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[CONV24]], [[CONV25]]
// CHECK1-NEXT: [[CONV27:%.*]] = trunc i32 [[ADD26]] to i8
// CHECK1-NEXT: store i8 [[CONV27]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP100:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i8 [[TMP96]], i8 [[TMP99]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP101]] = extractvalue { i8, i1 } [[TMP100]], 0
// CHECK1-NEXT: [[TMP102:%.*]] = extractvalue { i8, i1 } [[TMP100]], 1
// CHECK1-NEXT: br i1 [[TMP102]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP93:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP98:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP93]], i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[TMP94:%.*]] = load i8, i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP94]] to i32
// CHECK1-NEXT: [[TMP95:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP95]] to i32
// CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]]
// CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8
// CHECK1-NEXT: store i8 [[CONV28]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP96:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP97:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP93]], i8 [[TMP96]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP98]] = extractvalue { i8, i1 } [[TMP97]], 0
// CHECK1-NEXT: [[TMP99:%.*]] = extractvalue { i8, i1 } [[TMP97]], 1
// CHECK1-NEXT: br i1 [[TMP99]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT28]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE30:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT28]], [[TMP94]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE30]], label [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_BODY19]]
// CHECK1: omp.arraycpy.done31:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP91]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]]
// CHECK1: omp.arraycpy.done32:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP103]])
// CHECK1-NEXT: [[TMP100:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP100]])
// CHECK1-NEXT: ret void
//
//
@ -388,7 +382,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@ -402,7 +396,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@ -415,29 +409,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@ -451,7 +445,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64


@ -57,19 +57,17 @@ int main() {
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
@ -77,30 +75,28 @@ int main() {
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 1
// CHECK1-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 1
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK1-NEXT: ]
@ -117,18 +113,18 @@ int main() {
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP10:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { i8*, i32 } [[TMP11]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP12]]) #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: [[TMP11:%.*]] = extractvalue { i8*, i32 } [[TMP10]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP11]]) #[[ATTR7:[0-9]+]]
// CHECK1-NEXT: unreachable
//
//
@ -142,17 +138,15 @@ int main() {
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
@ -160,30 +154,28 @@ int main() {
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 0
// CHECK1-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 0
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0
// CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
@ -194,17 +186,17 @@ int main() {
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
// CHECK1: terminate.lpad:
// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: [[TMP10:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { i8*, i32 } [[TMP11]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP12]]) #[[ATTR7]]
// CHECK1-NEXT: [[TMP11:%.*]] = extractvalue { i8*, i32 } [[TMP10]], 0
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP11]]) #[[ATTR7]]
// CHECK1-NEXT: unreachable
//


@ -40,285 +40,279 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i8** [[TMP2]], i8*** [[TMP1]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
// CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]], i8** [[TMP0]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [2 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP23:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP24:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP5]], i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP7]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8*, i8** [[TMP8]], i64 9
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[ARRAYIDX2]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[TMP9]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint i8* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = add nuw i64 [[TMP13]], 1
// CHECK1-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP16:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP14]], align 16
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP17]]
// CHECK1-NEXT: store i32 0, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]]
// CHECK1-NEXT: [[TMP5:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[TMP5]], i64 9
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[ARRAYIDX3]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i64 [[LB_ADD_LEN]]
// CHECK1-NEXT: [[TMP7:%.*]] = ptrtoint i8* [[ARRAYIDX4]] to i64
// CHECK1-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = sub i64 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1
// CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP13:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP13]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i8* [[VLA]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i8 0, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint i8* [[TMP19]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP23]]
// CHECK1-NEXT: store i8** [[_TMP4]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = ptrtoint i8* [[TMP16]] to i64
// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint i8* [[ARRAYIDX2]] to i64
// CHECK1-NEXT: [[TMP19:%.*]] = sub i64 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: [[TMP20:%.*]] = sdiv exact i64 [[TMP19]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[VLA]], i64 [[TMP20]]
// CHECK1-NEXT: store i8** [[_TMP5]], i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[_TMP5]], align 8
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i32* [[TMP2]] to i8*
// CHECK1-NEXT: store i8* [[TMP28]], i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP33]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP34]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i8*, i8** [[TMP36]], i64 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i8*, i8** [[ARRAYIDX6]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i64 0
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = sext i32 [[TMP38]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN8:%.*]] = add nsw i64 -1, [[TMP39]]
// CHECK1-NEXT: [[TMP40:%.*]] = load i8**, i8*** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8*, i8** [[TMP40]], i64 9
// CHECK1-NEXT: [[TMP41:%.*]] = load i8*, i8** [[ARRAYIDX9]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8, i8* [[TMP41]], i64 [[LB_ADD_LEN8]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX7]], i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint i8* [[ARRAYIDX10]] to i64
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[ARRAYIDX7]] to i64
// CHECK1-NEXT: [[TMP45:%.*]] = sub i64 [[TMP43]], [[TMP44]]
// CHECK1-NEXT: [[TMP46:%.*]] = sdiv exact i64 [[TMP45]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP47:%.*]] = add nuw i64 [[TMP46]], 1
// CHECK1-NEXT: [[TMP48:%.*]] = mul nuw i64 [[TMP47]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32, i32* [[TMP54]], align 4
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP57:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP55]], i32 1, i32 2, i8* [[TMP56]])
// CHECK1-NEXT: store i8* [[TMP57]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = load i32, i32* [[TMP58]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP59]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP61:%.*]] = icmp slt i32 [[TMP60]], 0
// CHECK1-NEXT: [[TMP62:%.*]] = select i1 [[TMP61]], i32 [[TMP60]], i32 0
// CHECK1-NEXT: store i32 [[TMP62]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP63:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP63]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i32* [[TMP30]] to i8*
// CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 4, i1 false)
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8*, i8** [[TMP33]], i64 0
// CHECK1-NEXT: [[TMP34:%.*]] = load i8*, i8** [[ARRAYIDX7]], align 8
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP34]], i64 0
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i8*, i8** [[TMP37]], i64 9
// CHECK1-NEXT: [[TMP38:%.*]] = load i8*, i8** [[ARRAYIDX10]], align 8
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[TMP38]], i64 [[LB_ADD_LEN9]]
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 1
// CHECK1-NEXT: store i8* [[ARRAYIDX8]], i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1
// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP45]], i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_6]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[TMP51]], align 4
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP54:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, i8* [[TMP53]])
// CHECK1-NEXT: store i8* [[TMP54]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = load i32, i32* [[TMP55]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP58:%.*]] = icmp slt i32 [[TMP57]], 0
// CHECK1-NEXT: [[TMP59:%.*]] = select i1 [[TMP58]], i32 [[TMP57]], i32 0
// CHECK1-NEXT: store i32 [[TMP59]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP60]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP64:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP65:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP64]], [[TMP65]]
// CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP61]], [[TMP62]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP66]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: [[TMP63:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP63]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP70:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP70]], i8*** [[TMP69]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = load i32, i32* [[TMP71]], align 4
// CHECK1-NEXT: [[TMP73:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP72]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8* [[TMP73]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP74]], i32 0, i32 0
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP75]], i32 0, i32 0
// CHECK1-NEXT: [[TMP77:%.*]] = load i8*, i8** [[TMP76]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP77]], i8* align 8 [[TMP78]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP74]], i32 0, i32 1
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP79]], i32 0, i32 0
// CHECK1-NEXT: [[TMP81:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP81]], i8** [[TMP80]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i32, i32* [[TMP82]], align 4
// CHECK1-NEXT: [[TMP84:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP83]], i8* [[TMP73]])
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: store i32* [[ARGC1]], i32** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP67:%.*]] = load i8**, i8*** [[TMP]], align 8
// CHECK1-NEXT: store i8** [[TMP67]], i8*** [[TMP66]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = load i32, i32* [[TMP68]], align 4
// CHECK1-NEXT: [[TMP70:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP69]], i32 1, i64 48, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK1-NEXT: [[TMP71:%.*]] = bitcast i8* [[TMP70]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP71]], i32 0, i32 0
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP72]], i32 0, i32 0
// CHECK1-NEXT: [[TMP74:%.*]] = load i8*, i8** [[TMP73]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP74]], i8* align 8 [[TMP75]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP71]], i32 0, i32 1
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP76]], i32 0, i32 0
// CHECK1-NEXT: [[TMP78:%.*]] = load i8*, i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: store i8* [[TMP78]], i8** [[TMP77]], align 8
// CHECK1-NEXT: [[TMP79:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = load i32, i32* [[TMP79]], align 4
// CHECK1-NEXT: [[TMP81:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP80]], i8* [[TMP70]])
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP85]], 1
// CHECK1-NEXT: [[TMP82:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP82]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: [[TMP86:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP87:%.*]] = load i32, i32* [[TMP86]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP87]])
// CHECK1-NEXT: [[TMP88:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = load i32, i32* [[TMP88]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP89]], i32 1)
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP91:%.*]] = bitcast i32* [[ARGC]] to i8*
// CHECK1-NEXT: [[TMP83:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = load i32, i32* [[TMP83]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP84]])
// CHECK1-NEXT: [[TMP85:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = load i32, i32* [[TMP85]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP86]], i32 1)
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP88:%.*]] = bitcast i32* [[ARGC1]] to i8*
// CHECK1-NEXT: store i8* [[TMP88]], i8** [[TMP87]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP89]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP91:%.*]] = inttoptr i64 [[TMP11]] to i8*
// CHECK1-NEXT: store i8* [[TMP91]], i8** [[TMP90]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: store i8* [[VLA]], i8** [[TMP92]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP94:%.*]] = inttoptr i64 [[TMP14]] to i8*
// CHECK1-NEXT: store i8* [[TMP94]], i8** [[TMP93]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[TMP95]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP98:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP96]], i32 2, i64 24, i8* [[TMP97]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP98]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: [[TMP92:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = load i32, i32* [[TMP92]], align 4
// CHECK1-NEXT: [[TMP94:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP95:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP93]], i32 2, i64 24, i8* [[TMP94]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP95]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP99:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP100:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP99]], [[TMP100]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP101]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1-NEXT: [[TMP96:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP97:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP96]], [[TMP97]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP102:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP102]] to i32
// CHECK1-NEXT: [[TMP103:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV12:%.*]] = sext i8 [[TMP103]] to i32
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV]], [[CONV12]]
// CHECK1-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
// CHECK1-NEXT: store i8 [[CONV14]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP99:%.*]] = load i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP99]] to i32
// CHECK1-NEXT: [[TMP100:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP100]] to i32
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]]
// CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8
// CHECK1-NEXT: store i8 [[CONV15]], i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP101]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done17:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP96]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP98]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done18:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP93]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC]], align 4
// CHECK1-NEXT: [[TMP105:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP104]] monotonic, align 4
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr i8, i8* [[ARRAYIDX1]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i8* [[ARRAYIDX1]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE31:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]]
// CHECK1: omp.arraycpy.body19:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT29:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i8* [ [[ARRAYIDX1]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT28:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV22:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]] monotonic, align 1
// CHECK1-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC1]], align 4
// CHECK1-NEXT: [[TMP102:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP101]] monotonic, align 4
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr i8, i8* [[ARRAYIDX2]], i64 [[TMP11]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq i8* [[ARRAYIDX2]], [[TMP103]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]]
// CHECK1: omp.arraycpy.body20:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi i8* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi i8* [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP104:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP104]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP108:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY19]] ], [ [[TMP113:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP108]], i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[TMP109:%.*]] = load i8, i8* [[_TMP23]], align 1
// CHECK1-NEXT: [[CONV24:%.*]] = sext i8 [[TMP109]] to i32
// CHECK1-NEXT: [[TMP110:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP110]] to i32
// CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[CONV24]], [[CONV25]]
// CHECK1-NEXT: [[CONV27:%.*]] = trunc i32 [[ADD26]] to i8
// CHECK1-NEXT: store i8 [[CONV27]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP111:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP112:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i8 [[TMP108]], i8 [[TMP111]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP113]] = extractvalue { i8, i1 } [[TMP112]], 0
// CHECK1-NEXT: [[TMP114:%.*]] = extractvalue { i8, i1 } [[TMP112]], 1
// CHECK1-NEXT: br i1 [[TMP114]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1-NEXT: [[TMP105:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP110:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i8 [[TMP105]], i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[TMP106:%.*]] = load i8, i8* [[_TMP24]], align 1
// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP106]] to i32
// CHECK1-NEXT: [[TMP107:%.*]] = load i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1
// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP107]] to i32
// CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]]
// CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8
// CHECK1-NEXT: store i8 [[CONV28]], i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP108:%.*]] = load i8, i8* [[ATOMIC_TEMP]], align 1
// CHECK1-NEXT: [[TMP109:%.*]] = cmpxchg i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP105]], i8 [[TMP108]] monotonic monotonic, align 1
// CHECK1-NEXT: [[TMP110]] = extractvalue { i8, i1 } [[TMP109]], 0
// CHECK1-NEXT: [[TMP111:%.*]] = extractvalue { i8, i1 } [[TMP109]], 1
// CHECK1-NEXT: br i1 [[TMP111]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT28]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE30:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT28]], [[TMP106]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE30]], label [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_BODY19]]
// CHECK1: omp.arraycpy.done31:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, i8* [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, i8* [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq i8* [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP103]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]]
// CHECK1: omp.arraycpy.done32:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP115:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP115]])
// CHECK1-NEXT: [[TMP112:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP112]])
// CHECK1-NEXT: ret void
//
//
@@ -430,7 +424,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[TMP4_I:%.*]] = alloca i8*, align 8
@@ -444,7 +438,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
@@ -457,29 +451,29 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i8***)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[TMP18]] to i8*
// CHECK1-NEXT: [[TMP22:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP19]], i8* [[TMP21]])
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i8**, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]]
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP31:%.*]] = load i8**, i8*** [[TMP30]], align 8
// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds i8*, i8** [[TMP31]], i64 9
// CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[ARRAYIDX2_I]], align 8
@@ -493,7 +487,7 @@ int main(int argc, char **argv) {
// CHECK1-NEXT: store i64 [[TMP37]], i64* @{{reduction_size[.].+[.]}}, align 8, !noalias !12
// CHECK1-NEXT: [[TMP39:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], i8* [[TMP39]], i8* [[TMP25]])
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = load i8**, i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = load i8*, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint i8* [[TMP43]] to i64
