Reverting commit r290983 while debugging a test failure on Windows.

llvm-svn: 290989
Arpith Chacko Jacob 2017-01-04 19:14:43 +00:00
parent 64cc2a0f18
commit b0d96f5375
3 changed files with 320 additions and 361 deletions

View File

@@ -22,10 +22,14 @@ using namespace CodeGen;
namespace {
enum OpenMPRTLFunctionNVPTX {
/// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit);
/// \brief Call to void __kmpc_kernel_init(kmp_int32 omp_handle,
/// kmp_int32 thread_limit);
OMPRTL_NVPTX__kmpc_kernel_init,
/// \brief Call to void __kmpc_kernel_deinit();
OMPRTL_NVPTX__kmpc_kernel_deinit,
};
// NVPTX Address space
enum AddressSpace {
AddressSpaceShared = 3,
};
} // namespace
@@ -66,15 +70,6 @@ static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
/// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
/// Get the value of the thread_limit clause in the teams directive.
/// The runtime encodes thread_limit in the launch parameter, always starting
/// thread_limit+warpSize threads per team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
return Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
"thread_limit");
}
/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
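For illustration, the computation described in this comment can be sketched with the same NVPTX helpers used elsewhere in this file (the name getMasterThreadIDSketch is hypothetical; the real getMasterThreadID lies outside this hunk). With a power-of-2 warp size, the master thread id is (NumThreads - 1) & ~(WarpSize - 1):
static llvm::Value *getMasterThreadIDSketch(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  // Index of the last thread in the block.
  llvm::Value *LastTID =
      Bld.CreateSub(getNVPTXNumThreads(CGF), Bld.getInt32(1));
  // Clear the lane bits to land on the first lane of the last warp.
  llvm::Value *WarpMask =
      Bld.CreateNot(Bld.CreateSub(getNVPTXWarpSize(CGF), Bld.getInt32(1)));
  return Bld.CreateAnd(LastTID, WarpMask, "master_tid");
}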
@@ -108,105 +103,35 @@ void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
/* placeholder */ "_worker", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
WorkerFn->setLinkage(llvm::GlobalValue::InternalLinkage);
WorkerFn->addFnAttr(llvm::Attribute::NoInline);
}
void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
EntryFunctionState EST;
WorkerFunctionState WST(CGM);
void CGOpenMPRuntimeNVPTX::initializeEnvironment() {
//
// Initialize master-worker control state in shared memory.
//
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeNVPTX &RT;
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
auto DL = CGM.getDataLayout();
ActiveWorkers = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int32Ty, /*isConstant=*/false,
llvm::GlobalValue::CommonLinkage,
llvm::Constant::getNullValue(CGM.Int32Ty), "__omp_num_threads", 0,
llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
ActiveWorkers->setAlignment(DL.getPrefTypeAlignment(CGM.Int32Ty));
public:
NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
: RT(RT), EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
RT.emitGenericEntryHeader(CGF, EST, WST);
}
void Exit(CodeGenFunction &CGF) override {
RT.emitGenericEntryFooter(CGF, EST);
}
} Action(*this, EST, WST);
CodeGen.setAction(Action);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
// Create the worker function
emitWorkerFunction(WST);
// Now change the name of the worker function to correspond to this target
// region's entry function.
WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
}
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
EntryFunctionState &EST,
WorkerFunctionState &WST) {
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
EST.ExitBB = CGF.createBasicBlock(".exit");
auto *IsWorker =
Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
CGF.EmitBlock(WorkerBB);
CGF.EmitCallOrInvoke(WST.WorkerFn, llvm::None);
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(MasterCheckBB);
auto *IsMaster =
Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
CGF.EmitBlock(MasterBB);
// First action in sequential region:
// Initialize the state of the OpenMP runtime library on the GPU.
llvm::Value *Args[] = {getThreadLimit(CGF)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
}
void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
CGF.EmitBranch(TerminateBB);
CGF.EmitBlock(TerminateBB);
// Signal termination condition.
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), None);
// Barrier to terminate worker threads.
syncCTAThreads(CGF);
// Master thread jumps to exit point.
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
EST.ExitBB = nullptr;
WorkID = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int64Ty, /*isConstant=*/false,
llvm::GlobalValue::CommonLinkage,
llvm::Constant::getNullValue(CGM.Int64Ty), "__tgt_work_id", 0,
llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
WorkID->setAlignment(DL.getPrefTypeAlignment(CGM.Int64Ty));
}
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
auto &Ctx = CGM.getContext();
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
emitWorkerLoop(CGF, WST);
CGF.FinishFunction();
@@ -238,26 +163,21 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(AwaitBB);
// Wait for parallel work
syncCTAThreads(CGF);
Address WorkFn =
CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
Address ExecStatus =
CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
// TODO: Call into runtime to get parallel work.
// On termination condition (workid == 0), exit loop.
llvm::Value *ShouldTerminate =
Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate");
llvm::Value *ShouldTerminate = Bld.CreateICmpEQ(
Bld.CreateAlignedLoad(WorkID, WorkID->getAlignment()),
llvm::Constant::getNullValue(WorkID->getType()->getElementType()),
"should_terminate");
Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
// Activate requested workers.
CGF.EmitBlock(SelectWorkersBB);
llvm::Value *IsActive =
Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
llvm::Value *ThreadID = getNVPTXThreadID(CGF);
llvm::Value *ActiveThread = Bld.CreateICmpSLT(
ThreadID,
Bld.CreateAlignedLoad(ActiveWorkers, ActiveWorkers->getAlignment()),
"active_thread");
Bld.CreateCondBr(ActiveThread, ExecuteBB, BarrierBB);
// Signal start of parallel region.
CGF.EmitBlock(ExecuteBB);
@@ -277,6 +197,72 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(ExitBB);
}
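Conceptually, the worker loop emitted above implements the following handshake over the two shared-memory globals created in initializeEnvironment(); this is an illustrative host-style C++ model with hypothetical names (BlockState, workerLoopStep), not code emitted by the compiler:
#include <cstdint>
// Per-block control state, mirroring the addrspace(3) globals.
struct BlockState {
  int64_t WorkID = 0;        // __tgt_work_id: work to execute, 0 means terminate
  int32_t ActiveWorkers = 0; // __omp_num_threads: workers requested for the region
};
// One trip through the worker loop for thread `tid`.
// Returns false once the master signals termination (WorkID == 0).
bool workerLoopStep(const BlockState &S, int32_t tid) {
  // AwaitBB: barrier until the master publishes WorkID/ActiveWorkers.
  if (S.WorkID == 0)
    return false;              // ExitBB
  if (tid < S.ActiveWorkers) {
    // ExecuteBB: run the parallel work for this thread.
  }
  // BarrierBB: barrier to signal completion of the parallel region.
  return true;                 // loop back to AwaitBB
}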
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitEntryHeader(CodeGenFunction &CGF,
EntryFunctionState &EST,
WorkerFunctionState &WST) {
CGBuilderTy &Bld = CGF.Builder;
// Get the master thread id.
llvm::Value *MasterID = getMasterThreadID(CGF);
// Current thread's identifier.
llvm::Value *ThreadID = getNVPTXThreadID(CGF);
// Setup BBs in entry function.
llvm::BasicBlock *WorkerCheckBB = CGF.createBasicBlock(".check.for.worker");
llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
EST.ExitBB = CGF.createBasicBlock(".exit");
// The head (master thread) marches on while its body of companion threads in
// the warp go to sleep.
llvm::Value *ShouldDie =
Bld.CreateICmpUGT(ThreadID, MasterID, "excess_in_master_warp");
Bld.CreateCondBr(ShouldDie, EST.ExitBB, WorkerCheckBB);
// Select worker threads...
CGF.EmitBlock(WorkerCheckBB);
llvm::Value *IsWorker = Bld.CreateICmpULT(ThreadID, MasterID, "is_worker");
Bld.CreateCondBr(IsWorker, WorkerBB, MasterBB);
// ... and send to worker loop, awaiting parallel invocation.
CGF.EmitBlock(WorkerBB);
CGF.EmitCallOrInvoke(WST.WorkerFn, llvm::None);
CGF.EmitBranch(EST.ExitBB);
// Only master thread executes subsequent serial code.
CGF.EmitBlock(MasterBB);
// First action in sequential region:
// Initialize the state of the OpenMP runtime library on the GPU.
llvm::Value *Args[] = {Bld.getInt32(/*OmpHandle=*/0), getNVPTXThreadID(CGF)};
CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init),
Args);
}
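Read as a whole, the branches emitted in this entry header partition a block's threads as in the following plain C++ sketch (classifyThread and Role are illustration-only names): threads above the master id retire, threads below it enter the worker loop, and only the master continues into the sequential region after calling __kmpc_kernel_init.
enum class Role { Exit, Worker, Master };
// Role of thread `tid` given the master thread id computed by
// getMasterThreadID (first lane of the last warp in the block).
Role classifyThread(unsigned tid, unsigned masterID) {
  if (tid > masterID)
    return Role::Exit;   // excess threads in the master warp
  if (tid < masterID)
    return Role::Worker; // branch to .worker and run the worker function
  return Role::Master;   // branch to .master
}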
void CGOpenMPRuntimeNVPTX::emitEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
CGF.EmitBranch(TerminateBB);
CGF.EmitBlock(TerminateBB);
// Signal termination condition.
Bld.CreateAlignedStore(
llvm::Constant::getNullValue(WorkID->getType()->getElementType()), WorkID,
WorkID->getAlignment());
// Barrier to terminate worker threads.
syncCTAThreads(CGF);
// Master thread jumps to exit point.
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
EST.ExitBB = nullptr;
}
/// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
@@ -286,20 +272,14 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
llvm::Constant *RTLFn = nullptr;
switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
case OMPRTL_NVPTX__kmpc_kernel_init: {
// Build void __kmpc_kernel_init(kmp_int32 thread_limit);
llvm::Type *TypeParams[] = {CGM.Int32Ty};
// Build void __kmpc_kernel_init(kmp_int32 omp_handle,
// kmp_int32 thread_limit);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_deinit: {
// Build void __kmpc_kernel_deinit();
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, {}, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
break;
}
}
return RTLFn;
}
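On the device runtime side, the entry points built in this switch correspond to C functions with the signatures spelled out in the comments above; roughly, as a sketch using int32_t for kmp_int32:
#include <cstdint>
extern "C" void __kmpc_kernel_init(int32_t omp_handle, int32_t thread_limit);
extern "C" void __kmpc_kernel_deinit(void);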
@@ -335,14 +315,44 @@ void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
assert(!ParentName.empty() && "Invalid target region parent name!");
emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
CodeGen);
EntryFunctionState EST;
WorkerFunctionState WST(CGM);
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeNVPTX &RT;
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
public:
NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
: RT(RT), EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
RT.emitEntryHeader(CGF, EST, WST);
}
void Exit(CodeGenFunction &CGF) override { RT.emitEntryFooter(CGF, EST); }
} Action(*this, EST, WST);
CodeGen.setAction(Action);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
// Create the worker function
emitWorkerFunction(WST);
// Now change the name of the worker function to correspond to this target
// region's entry function.
WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
}
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM) {
: CGOpenMPRuntime(CGM), ActiveWorkers(nullptr), WorkID(nullptr) {
if (!CGM.getLangOpts().OpenMPIsDevice)
llvm_unreachable("OpenMP NVPTX can only handle device code.");
// Called once per module during initialization.
initializeEnvironment();
}
void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,

View File

@@ -24,7 +24,7 @@ namespace clang {
namespace CodeGen {
class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
private:
public:
struct EntryFunctionState {
llvm::BasicBlock *ExitBB = nullptr;
};
@@ -40,21 +40,34 @@ private:
void createWorkerFunction(CodeGenModule &CGM);
};
/// \brief Helper for target entry function. Guide the master and worker
/// threads to their respective locations.
void emitEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
WorkerFunctionState &WST);
/// \brief Signal termination of OMP execution.
void emitEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
private:
//
// Private state and methods.
//
// Master-worker control state.
// Number of requested OMP threads in parallel region.
llvm::GlobalVariable *ActiveWorkers;
// Outlined function for the workers to execute.
llvm::GlobalVariable *WorkID;
/// \brief Initialize master-worker control state.
void initializeEnvironment();
/// \brief Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST);
/// \brief Helper for worker function. Emit body of worker loop.
void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
/// \brief Helper for generic target entry function. Guide the master and
/// worker threads to their respective locations.
void emitGenericEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
WorkerFunctionState &WST);
/// \brief Signal termination of OMP execution for generic target entry
/// function.
void emitGenericEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
/// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
@@ -70,20 +83,6 @@ private:
void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size) override;
/// \brief Emit outlined function specialized for the Fork-Join
/// programming model for applicable target directives on the NVPTX device.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitGenericKernel(const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// \brief Emit outlined function for 'target' directive on the NVPTX
/// device.
/// \param D Directive to emit.

View File

@@ -8,6 +8,9 @@
#ifndef HEADER
#define HEADER
// CHECK-DAG: [[OMP_NT:@.+]] = common addrspace(3) global i32 0
// CHECK-DAG: [[OMP_WID:@.+]] = common addrspace(3) global i64 0
template<typename tx, typename ty>
struct TT{
tx X;
@@ -23,22 +26,19 @@ int foo(int n) {
double cn[5][n];
TT<long long, char> d;
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l93}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l87}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -54,37 +54,31 @@ int foo(int n) {
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l93]]()
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l87]]()
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T1]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//
@@ -99,22 +93,19 @@ int foo(int n) {
{
}
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l173}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l158}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -130,41 +121,35 @@ int foo(int n) {
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l173]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]])
// CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l158]](i[[SZ:32|64]] [[ARG1:%[^)]+]])
// CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
// CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
// CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T2]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: {{call|invoke}} void [[T3]]_worker()
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: load i16, i16* [[AA_CADDR]],
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//
@@ -175,22 +160,19 @@ int foo(int n) {
aa += 1;
}
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l285}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l261}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -206,7 +188,7 @@ int foo(int n) {
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l285]](i[[SZ]]
// CHECK: define {{.*}}void [[T4:@__omp_offloading_.+foo.+l261]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]*
@@ -237,32 +219,26 @@ int foo(int n) {
// CHECK-DAG: [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]],
// CHECK-DAG: [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]],
//
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T3]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: {{call|invoke}} void [[T4]]_worker()
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
//
// Use captures.
// CHECK-64-DAG: load i32, i32* [[REF_A]]
@@ -273,10 +249,10 @@ int foo(int n) {
// CHECK-DAG: getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}}
// CHECK-DAG: getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0
//
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//
@@ -362,22 +338,19 @@ int bar(int n){
return a;
}
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+322}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+l298}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -393,7 +366,7 @@ int bar(int n){
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l322]](i[[SZ]]
// CHECK: define {{.*}}void [[T5:@__omp_offloading_.+static.+l298]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -409,40 +382,36 @@ int bar(int n){
// CHECK-DAG: [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8*
// CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
//
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T4]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: {{call|invoke}} void [[T5]]_worker()
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
//
// CHECK-64-DAG: load i32, i32* [[REF_A]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
// CHECK-DAG: load i16, i16* [[REF_AA]]
// CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
// CHECK: br label {{%?}}[[TERMINATE:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//
@@ -451,22 +420,19 @@ int bar(int n){
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l340}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l316}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -482,7 +448,7 @@ int bar(int n){
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l340]](
// CHECK: define {{.*}}void [[T6:@__omp_offloading_.+S1.+l316]](
// Create local storage for each capture.
// CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
// CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]]
@@ -500,42 +466,35 @@ int bar(int n){
// CHECK-DAG: [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
// CHECK-DAG: [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
// CHECK-DAG: [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]],
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T5]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: {{call|invoke}} void [[T6]]_worker()
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// Use captures.
// CHECK-DAG: getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0
// CHECK-64-DAG:load i32, i32* [[REF_B]]
// CHECK-32-DAG:load i32, i32* [[LOCAL_B]]
// CHECK-DAG: getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}}
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//
@@ -544,22 +503,19 @@ int bar(int n){
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l305}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l281}}_worker()
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
//
// CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
//
// CHECK: [[EXEC_PARALLEL]]
@@ -575,7 +531,7 @@ int bar(int n){
// CHECK: [[EXIT]]
// CHECK: ret void
// CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l305]](i[[SZ]]
// CHECK: define {{.*}}void [[T7:@__omp_offloading_.+template.+l281]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -588,42 +544,36 @@ int bar(int n){
// CHECK-DAG: [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
// CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
//
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
//
// CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T6]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// CHECK: {{call|invoke}} void [[T7]]_worker()
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
//
// CHECK-64-DAG: load i32, i32* [[REF_A]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
// CHECK-DAG: load i16, i16* [[REF_AA]]
// CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
//
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// CHECK: br label {{%?}}[[TERM:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit()
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]]
//