Reverting commit r290983 while debugging a test failure on Windows.

llvm-svn: 290989
Arpith Chacko Jacob 2017-01-04 19:14:43 +00:00
parent 64cc2a0f18
commit b0d96f5375
3 changed files with 320 additions and 361 deletions


@@ -22,10 +22,14 @@ using namespace CodeGen;
namespace { namespace {
enum OpenMPRTLFunctionNVPTX { enum OpenMPRTLFunctionNVPTX {
/// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit); /// \brief Call to void __kmpc_kernel_init(kmp_int32 omp_handle,
/// kmp_int32 thread_limit);
OMPRTL_NVPTX__kmpc_kernel_init, OMPRTL_NVPTX__kmpc_kernel_init,
/// \brief Call to void __kmpc_kernel_deinit(); };
OMPRTL_NVPTX__kmpc_kernel_deinit,
// NVPTX Address space
enum AddressSpace {
AddressSpaceShared = 3,
}; };
} // namespace } // namespace
@@ -66,15 +70,6 @@ static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
/// Synchronize all GPU threads in a block. /// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); } static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
/// Get the value of the thread_limit clause in the teams directive.
/// The runtime encodes thread_limit in the launch parameter, always starting
/// thread_limit+warpSize threads per team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
return Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
"thread_limit");
}
/// Get the thread id of the OMP master thread. /// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the /// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2. /// GPU block. Warp size is assumed to be some power of 2.
@@ -108,105 +103,35 @@ void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage, CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
/* placeholder */ "_worker", &CGM.getModule()); /* placeholder */ "_worker", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI); CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
WorkerFn->setLinkage(llvm::GlobalValue::InternalLinkage);
WorkerFn->addFnAttr(llvm::Attribute::NoInline);
} }
void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D, void CGOpenMPRuntimeNVPTX::initializeEnvironment() {
StringRef ParentName, //
llvm::Function *&OutlinedFn, // Initialize master-worker control state in shared memory.
llvm::Constant *&OutlinedFnID, //
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
EntryFunctionState EST;
WorkerFunctionState WST(CGM);
// Emit target region as a standalone region. auto DL = CGM.getDataLayout();
class NVPTXPrePostActionTy : public PrePostActionTy { ActiveWorkers = new llvm::GlobalVariable(
CGOpenMPRuntimeNVPTX &RT; CGM.getModule(), CGM.Int32Ty, /*isConstant=*/false,
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST; llvm::GlobalValue::CommonLinkage,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST; llvm::Constant::getNullValue(CGM.Int32Ty), "__omp_num_threads", 0,
llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
ActiveWorkers->setAlignment(DL.getPrefTypeAlignment(CGM.Int32Ty));
public: WorkID = new llvm::GlobalVariable(
NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT, CGM.getModule(), CGM.Int64Ty, /*isConstant=*/false,
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST, llvm::GlobalValue::CommonLinkage,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST) llvm::Constant::getNullValue(CGM.Int64Ty), "__tgt_work_id", 0,
: RT(RT), EST(EST), WST(WST) {} llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
void Enter(CodeGenFunction &CGF) override { WorkID->setAlignment(DL.getPrefTypeAlignment(CGM.Int64Ty));
RT.emitGenericEntryHeader(CGF, EST, WST);
}
void Exit(CodeGenFunction &CGF) override {
RT.emitGenericEntryFooter(CGF, EST);
}
} Action(*this, EST, WST);
CodeGen.setAction(Action);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
// Create the worker function
emitWorkerFunction(WST);
// Now change the name of the worker function to correspond to this target
// region's entry function.
WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
}
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
EntryFunctionState &EST,
WorkerFunctionState &WST) {
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
EST.ExitBB = CGF.createBasicBlock(".exit");
auto *IsWorker =
Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
CGF.EmitBlock(WorkerBB);
CGF.EmitCallOrInvoke(WST.WorkerFn, llvm::None);
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(MasterCheckBB);
auto *IsMaster =
Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
CGF.EmitBlock(MasterBB);
// First action in sequential region:
// Initialize the state of the OpenMP runtime library on the GPU.
llvm::Value *Args[] = {getThreadLimit(CGF)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
}
void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
CGF.EmitBranch(TerminateBB);
CGF.EmitBlock(TerminateBB);
// Signal termination condition.
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), None);
// Barrier to terminate worker threads.
syncCTAThreads(CGF);
// Master thread jumps to exit point.
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
EST.ExitBB = nullptr;
} }
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) { void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
auto &Ctx = CGM.getContext(); auto &Ctx = CGM.getContext();
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {}); CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
emitWorkerLoop(CGF, WST); emitWorkerLoop(CGF, WST);
CGF.FinishFunction(); CGF.FinishFunction();
@@ -238,26 +163,21 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(AwaitBB); CGF.EmitBlock(AwaitBB);
// Wait for parallel work // Wait for parallel work
syncCTAThreads(CGF); syncCTAThreads(CGF);
Address WorkFn =
CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
Address ExecStatus =
CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
// TODO: Call into runtime to get parallel work.
// On termination condition (workid == 0), exit loop. // On termination condition (workid == 0), exit loop.
llvm::Value *ShouldTerminate = llvm::Value *ShouldTerminate = Bld.CreateICmpEQ(
Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate"); Bld.CreateAlignedLoad(WorkID, WorkID->getAlignment()),
llvm::Constant::getNullValue(WorkID->getType()->getElementType()),
"should_terminate");
Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB); Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
// Activate requested workers. // Activate requested workers.
CGF.EmitBlock(SelectWorkersBB); CGF.EmitBlock(SelectWorkersBB);
llvm::Value *IsActive = llvm::Value *ThreadID = getNVPTXThreadID(CGF);
Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active"); llvm::Value *ActiveThread = Bld.CreateICmpSLT(
Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB); ThreadID,
Bld.CreateAlignedLoad(ActiveWorkers, ActiveWorkers->getAlignment()),
"active_thread");
Bld.CreateCondBr(ActiveThread, ExecuteBB, BarrierBB);
// Signal start of parallel region. // Signal start of parallel region.
CGF.EmitBlock(ExecuteBB); CGF.EmitBlock(ExecuteBB);
@@ -277,6 +197,72 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(ExitBB); CGF.EmitBlock(ExitBB);
} }
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitEntryHeader(CodeGenFunction &CGF,
EntryFunctionState &EST,
WorkerFunctionState &WST) {
CGBuilderTy &Bld = CGF.Builder;
// Get the master thread id.
llvm::Value *MasterID = getMasterThreadID(CGF);
// Current thread's identifier.
llvm::Value *ThreadID = getNVPTXThreadID(CGF);
// Setup BBs in entry function.
llvm::BasicBlock *WorkerCheckBB = CGF.createBasicBlock(".check.for.worker");
llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
EST.ExitBB = CGF.createBasicBlock(".exit");
// The head (master thread) marches on while its body of companion threads in
// the warp go to sleep.
llvm::Value *ShouldDie =
Bld.CreateICmpUGT(ThreadID, MasterID, "excess_in_master_warp");
Bld.CreateCondBr(ShouldDie, EST.ExitBB, WorkerCheckBB);
// Select worker threads...
CGF.EmitBlock(WorkerCheckBB);
llvm::Value *IsWorker = Bld.CreateICmpULT(ThreadID, MasterID, "is_worker");
Bld.CreateCondBr(IsWorker, WorkerBB, MasterBB);
// ... and send to worker loop, awaiting parallel invocation.
CGF.EmitBlock(WorkerBB);
CGF.EmitCallOrInvoke(WST.WorkerFn, llvm::None);
CGF.EmitBranch(EST.ExitBB);
// Only master thread executes subsequent serial code.
CGF.EmitBlock(MasterBB);
// First action in sequential region:
// Initialize the state of the OpenMP runtime library on the GPU.
llvm::Value *Args[] = {Bld.getInt32(/*OmpHandle=*/0), getNVPTXThreadID(CGF)};
CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init),
Args);
}
void CGOpenMPRuntimeNVPTX::emitEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
CGF.EmitBranch(TerminateBB);
CGF.EmitBlock(TerminateBB);
// Signal termination condition.
Bld.CreateAlignedStore(
llvm::Constant::getNullValue(WorkID->getType()->getElementType()), WorkID,
WorkID->getAlignment());
// Barrier to terminate worker threads.
syncCTAThreads(CGF);
// Master thread jumps to exit point.
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
EST.ExitBB = nullptr;
}
/// \brief Returns specified OpenMP runtime function for the current OpenMP /// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device. /// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function. /// \param Function OpenMP runtime function.
@@ -286,20 +272,14 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
llvm::Constant *RTLFn = nullptr; llvm::Constant *RTLFn = nullptr;
switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) { switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
case OMPRTL_NVPTX__kmpc_kernel_init: { case OMPRTL_NVPTX__kmpc_kernel_init: {
// Build void __kmpc_kernel_init(kmp_int32 thread_limit); // Build void __kmpc_kernel_init(kmp_int32 omp_handle,
llvm::Type *TypeParams[] = {CGM.Int32Ty}; // kmp_int32 thread_limit);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int32Ty};
llvm::FunctionType *FnTy = llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init"); RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
break; break;
} }
case OMPRTL_NVPTX__kmpc_kernel_deinit: {
// Build void __kmpc_kernel_deinit();
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, {}, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
break;
}
} }
return RTLFn; return RTLFn;
} }
@@ -335,14 +315,44 @@ void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
assert(!ParentName.empty() && "Invalid target region parent name!"); assert(!ParentName.empty() && "Invalid target region parent name!");
emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, EntryFunctionState EST;
CodeGen); WorkerFunctionState WST(CGM);
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeNVPTX &RT;
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
public:
NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
: RT(RT), EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
RT.emitEntryHeader(CGF, EST, WST);
}
void Exit(CodeGenFunction &CGF) override { RT.emitEntryFooter(CGF, EST); }
} Action(*this, EST, WST);
CodeGen.setAction(Action);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
// Create the worker function
emitWorkerFunction(WST);
// Now change the name of the worker function to correspond to this target
// region's entry function.
WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
} }
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM) CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM) { : CGOpenMPRuntime(CGM), ActiveWorkers(nullptr), WorkID(nullptr) {
if (!CGM.getLangOpts().OpenMPIsDevice) if (!CGM.getLangOpts().OpenMPIsDevice)
llvm_unreachable("OpenMP NVPTX can only handle device code."); llvm_unreachable("OpenMP NVPTX can only handle device code.");
// Called once per module during initialization.
initializeEnvironment();
} }
void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF, void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
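For orientation, the master-thread selection that both sides of this diff rely on (getMasterThreadID, unchanged by the revert) picks the first lane of the last warp in the block, assuming the warp size is a power of two. A minimal CUDA sketch of the equivalent arithmetic, matching the sub/sub/and sequence checked in the test file below (names here are illustrative, not from the patch):

// Sketch only: equivalent of the IR emitted by getMasterThreadID().
// blockDim.x plays the role of %ntid.x, warpSize of the warpsize sreg.
__device__ int master_thread_id() {
  // First lane of the last warp: round (blockDim.x - 1) down to a
  // multiple of warpSize by clearing its low bits.
  return (blockDim.x - 1) & ~(warpSize - 1);
}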


@@ -24,7 +24,7 @@ namespace clang {
namespace CodeGen { namespace CodeGen {
class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime { class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
private: public:
struct EntryFunctionState { struct EntryFunctionState {
llvm::BasicBlock *ExitBB = nullptr; llvm::BasicBlock *ExitBB = nullptr;
}; };
@@ -40,21 +40,34 @@ private:
void createWorkerFunction(CodeGenModule &CGM); void createWorkerFunction(CodeGenModule &CGM);
}; };
/// \brief Helper for target entry function. Guide the master and worker
/// threads to their respective locations.
void emitEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
WorkerFunctionState &WST);
/// \brief Signal termination of OMP execution.
void emitEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
private:
//
// Private state and methods.
//
// Master-worker control state.
// Number of requested OMP threads in parallel region.
llvm::GlobalVariable *ActiveWorkers;
// Outlined function for the workers to execute.
llvm::GlobalVariable *WorkID;
/// \brief Initialize master-worker control state.
void initializeEnvironment();
/// \brief Emit the worker function for the current target region. /// \brief Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST); void emitWorkerFunction(WorkerFunctionState &WST);
/// \brief Helper for worker function. Emit body of worker loop. /// \brief Helper for worker function. Emit body of worker loop.
void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST); void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
/// \brief Helper for generic target entry function. Guide the master and
/// worker threads to their respective locations.
void emitGenericEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
WorkerFunctionState &WST);
/// \brief Signal termination of OMP execution for generic target entry
/// function.
void emitGenericEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
/// \brief Returns specified OpenMP runtime function for the current OpenMP /// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device. /// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function. /// \param Function OpenMP runtime function.
@@ -70,20 +83,6 @@ private:
void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size) override; uint64_t Size) override;
/// \brief Emit outlined function specialized for the Fork-Join
/// programming model for applicable target directives on the NVPTX device.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitGenericKernel(const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// \brief Emit outlined function for 'target' directive on the NVPTX /// \brief Emit outlined function for 'target' directive on the NVPTX
/// device. /// device.
/// \param D Directive to emit. /// \param D Directive to emit.
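To make the restored declarations above concrete: emitWorkerLoop() generates a loop in which every worker blocks on a barrier, reads the shared work id, exits when it is zero, and otherwise participates only if its thread id is below the requested worker count. A CUDA-flavoured sketch under those assumptions (the parallel-work dispatch is elided here, as it still is in the generated code):

// Sketch only: control flow of the generated worker loop. The two pointers
// stand in for the @__tgt_work_id and @__omp_num_threads shared globals
// created by initializeEnvironment().
__device__ void worker_loop(volatile long long *work_id,
                            volatile int *active_workers) {
  for (;;) {
    __syncthreads();                        // .await.work
    if (*work_id == 0)                      // termination signalled by the master
      return;                               // .exit
    if ((int)threadIdx.x < *active_workers) {
      // .exec.parallel: run the outlined parallel region (dispatch elided)
    }
    __syncthreads();                        // .barrier.parallel, then loop
  }
}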


@@ -8,6 +8,9 @@
#ifndef HEADER #ifndef HEADER
#define HEADER #define HEADER
// CHECK-DAG: [[OMP_NT:@.+]] = common addrspace(3) global i32 0
// CHECK-DAG: [[OMP_WID:@.+]] = common addrspace(3) global i64 0
template<typename tx, typename ty> template<typename tx, typename ty>
struct TT{ struct TT{
tx X; tx X;
@@ -23,22 +26,19 @@ int foo(int n) {
double cn[5][n]; double cn[5][n];
TT<long long, char> d; TT<long long, char> d;
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l93}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l87}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -54,37 +54,31 @@ int foo(int n) {
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l93]]() // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l87]]()
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]] // CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] // CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T1]]_worker() // CHECK: {{call|invoke}} void [[T1]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]] // CHECK: br label {{%?}}[[TERM:.+]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: [[TERM]]
// CHECK: call void @__kmpc_kernel_deinit() // CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
@@ -99,22 +93,19 @@ int foo(int n) {
{ {
} }
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l173}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l158}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -130,41 +121,35 @@ int foo(int n) {
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l173]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]]) // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l158]](i[[SZ:32|64]] [[ARG1:%[^)]+]])
// CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]], // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
// CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]], // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
// CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16* // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]] // CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] // CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T2]]_worker() // CHECK: {{call|invoke}} void [[T3]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: load i16, i16* [[AA_CADDR]], // CHECK: load i16, i16* [[AA_CADDR]],
// CHECK: br label {{%?}}[[TERMINATE:.+]] // CHECK: br label {{%?}}[[TERM:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: [[TERM]]
// CHECK: call void @__kmpc_kernel_deinit() // CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
@@ -175,22 +160,19 @@ int foo(int n) {
aa += 1; aa += 1;
} }
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l285}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l261}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -206,7 +188,7 @@ int foo(int n) {
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l285]](i[[SZ]] // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+foo.+l261]](i[[SZ]]
// Create local storage for each capture. // Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]* // CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]*
@@ -237,32 +219,26 @@ int foo(int n) {
// CHECK-DAG: [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]], // CHECK-DAG: [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]],
// CHECK-DAG: [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]], // CHECK-DAG: [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]],
// //
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]] // CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] // CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T3]]_worker() // CHECK: {{call|invoke}} void [[T4]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// //
// Use captures. // Use captures.
// CHECK-64-DAG: load i32, i32* [[REF_A]] // CHECK-64-DAG: load i32, i32* [[REF_A]]
@@ -273,10 +249,10 @@ int foo(int n) {
// CHECK-DAG: getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}} // CHECK-DAG: getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}}
// CHECK-DAG: getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0 // CHECK-DAG: getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0
// //
// CHECK: br label {{%?}}[[TERMINATE:.+]] // CHECK: br label {{%?}}[[TERM:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: [[TERM]]
// CHECK: call void @__kmpc_kernel_deinit() // CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
@@ -362,22 +338,19 @@ int bar(int n){
return a; return a;
} }
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+322}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+l298}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -393,7 +366,7 @@ int bar(int n){
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l322]](i[[SZ]] // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+static.+l298]](i[[SZ]]
// Create local storage for each capture. // Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -409,40 +382,36 @@ int bar(int n){
// CHECK-DAG: [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8* // CHECK-DAG: [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8*
// CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]], // CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
// //
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]] // CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] // CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T4]]_worker() // CHECK: {{call|invoke}} void [[T5]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]] //
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK-64-DAG: load i32, i32* [[REF_A]] // CHECK-64-DAG: load i32, i32* [[REF_A]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_A]] // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
// CHECK-DAG: load i16, i16* [[REF_AA]] // CHECK-DAG: load i16, i16* [[REF_AA]]
// CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2 // CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
// CHECK: br label {{%?}}[[TERMINATE:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: br label {{%?}}[[TERM:.+]]
// CHECK: call void @__kmpc_kernel_deinit() //
// CHECK: [[TERM]]
// CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
@@ -451,22 +420,19 @@ int bar(int n){
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l340}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l316}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -482,7 +448,7 @@ int bar(int n){
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l340]]( // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+S1.+l316]](
// Create local storage for each capture. // Create local storage for each capture.
// CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]* // CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
// CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]]
@@ -500,42 +466,35 @@ int bar(int n){
// CHECK-DAG: [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]], // CHECK-DAG: [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
// CHECK-DAG: [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]], // CHECK-DAG: [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
// CHECK-DAG: [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]], // CHECK-DAG: [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]],
// CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
// //
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[CHECK_WORKER]]
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T5]]_worker() // CHECK: {{call|invoke}} void [[T6]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// Use captures. // Use captures.
// CHECK-DAG: getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0 // CHECK-DAG: getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0
// CHECK-64-DAG:load i32, i32* [[REF_B]] // CHECK-64-DAG:load i32, i32* [[REF_B]]
// CHECK-32-DAG:load i32, i32* [[LOCAL_B]] // CHECK-32-DAG:load i32, i32* [[LOCAL_B]]
// CHECK-DAG: getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}} // CHECK-DAG: getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}}
// CHECK: br label {{%?}}[[TERMINATE:.+]] // CHECK: br label {{%?}}[[TERM:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: [[TERM]]
// CHECK: call void @__kmpc_kernel_deinit() // CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
@@ -544,22 +503,19 @@ int bar(int n){
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l305}}_worker() // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l281}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]] // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// //
// CHECK: [[AWAIT_WORK]] // CHECK: [[AWAIT_WORK]]
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]], // CHECK: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]] // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// //
// CHECK: [[SEL_WORKERS]] // CHECK: [[SEL_WORKERS]]
// CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]], // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0 // CHECK: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
// CHECK: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
// CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]] // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
// //
// CHECK: [[EXEC_PARALLEL]] // CHECK: [[EXEC_PARALLEL]]
@@ -575,7 +531,7 @@ int bar(int n){
// CHECK: [[EXIT]] // CHECK: [[EXIT]]
// CHECK: ret void // CHECK: ret void
// CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l305]](i[[SZ]] // CHECK: define {{.*}}void [[T7:@__omp_offloading_.+template.+l281]](i[[SZ]]
// Create local storage for each capture. // Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]] // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -588,42 +544,36 @@ int bar(int n){
// CHECK-DAG: [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16* // CHECK-DAG: [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
// CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]], // CHECK-DAG: [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
// //
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[A:%.+]] = sub i32 [[WS]], 1
// CHECK-DAG: [[TH_LIMIT:%.+]] = sub i32 [[NTH]], [[WS]] // CHECK: [[B:%.+]] = sub i32 [[NTID]], 1
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]] // CHECK: [[MID:%.+]] = and i32 [[B]],
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]] // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
// CHECK: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
//
// CHECK: [[CHECK_WORKER]]
// CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
// CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
// //
// CHECK: [[WORKER]] // CHECK: [[WORKER]]
// CHECK: {{call|invoke}} void [[T6]]_worker() // CHECK: {{call|invoke}} void [[T7]]_worker()
// CHECK: br label {{%?}}[[EXIT:.+]] // CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[CHECK_MASTER]]
// CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
// CHECK: [[CMTMP1:%.+]] = sub i32 [[CMWS]], 1
// CHECK: [[CMTMP2:%.+]] = sub i32 [[CMNTH]], 1
// CHECK: [[MID:%.+]] = and i32 [[CMTMP2]],
// CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], [[MID]]
// CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
// //
// CHECK: [[MASTER]] // CHECK: [[MASTER]]
// CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
// CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// //
// CHECK-64-DAG: load i32, i32* [[REF_A]] // CHECK-64-DAG: load i32, i32* [[REF_A]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_A]] // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
// CHECK-DAG: load i16, i16* [[REF_AA]] // CHECK-DAG: load i16, i16* [[REF_AA]]
// CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2 // CHECK-DAG: getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
// //
// CHECK: br label {{%?}}[[TERMINATE:.+]] // CHECK: br label {{%?}}[[TERM:.+]]
// //
// CHECK: [[TERMINATE]] // CHECK: [[TERM]]
// CHECK: call void @__kmpc_kernel_deinit() // CHECK: store i64 0, i64 addrspace(3)* [[OMP_WID]],
// CHECK: call void @llvm.nvvm.barrier0() // CHECK: call void @llvm.nvvm.barrier0()
// CHECK: br label {{%?}}[[EXIT]] // CHECK: br label {{%?}}[[EXIT]]
// //
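Taken together, the CHECK lines above describe the entry-function shape this revert restores: excess threads in the master warp exit immediately, lower-numbered threads enter the worker loop, and only the master initializes the runtime and runs the sequential region before signalling termination. A CUDA-flavoured sketch of that flow (the __kmpc_kernel_init declaration mirrors the two-argument form built in createNVPTXRuntimeFunction; its definition is assumed to come from the device-side OpenMP runtime):

// Sketch only: thread partitioning performed by emitEntryHeader/emitEntryFooter.
extern "C" __device__ void __kmpc_kernel_init(int omp_handle, int thread_limit);

__global__ void target_region_entry() {
  int master = (blockDim.x - 1) & ~(warpSize - 1);
  int tid = (int)threadIdx.x;

  if (tid > master)              // .exit: surplus threads in the master warp
    return;
  if (tid < master) {            // .worker: park in the worker loop
    // worker_loop(...) as sketched after the header diff; it returns once
    // the master stores 0 to the shared work id.
    return;
  }
  // .master: only the master thread runs the sequential region.
  __kmpc_kernel_init(/*omp_handle=*/0, tid);
  // ... user code for the target region ...
  // .termination.notifier: store 0 to the shared work id (elided here), then
  // barrier so the waiting workers observe it and exit.
  __syncthreads();
}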