2014-05-06 18:08:46 +08:00
|
|
|
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This provides a class for OpenMP runtime code generation.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-08-14 00:25:19 +08:00
|
|
|
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
|
|
|
|
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
|
2014-05-06 18:08:46 +08:00
|
|
|
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
#include "CGValue.h"
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
#include "clang/AST/Type.h"
|
2014-12-15 15:07:06 +08:00
|
|
|
#include "clang/Basic/OpenMPKinds.h"
|
2015-01-14 19:29:14 +08:00
|
|
|
#include "clang/Basic/SourceLocation.h"
|
2014-05-06 18:08:46 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2016-02-02 22:24:21 +08:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2014-09-22 18:01:53 +08:00
|
|
|
#include "llvm/ADT/StringMap.h"
|
2016-02-02 22:24:21 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
2014-11-11 12:05:39 +08:00
|
|
|
#include "llvm/IR/ValueHandle.h"
|
2014-05-06 18:08:46 +08:00
|
|
|
|
2014-10-10 20:19:54 +08:00
|
|
|
namespace llvm {
|
|
|
|
class ArrayType;
|
|
|
|
class Constant;
|
|
|
|
class FunctionType;
|
2014-11-11 12:05:39 +08:00
|
|
|
class GlobalVariable;
|
2014-10-10 20:19:54 +08:00
|
|
|
class StructType;
|
|
|
|
class Type;
|
|
|
|
class Value;
|
|
|
|
} // namespace llvm
|
2014-10-08 22:01:46 +08:00
|
|
|
|
2014-10-10 20:19:54 +08:00
|
|
|
namespace clang {
|
2014-11-20 12:34:54 +08:00
|
|
|
class Expr;
|
2016-01-06 21:42:12 +08:00
|
|
|
class GlobalDecl;
|
2016-05-25 20:36:08 +08:00
|
|
|
class OMPDependClause;
|
2014-10-10 20:19:54 +08:00
|
|
|
class OMPExecutableDirective;
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
class OMPLoopDirective;
|
2014-10-10 20:19:54 +08:00
|
|
|
class VarDecl;
|
2016-03-04 17:22:22 +08:00
|
|
|
class OMPDeclareReductionDecl;
|
|
|
|
class IdentifierInfo;
|
2014-10-08 22:01:46 +08:00
|
|
|
|
2014-10-10 20:19:54 +08:00
|
|
|
namespace CodeGen {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
class Address;
|
2014-10-10 20:19:54 +08:00
|
|
|
class CodeGenFunction;
|
|
|
|
class CodeGenModule;
|
2014-05-06 18:08:46 +08:00
|
|
|
|
2016-03-29 13:34:15 +08:00
|
|
|
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
|
|
|
|
/// region.
|
|
|
|
class PrePostActionTy {
|
|
|
|
public:
|
|
|
|
explicit PrePostActionTy() {}
|
|
|
|
virtual void Enter(CodeGenFunction &CGF) {}
|
|
|
|
virtual void Exit(CodeGenFunction &CGF) {}
|
|
|
|
virtual ~PrePostActionTy() {}
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased address of the wrapped callable, stored as an integer so
  /// callables of any type can be held without making this class a template.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  /// Trampoline that knows the real type of CodeGen (see CallbackFn).
  CodeGenTy Callback;
  /// Optional pre|post action; mutable so it can be installed through a
  /// const reference via setAction().
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Casts \p CodeGen back to its original Callable type and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps \p CodeGen by address: the callable is NOT copied, so it must
  /// outlive this RegionCodeGenTy object. The enable_if constraint keeps
  /// this constructor from being selected instead of the implicit copy
  /// constructor when the argument is itself a RegionCodeGenTy.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      typename std::enable_if<
          !std::is_same<typename std::remove_reference<Callable>::type,
                        RegionCodeGenTy>::value>::type * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<typename std::remove_reference<Callable>::type>),
        PrePostAction(nullptr) {}
  /// Installs a pre|post action to be used by operator(). \p Action must
  /// outlive this object.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  /// Runs the stored callable (defined out of line in CGOpenMPRuntime.cpp;
  /// presumably wraps the call with PrePostAction when one is set — confirm
  /// against the implementation).
  void operator()(CodeGenFunction &CGF) const;
};
|
2015-04-10 12:50:10 +08:00
|
|
|
|
2016-04-28 17:23:51 +08:00
|
|
|
/// Bundle of expressions and values collected from the clauses of an OpenMP
/// task-generating directive, consumed during task codegen.
struct OMPTaskDataTy final {
  /// Variables from 'private' clauses.
  SmallVector<const Expr *, 4> PrivateVars;
  /// Helper expressions for creating the private copies.
  SmallVector<const Expr *, 4> PrivateCopies;
  /// Variables from 'firstprivate' clauses.
  SmallVector<const Expr *, 4> FirstprivateVars;
  /// Helper expressions for creating the firstprivate copies.
  SmallVector<const Expr *, 4> FirstprivateCopies;
  /// Helper expressions for initializing the firstprivate copies.
  SmallVector<const Expr *, 4> FirstprivateInits;
  /// Variables from 'lastprivate' clauses.
  SmallVector<const Expr *, 4> LastprivateVars;
  /// Helper expressions for creating the lastprivate copies.
  SmallVector<const Expr *, 4> LastprivateCopies;
  /// Variables from task 'reduction' clauses.
  SmallVector<const Expr *, 4> ReductionVars;
  /// Helper expressions for the reduction private copies.
  SmallVector<const Expr *, 4> ReductionCopies;
  /// Helper expressions performing the reduction operations.
  SmallVector<const Expr *, 4> ReductionOps;
  /// Dependency kind + expression pairs from 'depend' clauses.
  SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
  /// Value of the 'final' clause; the bool presumably records whether the
  /// clause was present/constant — confirm against the task codegen.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  /// Value of the grainsize/num_tasks scheduling clause (taskloop); see the
  /// users of this field for the meaning of the flag bit.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  /// Value of the 'priority' clause; the flag bit likewise mirrors clause
  /// presence — confirm against the task codegen.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  /// Reduction descriptor value, or nullptr if there are no task reductions.
  llvm::Value *Reductions = nullptr;
  /// Number of parts the task is split into (used by untied task codegen).
  unsigned NumberOfParts = 0;
  /// False if an 'untied' clause was seen.
  bool Tied = true;
  /// True if a 'nogroup' clause was seen (taskloop).
  bool Nogroup = false;
};
|
|
|
|
|
2017-07-13 21:36:14 +08:00
|
|
|
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the original shared item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
        : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;

  /// List of addresses of original shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// \param Shareds Original shared expressions; \p Privates and
  /// \p ReductionOps are the matching helper expressions (by index).
  ReductionCodeGen(ArrayRef<const Expr *> Shareds,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for a reduction item.
  /// \param N Number of the reduction item.
  void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
|
|
|
|
|
2014-05-06 18:08:46 +08:00
|
|
|
class CGOpenMPRuntime {
|
2016-03-22 09:48:56 +08:00
|
|
|
protected:
|
2014-05-06 18:08:46 +08:00
|
|
|
CodeGenModule &CGM;
|
2016-03-22 09:48:56 +08:00
|
|
|
|
|
|
|
/// \brief Creates offloading entry for the provided entry ID \a ID,
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
/// address \a Addr, size \a Size, and flags \a Flags.
|
2016-03-22 09:48:56 +08:00
|
|
|
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
uint64_t Size, int32_t Flags = 0);
|
2016-03-22 09:48:56 +08:00
|
|
|
|
|
|
|
/// \brief Helper to emit outlined function for 'target' directive.
|
|
|
|
/// \param D Directive to emit.
|
|
|
|
/// \param ParentName Name of the function that encloses the target region.
|
|
|
|
/// \param OutlinedFn Outlined function value to be defined by this call.
|
|
|
|
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
|
|
|
|
/// \param IsOffloadEntry True if the outlined function is an offload entry.
|
|
|
|
/// \param CodeGen Lambda codegen specific to an accelerator device.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// An outlined function may not be an entry if, e.g. the if clause always
|
2016-03-22 09:48:56 +08:00
|
|
|
/// evaluates to false.
|
|
|
|
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
|
|
|
|
StringRef ParentName,
|
|
|
|
llvm::Function *&OutlinedFn,
|
|
|
|
llvm::Constant *&OutlinedFnID,
|
|
|
|
bool IsOffloadEntry,
|
|
|
|
const RegionCodeGenTy &CodeGen);
|
|
|
|
|
2017-01-10 23:42:51 +08:00
|
|
|
/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
|
|
|
|
/// function. Here is the logic:
|
|
|
|
/// if (Cond) {
|
|
|
|
/// ThenGen();
|
|
|
|
/// } else {
|
|
|
|
/// ElseGen();
|
|
|
|
/// }
|
|
|
|
void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
|
|
|
|
const RegionCodeGenTy &ThenGen,
|
|
|
|
const RegionCodeGenTy &ElseGen);
|
|
|
|
|
|
|
|
/// \brief Emits object of ident_t type with info for source location.
|
|
|
|
/// \param Flags Flags for OpenMP location.
|
|
|
|
///
|
|
|
|
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
unsigned Flags = 0);
|
|
|
|
|
|
|
|
/// \brief Returns pointer to ident_t type.
|
|
|
|
llvm::Type *getIdentTyPointerTy();
|
|
|
|
|
|
|
|
/// \brief Gets thread id value for the current thread.
|
|
|
|
///
|
|
|
|
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
|
|
|
|
|
|
|
|
/// \brief Get the function name of an outlined region.
|
|
|
|
// The name can be customized depending on the target.
|
|
|
|
//
|
|
|
|
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
|
|
|
|
|
2016-03-22 09:48:56 +08:00
|
|
|
private:
|
2014-05-06 18:08:46 +08:00
|
|
|
/// \brief Default const ident_t object used for initialization of all other
|
|
|
|
/// ident_t objects.
|
2016-03-04 17:22:22 +08:00
|
|
|
llvm::Constant *DefaultOpenMPPSource = nullptr;
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \brief Map of flags and corresponding default locations.
|
2014-05-07 14:18:01 +08:00
|
|
|
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
|
|
|
|
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
|
2016-02-19 18:38:26 +08:00
|
|
|
Address getOrCreateDefaultLocation(unsigned Flags);
|
|
|
|
|
2016-03-29 13:34:15 +08:00
|
|
|
llvm::StructType *IdentTy = nullptr;
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \brief Map for SourceLocation and OpenMP runtime library debug locations.
|
2014-05-30 13:48:40 +08:00
|
|
|
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
|
|
|
|
OpenMPDebugLocMapTy OpenMPDebugLocMap;
|
2014-05-06 18:08:46 +08:00
|
|
|
/// \brief The type for a microtask which gets passed to __kmpc_fork_call().
|
|
|
|
/// Original representation is:
|
|
|
|
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
|
2016-03-29 13:34:15 +08:00
|
|
|
llvm::FunctionType *Kmpc_MicroTy = nullptr;
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \brief Stores debug location and ThreadID for the function.
|
|
|
|
struct DebugLocThreadIdTy {
|
|
|
|
llvm::Value *DebugLoc;
|
|
|
|
llvm::Value *ThreadID;
|
|
|
|
};
|
|
|
|
/// \brief Map of local debug location, ThreadId and functions.
|
|
|
|
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
|
|
|
|
OpenMPLocThreadIDMapTy;
|
|
|
|
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
|
2016-03-04 17:22:22 +08:00
|
|
|
/// Map of UDRs and corresponding combiner/initializer.
|
|
|
|
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
|
|
|
|
std::pair<llvm::Function *, llvm::Function *>>
|
|
|
|
UDRMapTy;
|
|
|
|
UDRMapTy UDRMap;
|
|
|
|
/// Map of functions and locally defined UDRs.
|
|
|
|
typedef llvm::DenseMap<llvm::Function *,
|
|
|
|
SmallVector<const OMPDeclareReductionDecl *, 4>>
|
|
|
|
FunctionUDRMapTy;
|
|
|
|
FunctionUDRMapTy FunctionUDRMap;
|
|
|
|
IdentifierInfo *In = nullptr;
|
|
|
|
IdentifierInfo *Out = nullptr;
|
|
|
|
IdentifierInfo *Priv = nullptr;
|
|
|
|
IdentifierInfo *Orig = nullptr;
|
2014-09-22 18:01:53 +08:00
|
|
|
/// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
|
|
|
|
/// kmp_critical_name[8];
|
|
|
|
llvm::ArrayType *KmpCriticalNameTy;
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \brief An ordered map of auto-generated variables to their unique names.
|
|
|
|
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
|
|
|
|
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
|
|
|
|
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
|
|
|
|
/// variables.
|
|
|
|
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
|
|
|
|
InternalVars;
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// \brief Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
|
2016-03-04 17:22:22 +08:00
|
|
|
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
QualType KmpRoutineEntryPtrQTy;
|
2015-05-18 15:54:53 +08:00
|
|
|
/// \brief Type typedef struct kmp_task {
|
|
|
|
/// void * shareds; /**< pointer to block of pointers to
|
|
|
|
/// shared vars */
|
|
|
|
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
|
|
|
|
/// executing task */
|
|
|
|
/// kmp_int32 part_id; /**< part id for the task */
|
|
|
|
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
|
|
|
|
/// destructors of firstprivate C++ objects */
|
|
|
|
/// } kmp_task_t;
|
|
|
|
QualType KmpTaskTQTy;
|
[OPENMP] Codegen for 'depend' clause (OpenMP 4.0).
If task directive has associated 'depend' clause then function kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called instead of __kmpc_omp_task().
If this directive has associated 'if' clause then also before a call of kmpc_omp_task_begin_if0() a function void __kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called.
Array sections are not supported yet.
llvm-svn: 240532
2015-06-24 19:01:36 +08:00
|
|
|
/// \brief Type typedef struct kmp_depend_info {
|
|
|
|
/// kmp_intptr_t base_addr;
|
|
|
|
/// size_t len;
|
|
|
|
/// struct {
|
|
|
|
/// bool in:1;
|
|
|
|
/// bool out:1;
|
|
|
|
/// } flags;
|
|
|
|
/// } kmp_depend_info_t;
|
|
|
|
QualType KmpDependInfoTy;
|
2016-05-25 20:36:08 +08:00
|
|
|
/// struct kmp_dim { // loop bounds info casted to kmp_int64
|
|
|
|
/// kmp_int64 lo; // lower
|
|
|
|
/// kmp_int64 up; // upper
|
|
|
|
/// kmp_int64 st; // stride
|
|
|
|
/// };
|
|
|
|
QualType KmpDimTy;
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \brief Type struct __tgt_offload_entry{
|
|
|
|
/// void *addr; // Pointer to the offload entry info.
|
|
|
|
/// // (function or global)
|
|
|
|
/// char *name; // Name of the function or global.
|
|
|
|
/// size_t size; // Size of the entry info (0 if it a function).
|
|
|
|
/// };
|
|
|
|
QualType TgtOffloadEntryQTy;
|
|
|
|
/// struct __tgt_device_image{
|
|
|
|
/// void *ImageStart; // Pointer to the target code start.
|
|
|
|
/// void *ImageEnd; // Pointer to the target code end.
|
|
|
|
/// // We also add the host entries to the device image, as it may be useful
|
|
|
|
/// // for the target runtime to have access to that information.
|
|
|
|
/// __tgt_offload_entry *EntriesBegin; // Begin of the table with all
|
|
|
|
/// // the entries.
|
|
|
|
/// __tgt_offload_entry *EntriesEnd; // End of the table with all the
|
|
|
|
/// // entries (non inclusive).
|
|
|
|
/// };
|
|
|
|
QualType TgtDeviceImageQTy;
|
|
|
|
/// struct __tgt_bin_desc{
|
|
|
|
/// int32_t NumDevices; // Number of devices supported.
|
|
|
|
/// __tgt_device_image *DeviceImages; // Arrays of device images
|
|
|
|
/// // (one per device).
|
|
|
|
/// __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
|
|
|
|
/// // entries.
|
|
|
|
/// __tgt_offload_entry *EntriesEnd; // End of the table with all the
|
|
|
|
/// // entries (non inclusive).
|
|
|
|
/// };
|
|
|
|
QualType TgtBinaryDescriptorQTy;
|
|
|
|
/// \brief Entity that registers the offloading constants that were emitted so
|
|
|
|
/// far.
|
|
|
|
class OffloadEntriesInfoManagerTy {
|
|
|
|
CodeGenModule &CGM;
|
|
|
|
|
|
|
|
/// \brief Number of entries registered so far.
|
|
|
|
unsigned OffloadingEntriesNum;
|
|
|
|
|
|
|
|
public:
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
/// Base class of the entries info.
|
2016-01-06 21:42:12 +08:00
|
|
|
/// Base class for the offload entries info: holds the metadata shared by
/// every kind of offload entry (flags, emission order and entry kind).
class OffloadEntryInfo {
public:
  /// Kind of a given entry. Currently, only target regions are
  /// supported.
  enum OffloadingEntryInfoKinds : unsigned {
    // Entry is a target region.
    OFFLOAD_ENTRY_INFO_TARGET_REGION = 0,
    // Invalid entry info.
    OFFLOAD_ENTRY_INFO_INVALID = ~0u
  };

  /// Default-constructed entries are invalid: Order == ~0u marks an entry
  /// that was never registered (see isValid()).
  OffloadEntryInfo()
      : Flags(0), Order(~0u), Kind(OFFLOAD_ENTRY_INFO_INVALID) {}
  explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                            int32_t Flags)
      : Flags(Flags), Order(Order), Kind(Kind) {}

  /// Return true if this entry was assigned an emission order.
  bool isValid() const { return Order != ~0u; }
  /// Return the order in which this entry was emitted.
  unsigned getOrder() const { return Order; }
  OffloadingEntryInfoKinds getKind() const { return Kind; }
  int32_t getFlags() const { return Flags; }
  void setFlags(int32_t NewFlags) { Flags = NewFlags; }
  // LLVM-style RTTI root: every entry info is-a OffloadEntryInfo.
  static bool classof(const OffloadEntryInfo *Info) { return true; }

private:
  /// Flags associated with the device global.
  int32_t Flags;
  /// Order this entry was emitted.
  unsigned Order;
  /// Discriminator for LLVM-style RTTI (see classof in subclasses).
  OffloadingEntryInfoKinds Kind;
};
|
|
|
|
|
|
|
|
/// \brief Return true if a there are no entries defined.
|
|
|
|
bool empty() const;
|
|
|
|
/// \brief Return number of entries defined so far.
|
|
|
|
unsigned size() const { return OffloadingEntriesNum; }
|
|
|
|
OffloadEntriesInfoManagerTy(CodeGenModule &CGM)
|
|
|
|
: CGM(CGM), OffloadingEntriesNum(0) {}
|
|
|
|
|
|
|
|
///
|
|
|
|
/// Target region entries related.
|
|
|
|
///
|
|
|
|
/// \brief Target region entries info.
|
|
|
|
class OffloadEntryInfoTargetRegion : public OffloadEntryInfo {
|
|
|
|
// \brief Address of the entity that has to be mapped for offloading.
|
|
|
|
llvm::Constant *Addr;
|
|
|
|
// \brief Address that can be used as the ID of the entry.
|
|
|
|
llvm::Constant *ID;
|
|
|
|
|
|
|
|
public:
|
|
|
|
OffloadEntryInfoTargetRegion()
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
: OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, ~0u,
|
|
|
|
/*Flags=*/0),
|
2016-01-06 21:42:12 +08:00
|
|
|
Addr(nullptr), ID(nullptr) {}
|
|
|
|
explicit OffloadEntryInfoTargetRegion(unsigned Order,
|
|
|
|
llvm::Constant *Addr,
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
llvm::Constant *ID, int32_t Flags)
|
|
|
|
: OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, Order, Flags),
|
2016-01-06 21:42:12 +08:00
|
|
|
Addr(Addr), ID(ID) {}
|
|
|
|
|
|
|
|
llvm::Constant *getAddress() const { return Addr; }
|
|
|
|
llvm::Constant *getID() const { return ID; }
|
|
|
|
void setAddress(llvm::Constant *V) {
|
|
|
|
assert(!Addr && "Address as been set before!");
|
|
|
|
Addr = V;
|
|
|
|
}
|
|
|
|
void setID(llvm::Constant *V) {
|
|
|
|
assert(!ID && "ID as been set before!");
|
|
|
|
ID = V;
|
|
|
|
}
|
|
|
|
static bool classof(const OffloadEntryInfo *Info) {
|
|
|
|
return Info->getKind() == OFFLOAD_ENTRY_INFO_TARGET_REGION;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
/// \brief Initialize target region entry.
|
|
|
|
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
|
|
|
|
StringRef ParentName, unsigned LineNum,
|
[OpenMP] Rename the offload entry points.
Summary:
Unlike other outlined regions in OpenMP, offloading entry points have to have be visible (external linkage) for the device side. Using dots in the names of the entries can be therefore problematic for some toolchains, e.g. NVPTX.
Also the patch drops the column information in the unique name of the entry points. The parsing of directives ignore unknown tokens, preventing several target regions to be implemented in the same line. Therefore, the line information is sufficient for the name to be unique. Also, the preprocessor printer does not preserve the column information, causing offloading-entry detection issues if the host uses an integrated preprocessor and the target doesn't (or vice versa).
Reviewers: hfinkel, arpith-jacob, carlo.bertolli, kkwli0, ABataev
Subscribers: cfe-commits, fraggamuffin, caomhin
Differential Revision: http://reviews.llvm.org/D17179
llvm-svn: 260837
2016-02-14 07:35:10 +08:00
|
|
|
unsigned Order);
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \brief Register target region entry.
|
|
|
|
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
|
|
|
|
StringRef ParentName, unsigned LineNum,
|
[OpenMP] Add fields for flags in the offload entry descriptor.
Summary:
This patch adds two fields to the offload entry descriptor. One field is meant to signal Ctors/Dtors and `link` global variables, and the other is reserved for runtime library use.
Currently, these fields are only filled with zeros in the current code generation, but that will change when `declare target` is added.
The reason, we are adding these fields now is to make the code generation consistent with the runtime library proposal under review in https://reviews.llvm.org/D14031.
Reviewers: ABataev, hfinkel, carlo.bertolli, kkwli0, arpith-jacob, Hahnfeld
Subscribers: cfe-commits, caomhin, jholewinski
Differential Revision: https://reviews.llvm.org/D28298
llvm-svn: 291124
2017-01-06 00:02:49 +08:00
|
|
|
llvm::Constant *Addr, llvm::Constant *ID,
|
|
|
|
int32_t Flags);
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \brief Return true if a target region entry with the provided
|
|
|
|
/// information exists.
|
|
|
|
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
|
[OpenMP] Rename the offload entry points.
Summary:
Unlike other outlined regions in OpenMP, offloading entry points have to have be visible (external linkage) for the device side. Using dots in the names of the entries can be therefore problematic for some toolchains, e.g. NVPTX.
Also the patch drops the column information in the unique name of the entry points. The parsing of directives ignore unknown tokens, preventing several target regions to be implemented in the same line. Therefore, the line information is sufficient for the name to be unique. Also, the preprocessor printer does not preserve the column information, causing offloading-entry detection issues if the host uses an integrated preprocessor and the target doesn't (or vice versa).
Reviewers: hfinkel, arpith-jacob, carlo.bertolli, kkwli0, ABataev
Subscribers: cfe-commits, fraggamuffin, caomhin
Differential Revision: http://reviews.llvm.org/D17179
llvm-svn: 260837
2016-02-14 07:35:10 +08:00
|
|
|
StringRef ParentName, unsigned LineNum) const;
|
2016-01-06 21:42:12 +08:00
|
|
|
/// brief Applies action \a Action on all registered entries.
|
|
|
|
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
|
[OpenMP] Rename the offload entry points.
Summary:
Unlike other outlined regions in OpenMP, offloading entry points have to have be visible (external linkage) for the device side. Using dots in the names of the entries can be therefore problematic for some toolchains, e.g. NVPTX.
Also the patch drops the column information in the unique name of the entry points. The parsing of directives ignore unknown tokens, preventing several target regions to be implemented in the same line. Therefore, the line information is sufficient for the name to be unique. Also, the preprocessor printer does not preserve the column information, causing offloading-entry detection issues if the host uses an integrated preprocessor and the target doesn't (or vice versa).
Reviewers: hfinkel, arpith-jacob, carlo.bertolli, kkwli0, ABataev
Subscribers: cfe-commits, fraggamuffin, caomhin
Differential Revision: http://reviews.llvm.org/D17179
llvm-svn: 260837
2016-02-14 07:35:10 +08:00
|
|
|
OffloadEntryInfoTargetRegion &)>
|
2016-01-06 21:42:12 +08:00
|
|
|
OffloadTargetRegionEntryInfoActTy;
|
|
|
|
void actOnTargetRegionEntriesInfo(
|
|
|
|
const OffloadTargetRegionEntryInfoActTy &Action);
|
|
|
|
|
|
|
|
private:
|
|
|
|
// Storage for target region entries kind. The storage is to be indexed by
|
[OpenMP] Rename the offload entry points.
Summary:
Unlike other outlined regions in OpenMP, offloading entry points have to have be visible (external linkage) for the device side. Using dots in the names of the entries can be therefore problematic for some toolchains, e.g. NVPTX.
Also the patch drops the column information in the unique name of the entry points. The parsing of directives ignore unknown tokens, preventing several target regions to be implemented in the same line. Therefore, the line information is sufficient for the name to be unique. Also, the preprocessor printer does not preserve the column information, causing offloading-entry detection issues if the host uses an integrated preprocessor and the target doesn't (or vice versa).
Reviewers: hfinkel, arpith-jacob, carlo.bertolli, kkwli0, ABataev
Subscribers: cfe-commits, fraggamuffin, caomhin
Differential Revision: http://reviews.llvm.org/D17179
llvm-svn: 260837
2016-02-14 07:35:10 +08:00
|
|
|
// file ID, device ID, parent function name and line number.
|
2016-01-06 21:42:12 +08:00
|
|
|
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
|
|
|
|
OffloadEntriesTargetRegionPerLine;
|
|
|
|
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
|
|
|
|
OffloadEntriesTargetRegionPerParentName;
|
|
|
|
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
|
|
|
|
OffloadEntriesTargetRegionPerFile;
|
|
|
|
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
|
|
|
|
OffloadEntriesTargetRegionPerDevice;
|
|
|
|
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
|
|
|
|
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
|
|
|
|
};
|
|
|
|
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
|
|
|
|
|
|
|
|
/// \brief Creates and registers offloading binary descriptor for the current
|
|
|
|
/// compilation unit. The function that does the registration is returned.
|
|
|
|
llvm::Function *createOffloadingBinaryDescriptorRegistration();
|
|
|
|
|
|
|
|
/// \brief Creates all the offload entries in the current compilation unit
|
|
|
|
/// along with the associated metadata.
|
|
|
|
void createOffloadEntriesAndInfoMetadata();
|
|
|
|
|
|
|
|
/// \brief Loads all the offload entries information from the host IR
|
|
|
|
/// metadata.
|
|
|
|
void loadOffloadInfoMetadata();
|
[OPENMP] Codegen for 'depend' clause (OpenMP 4.0).
If task directive has associated 'depend' clause then function kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called instead of __kmpc_omp_task().
If this directive has associated 'if' clause then also before a call of kmpc_omp_task_begin_if0() a function void __kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called.
Array sections are not supported yet.
llvm-svn: 240532
2015-06-24 19:01:36 +08:00
|
|
|
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \brief Returns __tgt_offload_entry type.
|
|
|
|
QualType getTgtOffloadEntryQTy();
|
|
|
|
|
|
|
|
/// \brief Returns __tgt_device_image type.
|
|
|
|
QualType getTgtDeviceImageQTy();
|
|
|
|
|
|
|
|
/// \brief Returns __tgt_bin_desc type.
|
|
|
|
QualType getTgtBinaryDescriptorQTy();
|
|
|
|
|
|
|
|
/// \brief Start scanning from statement \a S and and emit all target regions
|
|
|
|
/// found along the way.
|
|
|
|
/// \param S Starting statement.
|
|
|
|
/// \param ParentName Name of the function declaration that is being scanned.
|
|
|
|
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
|
|
|
|
/// \brief Build type kmp_routine_entry_t (if not built yet).
|
|
|
|
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
|
2014-05-06 18:08:46 +08:00
|
|
|
|
[OPENMP] Codegen for 'if' clause in 'parallel' directive.
Adds codegen for 'if' clause. Currently only for 'if' clause used with the 'parallel' directive.
If condition evaluates to true, the code executes parallel version of the code by calling __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/), where loc - debug location, 1 - number of additional parameters after "microtask" argument, microtask - is outlined finction for the code associated with the 'parallel' directive, captured_struct - list of variables captured in this outlined function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
global_thread_id.addr = alloca i32
store i32 global_thread_id, global_thread_id.addr
zero.addr = alloca i32
store i32 0, zero.addr
kmpc_serialized_parallel(loc, global_thread_id);
microtask(global_thread_id.addr, zero.addr, captured_struct/*context*/);
kmpc_end_serialized_parallel(loc, global_thread_id);
Where loc - debug location, global_thread_id - global thread id, returned by __kmpc_global_thread_num() call or passed as a first parameter in microtask() call, global_thread_id.addr - address of the variable, where stored global_thread_id value, zero.addr - implicit bound thread id (should be set to 0 for serial call), microtask() and captured_struct are the same as in parallel call.
Also this patch checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D4716
llvm-svn: 219597
2014-10-13 14:02:40 +08:00
|
|
|
/// \brief Returns pointer to kmpc_micro type.
|
2014-05-06 18:08:46 +08:00
|
|
|
llvm::Type *getKmpc_MicroPointerTy();
|
|
|
|
|
|
|
|
/// \brief Returns specified OpenMP runtime function.
|
|
|
|
/// \param Function OpenMP runtime function.
|
|
|
|
/// \return Specified function.
|
2016-02-19 18:38:26 +08:00
|
|
|
llvm::Constant *createRuntimeFunction(unsigned Function);
|
2014-09-22 18:01:53 +08:00
|
|
|
|
2015-03-13 18:38:23 +08:00
|
|
|
/// \brief Returns __kmpc_for_static_init_* runtime function for the specified
|
|
|
|
/// size \a IVSize and sign \a IVSigned.
|
|
|
|
llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
|
|
|
|
|
2015-03-12 21:37:50 +08:00
|
|
|
/// \brief Returns __kmpc_dispatch_init_* runtime function for the specified
|
|
|
|
/// size \a IVSize and sign \a IVSigned.
|
|
|
|
llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
|
|
|
|
|
|
|
|
/// \brief Returns __kmpc_dispatch_next_* runtime function for the specified
|
|
|
|
/// size \a IVSize and sign \a IVSigned.
|
|
|
|
llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
|
|
|
|
|
2015-04-22 19:15:40 +08:00
|
|
|
/// \brief Returns __kmpc_dispatch_fini_* runtime function for the specified
|
|
|
|
/// size \a IVSize and sign \a IVSigned.
|
|
|
|
llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
|
|
|
|
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \brief If the specified mangled name is not in the module, create and
|
|
|
|
/// return threadprivate cache object. This object is a pointer's worth of
|
|
|
|
/// storage that's reserved for use by the OpenMP runtime.
|
2014-11-11 15:58:06 +08:00
|
|
|
/// \param VD Threadprivate variable.
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \return Cache variable for the specified threadprivate.
|
|
|
|
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
|
|
|
|
|
[OPENMP] Codegen for 'if' clause in 'parallel' directive.
Adds codegen for 'if' clause. Currently only for 'if' clause used with the 'parallel' directive.
If condition evaluates to true, the code executes parallel version of the code by calling __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/), where loc - debug location, 1 - number of additional parameters after "microtask" argument, microtask - is outlined finction for the code associated with the 'parallel' directive, captured_struct - list of variables captured in this outlined function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
global_thread_id.addr = alloca i32
store i32 global_thread_id, global_thread_id.addr
zero.addr = alloca i32
store i32 0, zero.addr
kmpc_serialized_parallel(loc, global_thread_id);
microtask(global_thread_id.addr, zero.addr, captured_struct/*context*/);
kmpc_end_serialized_parallel(loc, global_thread_id);
Where loc - debug location, global_thread_id - global thread id, returned by __kmpc_global_thread_num() call or passed as a first parameter in microtask() call, global_thread_id.addr - address of the variable, where stored global_thread_id value, zero.addr - implicit bound thread id (should be set to 0 for serial call), microtask() and captured_struct are the same as in parallel call.
Also this patch checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D4716
llvm-svn: 219597
2014-10-13 14:02:40 +08:00
|
|
|
/// \brief Emits address of the word in a memory where current thread id is
|
|
|
|
/// stored.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
|
[OPENMP] Codegen for 'if' clause in 'parallel' directive.
Adds codegen for 'if' clause. Currently only for 'if' clause used with the 'parallel' directive.
If condition evaluates to true, the code executes parallel version of the code by calling __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/), where loc - debug location, 1 - number of additional parameters after "microtask" argument, microtask - is outlined finction for the code associated with the 'parallel' directive, captured_struct - list of variables captured in this outlined function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
global_thread_id.addr = alloca i32
store i32 global_thread_id, global_thread_id.addr
zero.addr = alloca i32
store i32 0, zero.addr
kmpc_serialized_parallel(loc, global_thread_id);
microtask(global_thread_id.addr, zero.addr, captured_struct/*context*/);
kmpc_end_serialized_parallel(loc, global_thread_id);
Where loc - debug location, global_thread_id - global thread id, returned by __kmpc_global_thread_num() call or passed as a first parameter in microtask() call, global_thread_id.addr - address of the variable, where stored global_thread_id value, zero.addr - implicit bound thread id (should be set to 0 for serial call), microtask() and captured_struct are the same as in parallel call.
Also this patch checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D4716
llvm-svn: 219597
2014-10-13 14:02:40 +08:00
|
|
|
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \brief Gets (if variable with the given name already exist) or creates
|
|
|
|
/// internal global variable with the specified Name. The created variable has
|
|
|
|
/// linkage CommonLinkage by default and is initialized by null value.
|
|
|
|
/// \param Ty Type of the global variable. If it is exist already the type
|
|
|
|
/// must be the same.
|
|
|
|
/// \param Name Name of the variable.
|
2015-02-25 16:32:46 +08:00
|
|
|
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
|
2014-11-11 12:05:39 +08:00
|
|
|
const llvm::Twine &Name);
|
|
|
|
|
|
|
|
/// \brief Set of threadprivate variables with the generated initializer.
|
2016-02-02 22:24:21 +08:00
|
|
|
llvm::SmallPtrSet<const VarDecl *, 4> ThreadPrivateWithDefinition;
|
2014-11-11 12:05:39 +08:00
|
|
|
|
|
|
|
/// \brief Emits initialization code for the threadprivate variables.
|
|
|
|
/// \param VDAddr Address of the global variable \a VD.
|
|
|
|
/// \param Ctor Pointer to a global init function for \a VD.
|
|
|
|
/// \param CopyCtor Pointer to a global copy function for \a VD.
|
|
|
|
/// \param Dtor Pointer to a global destructor function for \a VD.
|
|
|
|
/// \param Loc Location of threadprivate declaration.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
|
2015-02-25 16:32:46 +08:00
|
|
|
llvm::Value *Ctor, llvm::Value *CopyCtor,
|
|
|
|
llvm::Value *Dtor, SourceLocation Loc);
|
2014-11-11 12:05:39 +08:00
|
|
|
|
2014-12-01 19:32:38 +08:00
|
|
|
/// \brief Returns corresponding lock object for the specified critical region
|
|
|
|
/// name. If the lock object does not exist it is created, otherwise the
|
|
|
|
/// reference to the existing copy is returned.
|
|
|
|
/// \param CriticalName Name of the critical region.
|
|
|
|
///
|
2015-02-25 16:32:46 +08:00
|
|
|
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
|
2014-12-01 19:32:38 +08:00
|
|
|
|
2016-04-28 17:23:51 +08:00
|
|
|
struct TaskResultTy {
|
|
|
|
llvm::Value *NewTask = nullptr;
|
|
|
|
llvm::Value *TaskEntry = nullptr;
|
|
|
|
llvm::Value *NewTaskNewTaskTTy = nullptr;
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
LValue TDBase;
|
2016-04-28 17:23:51 +08:00
|
|
|
RecordDecl *KmpTaskTQTyRD = nullptr;
|
2016-05-05 16:46:22 +08:00
|
|
|
llvm::Value *TaskDupFn = nullptr;
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
};
|
|
|
|
/// Emit task region for the task directive. The task region is emitted in
|
|
|
|
/// several steps:
|
|
|
|
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
|
|
|
|
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
|
|
|
|
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
|
|
|
|
/// function:
|
|
|
|
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
|
|
|
|
/// TaskFunction(gtid, tt->part_id, tt->shareds);
|
|
|
|
/// return 0;
|
|
|
|
/// }
|
|
|
|
/// 2. Copy a list of shared variables to field shareds of the resulting
|
|
|
|
/// structure kmp_task_t returned by the previous call (if any).
|
|
|
|
/// 3. Copy a pointer to destructions function to field destructions of the
|
|
|
|
/// resulting structure kmp_task_t.
|
|
|
|
/// \param D Current task directive.
|
|
|
|
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
|
|
|
|
/// /*part_id*/, captured_struct */*__context*/);
|
|
|
|
  /// \param SharedsTy A type which contains references to the shared variables.
|
|
|
|
/// \param Shareds Context with the list of shared variables from the \p
|
|
|
|
/// TaskFunction.
|
2016-04-28 17:23:51 +08:00
|
|
|
  /// \param Data Additional data for task generation like tiedness, final
|
|
|
|
/// state, list of privates etc.
|
|
|
|
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
const OMPExecutableDirective &D,
|
|
|
|
llvm::Value *TaskFunction, QualType SharedsTy,
|
|
|
|
Address Shareds, const OMPTaskDataTy &Data);
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
|
2014-10-08 22:01:46 +08:00
|
|
|
public:
|
|
|
|
explicit CGOpenMPRuntime(CodeGenModule &CGM);
|
2015-10-20 21:23:58 +08:00
|
|
|
virtual ~CGOpenMPRuntime() {}
|
2015-03-18 12:13:55 +08:00
|
|
|
virtual void clear();
|
2014-10-08 22:01:46 +08:00
|
|
|
|
2016-03-04 17:22:22 +08:00
|
|
|
/// Emit code for the specified user defined reduction construct.
|
|
|
|
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
|
|
|
|
const OMPDeclareReductionDecl *D);
|
2016-03-17 18:19:46 +08:00
|
|
|
/// Get combiner/initializer for the specified user-defined reduction, if any.
|
|
|
|
virtual std::pair<llvm::Function *, llvm::Function *>
|
|
|
|
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
|
2017-01-19 02:18:53 +08:00
|
|
|
|
2015-04-10 12:50:10 +08:00
|
|
|
/// \brief Emits outlined function for the specified OpenMP parallel directive
|
|
|
|
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
|
|
|
|
/// kmp_int32 BoundID, struct context_vars*).
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \param D OpenMP directive.
|
|
|
|
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
|
2015-07-03 17:56:58 +08:00
|
|
|
/// \param InnermostKind Kind of innermost directive (for simple directives it
|
|
|
|
/// is a directive itself, for combined - its innermost directive).
|
2015-04-10 12:50:10 +08:00
|
|
|
/// \param CodeGen Code generation sequence for the \a D directive.
|
2017-01-19 02:18:53 +08:00
|
|
|
virtual llvm::Value *emitParallelOutlinedFunction(
|
|
|
|
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
|
|
|
|
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
|
|
|
|
|
|
|
|
/// \brief Emits outlined function for the specified OpenMP teams directive
|
|
|
|
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
|
|
|
|
/// kmp_int32 BoundID, struct context_vars*).
|
|
|
|
/// \param D OpenMP directive.
|
|
|
|
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
|
|
|
|
/// \param InnermostKind Kind of innermost directive (for simple directives it
|
|
|
|
/// is a directive itself, for combined - its innermost directive).
|
|
|
|
/// \param CodeGen Code generation sequence for the \a D directive.
|
|
|
|
virtual llvm::Value *emitTeamsOutlinedFunction(
|
2015-07-03 17:56:58 +08:00
|
|
|
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
|
|
|
|
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
|
2014-10-10 20:19:54 +08:00
|
|
|
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// \brief Emits outlined function for the OpenMP task directive \a D. This
|
2016-04-20 12:01:36 +08:00
|
|
|
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
|
|
|
|
/// TaskT).
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// \param D OpenMP directive.
|
|
|
|
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
|
2016-04-20 12:01:36 +08:00
|
|
|
/// \param PartIDVar Variable for partition id in the current OpenMP untied
|
|
|
|
/// task region.
|
|
|
|
/// \param TaskTVar Variable for task_t argument.
|
2015-07-03 17:56:58 +08:00
|
|
|
/// \param InnermostKind Kind of innermost directive (for simple directives it
|
|
|
|
/// is a directive itself, for combined - its innermost directive).
|
2015-04-10 12:50:10 +08:00
|
|
|
/// \param CodeGen Code generation sequence for the \a D directive.
|
2016-04-20 12:01:36 +08:00
|
|
|
/// \param Tied true if task is generated for tied task, false otherwise.
|
|
|
|
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
|
|
|
|
/// tasks.
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
///
|
2015-07-03 17:56:58 +08:00
|
|
|
virtual llvm::Value *emitTaskOutlinedFunction(
|
|
|
|
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
|
2016-04-20 12:01:36 +08:00
|
|
|
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
|
|
|
|
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
|
|
|
|
bool Tied, unsigned &NumberOfParts);
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
|
2014-10-08 22:01:46 +08:00
|
|
|
/// \brief Cleans up references to the objects in finished function.
|
|
|
|
///
|
2015-02-25 16:32:46 +08:00
|
|
|
void functionFinished(CodeGenFunction &CGF);
|
2014-10-08 22:01:46 +08:00
|
|
|
|
[OPENMP] Codegen for 'if' clause in 'task' directive.
If condition evaluates to true, the code executes task by calling @__kmpc_omp_task() runtime function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
call void @__kmpc_omp_task_begin_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
proxy_task_entry(<gtid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
call void @__kmpc_omp_task_complete_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
Also it checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D9143
llvm-svn: 235507
2015-04-22 21:57:31 +08:00
|
|
|
/// \brief Emits code for parallel or serial call of the \a OutlinedFn with
|
|
|
|
/// variables captured in a record which address is stored in \a
|
|
|
|
/// CapturedStruct.
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
|
2015-09-11 16:13:32 +08:00
|
|
|
/// \param CapturedVars A pointer to the record with the references to
|
2014-10-08 22:01:46 +08:00
|
|
|
/// variables used in \a OutlinedFn function.
|
[OPENMP] Codegen for 'if' clause in 'task' directive.
If condition evaluates to true, the code executes task by calling @__kmpc_omp_task() runtime function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
call void @__kmpc_omp_task_begin_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
proxy_task_entry(<gtid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
call void @__kmpc_omp_task_complete_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
Also it checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D9143
llvm-svn: 235507
2015-04-22 21:57:31 +08:00
|
|
|
/// \param IfCond Condition in the associated 'if' clause, if it was
|
|
|
|
/// specified, nullptr otherwise.
|
2014-10-08 22:01:46 +08:00
|
|
|
///
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
llvm::Value *OutlinedFn,
|
2015-09-10 16:12:02 +08:00
|
|
|
ArrayRef<llvm::Value *> CapturedVars,
|
|
|
|
const Expr *IfCond);
|
[OPENMP] Codegen for 'if' clause in 'parallel' directive.
Adds codegen for 'if' clause. Currently only for 'if' clause used with the 'parallel' directive.
If condition evaluates to true, the code executes parallel version of the code by calling __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/), where loc - debug location, 1 - number of additional parameters after "microtask" argument, microtask - is outlined finction for the code associated with the 'parallel' directive, captured_struct - list of variables captured in this outlined function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
global_thread_id.addr = alloca i32
store i32 global_thread_id, global_thread_id.addr
zero.addr = alloca i32
store i32 0, zero.addr
kmpc_serialized_parallel(loc, global_thread_id);
microtask(global_thread_id.addr, zero.addr, captured_struct/*context*/);
kmpc_end_serialized_parallel(loc, global_thread_id);
Where loc - debug location, global_thread_id - global thread id, returned by __kmpc_global_thread_num() call or passed as a first parameter in microtask() call, global_thread_id.addr - address of the variable, where stored global_thread_id value, zero.addr - implicit bound thread id (should be set to 0 for serial call), microtask() and captured_struct are the same as in parallel call.
Also this patch checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D4716
llvm-svn: 219597
2014-10-13 14:02:40 +08:00
|
|
|
|
2014-12-01 19:32:38 +08:00
|
|
|
/// \brief Emits a critical region.
|
2014-10-10 20:19:54 +08:00
|
|
|
/// \param CriticalName Name of the critical region.
|
2014-12-01 19:32:38 +08:00
|
|
|
/// \param CriticalOpGen Generator for the statement associated with the given
|
|
|
|
/// critical region.
|
2015-12-15 18:55:09 +08:00
|
|
|
/// \param Hint Value of the 'hint' clause (optional).
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
|
2015-04-10 12:50:10 +08:00
|
|
|
const RegionCodeGenTy &CriticalOpGen,
|
2015-12-15 18:55:09 +08:00
|
|
|
SourceLocation Loc,
|
|
|
|
const Expr *Hint = nullptr);
|
2014-10-08 22:01:46 +08:00
|
|
|
|
2014-12-04 15:23:53 +08:00
|
|
|
/// \brief Emits a master region.
|
|
|
|
/// \param MasterOpGen Generator for the statement associated with the given
|
|
|
|
/// master region.
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitMasterRegion(CodeGenFunction &CGF,
|
2015-04-10 12:50:10 +08:00
|
|
|
const RegionCodeGenTy &MasterOpGen,
|
2015-02-25 16:32:46 +08:00
|
|
|
SourceLocation Loc);
|
2014-12-04 15:23:53 +08:00
|
|
|
|
2015-02-05 13:57:51 +08:00
|
|
|
/// \brief Emits code for a taskyield directive.
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
|
2015-02-05 13:57:51 +08:00
|
|
|
|
2015-06-18 20:14:09 +08:00
|
|
|
/// \brief Emit a taskgroup region.
|
|
|
|
/// \param TaskgroupOpGen Generator for the statement associated with the
|
|
|
|
/// given taskgroup region.
|
|
|
|
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
|
|
|
|
const RegionCodeGenTy &TaskgroupOpGen,
|
|
|
|
SourceLocation Loc);
|
|
|
|
|
2015-02-05 14:35:41 +08:00
|
|
|
/// \brief Emits a single region.
|
|
|
|
/// \param SingleOpGen Generator for the statement associated with the given
|
|
|
|
/// single region.
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitSingleRegion(CodeGenFunction &CGF,
|
2015-04-10 12:50:10 +08:00
|
|
|
const RegionCodeGenTy &SingleOpGen,
|
2015-03-23 14:18:07 +08:00
|
|
|
SourceLocation Loc,
|
|
|
|
ArrayRef<const Expr *> CopyprivateVars,
|
2015-04-14 13:11:24 +08:00
|
|
|
ArrayRef<const Expr *> DestExprs,
|
2015-03-23 14:18:07 +08:00
|
|
|
ArrayRef<const Expr *> SrcExprs,
|
|
|
|
ArrayRef<const Expr *> AssignmentOps);
|
2015-02-05 14:35:41 +08:00
|
|
|
|
2015-04-22 19:15:40 +08:00
|
|
|
/// \brief Emit an ordered region.
|
|
|
|
/// \param OrderedOpGen Generator for the statement associated with the given
|
2015-06-18 20:14:09 +08:00
|
|
|
/// ordered region.
|
2015-04-22 19:15:40 +08:00
|
|
|
virtual void emitOrderedRegion(CodeGenFunction &CGF,
|
|
|
|
const RegionCodeGenTy &OrderedOpGen,
|
2015-09-29 11:48:57 +08:00
|
|
|
SourceLocation Loc, bool IsThreads);
|
2015-04-22 19:15:40 +08:00
|
|
|
|
2015-03-30 12:30:22 +08:00
|
|
|
/// \brief Emit an implicit/explicit barrier for OpenMP threads.
|
|
|
|
/// \param Kind Directive for which this implicit barrier call must be
|
|
|
|
/// generated. Must be OMPD_barrier for explicit barrier generation.
|
2015-09-15 20:52:43 +08:00
|
|
|
/// \param EmitChecks true if need to emit checks for cancellation barriers.
|
|
|
|
  /// \param ForceSimpleCall true if the simple barrier call must be emitted, false if
|
|
|
|
/// runtime class decides which one to emit (simple or with cancellation
|
|
|
|
/// checks).
|
2014-10-08 22:01:46 +08:00
|
|
|
///
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
|
2015-07-03 17:56:58 +08:00
|
|
|
OpenMPDirectiveKind Kind,
|
2015-09-15 20:52:43 +08:00
|
|
|
bool EmitChecks = true,
|
|
|
|
bool ForceSimpleCall = false);
|
2014-10-13 16:23:51 +08:00
|
|
|
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \brief Check if the specified \a ScheduleKind is static non-chunked.
|
|
|
|
/// This kind of worksharing directive is emitted without outer loop.
|
|
|
|
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
|
|
|
|
/// \param Chunked True if chunk is specified in the clause.
|
|
|
|
///
|
|
|
|
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
|
|
|
|
bool Chunked) const;
|
|
|
|
|
2016-03-08 00:04:49 +08:00
|
|
|
/// \brief Check if the specified \a ScheduleKind is static non-chunked.
|
|
|
|
/// This kind of distribute directive is emitted without outer loop.
|
|
|
|
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
|
|
|
|
/// \param Chunked True if chunk is specified in the clause.
|
|
|
|
///
|
|
|
|
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
|
|
|
|
bool Chunked) const;
|
|
|
|
|
2015-01-22 16:49:35 +08:00
|
|
|
/// \brief Check if the specified \a ScheduleKind is dynamic.
|
|
|
|
/// This kind of worksharing directive is emitted without outer loop.
|
|
|
|
  /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
|
|
|
|
///
|
|
|
|
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
|
|
|
|
|
2017-04-26 01:52:12 +08:00
|
|
|
  /// Struct with the values to be passed to the dispatch runtime function.
|
|
|
|
struct DispatchRTInput {
|
|
|
|
/// Loop lower bound
|
|
|
|
llvm::Value *LB = nullptr;
|
|
|
|
/// Loop upper bound
|
|
|
|
llvm::Value *UB = nullptr;
|
|
|
|
/// Chunk size specified using 'schedule' clause (nullptr if chunk
|
|
|
|
/// was not specified)
|
|
|
|
llvm::Value *Chunk = nullptr;
|
|
|
|
DispatchRTInput() = default;
|
|
|
|
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
|
|
|
|
: LB(LB), UB(UB), Chunk(Chunk) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Call the appropriate runtime routine to initialize it before start
|
|
|
|
/// of loop.
|
|
|
|
|
|
|
|
/// This is used for non static scheduled types and when the ordered
|
|
|
|
/// clause is present on the loop construct.
|
|
|
|
/// Depending on the loop schedule, it is necessary to call some runtime
|
|
|
|
/// routine before start of the OpenMP loop to get the loop upper / lower
|
|
|
|
/// bounds \a LB and \a UB and stride \a ST.
|
|
|
|
///
|
|
|
|
/// \param CGF Reference to current CodeGenFunction.
|
|
|
|
/// \param Loc Clang source location.
|
|
|
|
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
|
|
|
|
/// \param IVSize Size of the iteration variable in bits.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// \param IVSigned Sign of the iteration variable.
|
2017-04-26 01:52:12 +08:00
|
|
|
/// \param Ordered true if loop is ordered, false otherwise.
|
|
|
|
/// \param DispatchValues struct containing llvm values for lower bound, upper
|
|
|
|
/// bound, and chunk expression.
|
|
|
|
/// For the default (nullptr) value, the chunk 1 will be used.
|
|
|
|
///
|
2015-09-08 17:42:41 +08:00
|
|
|
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
|
2016-05-10 17:57:36 +08:00
|
|
|
const OpenMPScheduleTy &ScheduleKind,
|
|
|
|
unsigned IVSize, bool IVSigned, bool Ordered,
|
2017-04-26 01:52:12 +08:00
|
|
|
const DispatchRTInput &DispatchValues);
|
2015-09-08 17:42:41 +08:00
|
|
|
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \brief Call the appropriate runtime routine to initialize it before start
|
|
|
|
/// of loop.
|
|
|
|
///
|
2017-04-26 01:52:12 +08:00
|
|
|
/// This is used only in case of static schedule, when the user did not
|
|
|
|
  /// specify an ordered clause on the loop construct.
|
|
|
|
/// Depending on the loop schedule, it is necessary to call some runtime
|
2014-12-15 15:07:06 +08:00
|
|
|
/// routine before start of the OpenMP loop to get the loop upper / lower
|
|
|
|
/// bounds \a LB and \a UB and stride \a ST.
|
|
|
|
///
|
|
|
|
/// \param CGF Reference to current CodeGenFunction.
|
|
|
|
/// \param Loc Clang source location.
|
2016-05-10 17:57:36 +08:00
|
|
|
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \param IVSize Size of the iteration variable in bits.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// \param IVSigned Sign of the iteration variable.
|
2015-05-20 21:12:48 +08:00
|
|
|
/// \param Ordered true if loop is ordered, false otherwise.
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \param IL Address of the output variable in which the flag of the
|
|
|
|
/// last iteration is returned.
|
|
|
|
/// \param LB Address of the output variable in which the lower iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param UB Address of the output variable in which the upper iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param ST Address of the output variable in which the stride value is
|
2017-07-14 01:34:44 +08:00
|
|
|
  /// returned necessary to generate the static_chunked scheduled loop.
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \param Chunk Value of the chunk for the static_chunked scheduled loop.
|
|
|
|
/// For the default (nullptr) value, the chunk 1 will be used.
|
|
|
|
///
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
|
2016-05-10 17:57:36 +08:00
|
|
|
const OpenMPScheduleTy &ScheduleKind,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
unsigned IVSize, bool IVSigned, bool Ordered,
|
2016-05-10 17:57:36 +08:00
|
|
|
Address IL, Address LB, Address UB, Address ST,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *Chunk = nullptr);
|
2014-12-15 15:07:06 +08:00
|
|
|
|
2016-03-08 00:04:49 +08:00
|
|
|
///
|
|
|
|
/// \param CGF Reference to current CodeGenFunction.
|
|
|
|
/// \param Loc Clang source location.
|
|
|
|
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
|
|
|
|
/// \param IVSize Size of the iteration variable in bits.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// \param IVSigned Sign of the iteration variable.
|
2016-03-08 00:04:49 +08:00
|
|
|
/// \param Ordered true if loop is ordered, false otherwise.
|
|
|
|
/// \param IL Address of the output variable in which the flag of the
|
|
|
|
/// last iteration is returned.
|
|
|
|
/// \param LB Address of the output variable in which the lower iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param UB Address of the output variable in which the upper iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param ST Address of the output variable in which the stride value is
|
2017-07-14 01:34:44 +08:00
|
|
|
/// returned necessary to generated the static_chunked scheduled loop.
|
2016-03-08 00:04:49 +08:00
|
|
|
/// \param Chunk Value of the chunk for the static_chunked scheduled loop.
|
|
|
|
/// For the default (nullptr) value, the chunk 1 will be used.
|
|
|
|
///
|
|
|
|
virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
OpenMPDistScheduleClauseKind SchedKind,
|
|
|
|
unsigned IVSize, bool IVSigned,
|
|
|
|
bool Ordered, Address IL, Address LB,
|
|
|
|
Address UB, Address ST,
|
|
|
|
llvm::Value *Chunk = nullptr);
|
|
|
|
|
2015-04-22 19:15:40 +08:00
|
|
|
/// \brief Call the appropriate runtime routine to notify that we finished
|
|
|
|
/// iteration of the ordered loop with the dynamic scheduling.
|
|
|
|
///
|
|
|
|
/// \param CGF Reference to current CodeGenFunction.
|
|
|
|
/// \param Loc Clang source location.
|
|
|
|
/// \param IVSize Size of the iteration variable in bits.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// \param IVSigned Sign of the iteration variable.
|
2015-04-22 19:15:40 +08:00
|
|
|
///
|
2015-05-20 21:12:48 +08:00
|
|
|
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
|
|
|
|
SourceLocation Loc, unsigned IVSize,
|
|
|
|
bool IVSigned);
|
2015-04-22 19:15:40 +08:00
|
|
|
|
2014-12-15 15:07:06 +08:00
|
|
|
/// \brief Call the appropriate runtime routine to notify that we finished
|
|
|
|
/// all the work with current loop.
|
|
|
|
///
|
|
|
|
/// \param CGF Reference to current CodeGenFunction.
|
|
|
|
/// \param Loc Clang source location.
|
|
|
|
///
|
2015-04-22 19:15:40 +08:00
|
|
|
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc);
|
2014-12-15 15:07:06 +08:00
|
|
|
|
2015-03-12 21:37:50 +08:00
|
|
|
/// Call __kmpc_dispatch_next(
|
|
|
|
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
|
|
|
|
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
|
|
|
|
/// kmp_int[32|64] *p_stride);
|
|
|
|
/// \param IVSize Size of the iteration variable in bits.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// \param IVSigned Sign of the iteration variable.
|
2015-03-12 21:37:50 +08:00
|
|
|
/// \param IL Address of the output variable in which the flag of the
|
|
|
|
/// last iteration is returned.
|
|
|
|
/// \param LB Address of the output variable in which the lower iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param UB Address of the output variable in which the upper iteration
|
|
|
|
/// number is returned.
|
|
|
|
/// \param ST Address of the output variable in which the stride value is
|
|
|
|
/// returned.
|
|
|
|
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
unsigned IVSize, bool IVSigned,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address IL, Address LB,
|
|
|
|
Address UB, Address ST);
|
2015-03-12 21:37:50 +08:00
|
|
|
|
2014-10-13 16:23:51 +08:00
|
|
|
/// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
|
|
|
|
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
|
|
|
|
/// clause.
|
|
|
|
/// \param NumThreads An integer value of threads.
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
|
|
|
|
llvm::Value *NumThreads,
|
|
|
|
SourceLocation Loc);
|
2014-11-11 12:05:39 +08:00
|
|
|
|
2015-06-18 21:40:03 +08:00
|
|
|
/// \brief Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
|
|
|
|
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
|
|
|
|
virtual void emitProcBindClause(CodeGenFunction &CGF,
|
|
|
|
OpenMPProcBindClauseKind ProcBind,
|
|
|
|
SourceLocation Loc);
|
|
|
|
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \brief Returns address of the threadprivate variable for the current
|
|
|
|
/// thread.
|
2014-11-11 15:58:06 +08:00
|
|
|
/// \param VD Threadprivate variable.
|
2014-11-11 12:05:39 +08:00
|
|
|
/// \param VDAddr Address of the global variable \a VD.
|
|
|
|
/// \param Loc Location of the reference to threadprivate var.
|
|
|
|
/// \return Address of the threadprivate variable for the current thread.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
|
|
|
|
const VarDecl *VD,
|
|
|
|
Address VDAddr,
|
|
|
|
SourceLocation Loc);
|
2014-11-11 12:05:39 +08:00
|
|
|
|
|
|
|
/// \brief Emit a code for initialization of threadprivate variable. It emits
|
|
|
|
/// a call to runtime library which adds initial value to the newly created
|
|
|
|
/// threadprivate variable (if it is not constant) and registers destructor
|
|
|
|
/// for the variable (if any).
|
|
|
|
/// \param VD Threadprivate variable.
|
|
|
|
/// \param VDAddr Address of the global variable \a VD.
|
|
|
|
/// \param Loc Location of threadprivate declaration.
|
|
|
|
/// \param PerformInit true if initialization expression is not constant.
|
|
|
|
virtual llvm::Function *
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
|
2015-02-25 16:32:46 +08:00
|
|
|
SourceLocation Loc, bool PerformInit,
|
|
|
|
CodeGenFunction *CGF = nullptr);
|
2014-11-20 12:34:54 +08:00
|
|
|
|
2017-07-17 21:30:36 +08:00
|
|
|
/// Creates artificial threadprivate variable with name \p Name and type \p
|
|
|
|
/// VarType.
|
|
|
|
/// \param VarType Type of the artificial threadprivate variable.
|
|
|
|
/// \param Name Name of the artificial threadprivate variable.
|
|
|
|
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
|
|
|
|
QualType VarType,
|
|
|
|
StringRef Name);
|
|
|
|
|
2014-11-20 12:34:54 +08:00
|
|
|
/// \brief Emit flush of the variables specified in 'omp flush' directive.
|
|
|
|
/// \param Vars List of variables to flush.
|
2015-02-25 16:32:46 +08:00
|
|
|
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
|
|
|
|
SourceLocation Loc);
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
|
|
|
|
/// \brief Emit task region for the task directive. The task region is
|
2015-04-29 02:19:18 +08:00
|
|
|
/// emitted in several steps:
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
|
|
|
|
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
|
|
|
|
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
|
|
|
|
/// function:
|
|
|
|
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
|
|
|
|
/// TaskFunction(gtid, tt->part_id, tt->shareds);
|
|
|
|
/// return 0;
|
|
|
|
/// }
|
|
|
|
/// 2. Copy a list of shared variables to field shareds of the resulting
|
|
|
|
/// structure kmp_task_t returned by the previous call (if any).
|
|
|
|
/// 3. Copy a pointer to destructions function to field destructions of the
|
|
|
|
/// resulting structure kmp_task_t.
|
|
|
|
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
|
|
|
|
/// kmp_task_t *new_task), where new_task is a resulting structure from
|
|
|
|
/// previous items.
|
2015-04-30 14:51:57 +08:00
|
|
|
/// \param D Current task directive.
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
|
|
|
|
/// /*part_id*/, captured_struct */*__context*/);
|
|
|
|
/// \param SharedsTy A type which contains references the shared variables.
|
[OPENMP] Codegen for 'depend' clause (OpenMP 4.0).
If task directive has associated 'depend' clause then function kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called instead of __kmpc_omp_task().
If this directive has associated 'if' clause then also before a call of kmpc_omp_task_begin_if0() a function void __kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) must be called.
Array sections are not supported yet.
llvm-svn: 240532
2015-06-24 19:01:36 +08:00
|
|
|
/// \param Shareds Context with the list of shared variables from the \p
|
[OPENMP] Initial codegen for 'omp task' directive.
The task region is emmitted in several steps:
Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t *task_entry).
Here task_entry is a pointer to the function:
kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
TaskFunction(gtid, tt->part_id, tt->shareds);
return 0;
}
Copy a list of shared variables to field shareds of the resulting structure kmp_task_t returned by the previous call (if any).
Copy a pointer to destructions function to field destructions of the resulting structure kmp_task_t.
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task), where new_task is a resulting structure from previous items.
Differential Revision: http://reviews.llvm.org/D7560
llvm-svn: 231762
2015-03-10 15:28:44 +08:00
|
|
|
/// TaskFunction.
|
[OPENMP] Codegen for 'if' clause in 'task' directive.
If condition evaluates to true, the code executes task by calling @__kmpc_omp_task() runtime function.
If condition evaluates to false, the code executes serial version of the code by executing the following code:
call void @__kmpc_omp_task_begin_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
proxy_task_entry(<gtid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
call void @__kmpc_omp_task_complete_if0(<loc>, <threadid>, <task_t_ptr, returned by @__kmpc_omp_task_alloc()>);
Also it checks if the condition is constant and if it is constant it evaluates its value and then generates either parallel version of the code (if the condition evaluates to true), or the serial version of the code (if the condition evaluates to false).
Differential Revision: http://reviews.llvm.org/D9143
llvm-svn: 235507
2015-04-22 21:57:31 +08:00
|
|
|
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
|
|
|
|
/// otherwise.
|
2016-04-28 17:23:51 +08:00
|
|
|
/// \param Data Additional data for task generation like tiednsee, final
|
|
|
|
/// state, list of privates etc.
|
|
|
|
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
const OMPExecutableDirective &D,
|
|
|
|
llvm::Value *TaskFunction, QualType SharedsTy,
|
|
|
|
Address Shareds, const Expr *IfCond,
|
|
|
|
const OMPTaskDataTy &Data);
|
[OPENMP] Codegen for 'reduction' clause in 'parallel' directive.
Emit a code for reduction clause. Next code should be emitted for reductions:
static kmp_critical_name lock = { 0 };
void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
...
*(Type<i> *)lhs[i] = RedOp<i>(*(Type<i> *)lhs[i], *(Type<i> *)rhs[i]);
...
}
... void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n> - 1]};
switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>)) {
case 1:
...
<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
...
__kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
break;
case 2:
...
Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
...
break;
default:
;
}
Reduction variables are a kind of a private variables, they have private copies, but initial values are chosen in accordance with the reduction operation.
Differential Revision: http://reviews.llvm.org/D8915
llvm-svn: 234583
2015-04-10 18:43:45 +08:00
|
|
|
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
/// Emit task region for the taskloop directive. The taskloop region is
|
|
|
|
/// emitted in several steps:
|
|
|
|
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
|
|
|
|
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
|
|
|
|
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
|
|
|
|
/// function:
|
|
|
|
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
|
|
|
|
/// TaskFunction(gtid, tt->part_id, tt->shareds);
|
|
|
|
/// return 0;
|
|
|
|
/// }
|
|
|
|
/// 2. Copy a list of shared variables to field shareds of the resulting
|
|
|
|
/// structure kmp_task_t returned by the previous call (if any).
|
|
|
|
/// 3. Copy a pointer to destructions function to field destructions of the
|
|
|
|
/// resulting structure kmp_task_t.
|
|
|
|
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
|
|
|
|
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
|
|
|
|
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
|
|
|
|
/// is a resulting structure from
|
|
|
|
/// previous items.
|
|
|
|
/// \param D Current task directive.
|
|
|
|
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
|
|
|
|
/// /*part_id*/, captured_struct */*__context*/);
|
|
|
|
/// \param SharedsTy A type which contains references the shared variables.
|
|
|
|
/// \param Shareds Context with the list of shared variables from the \p
|
|
|
|
/// TaskFunction.
|
|
|
|
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
|
|
|
|
/// otherwise.
|
2016-04-28 17:23:51 +08:00
|
|
|
/// \param Data Additional data for task generation like tiednsee, final
|
|
|
|
/// state, list of privates etc.
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
virtual void emitTaskLoopCall(
|
|
|
|
CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
|
2016-04-28 17:23:51 +08:00
|
|
|
llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
|
|
|
|
const Expr *IfCond, const OMPTaskDataTy &Data);
|
[OPENMP 4.5] Codegen for 'taskloop' directive.
The taskloop construct specifies that the iterations of one or more associated loops will be executed in parallel using OpenMP tasks. The iterations are distributed across tasks created by the construct and scheduled to be executed.
The next code will be generated for the taskloop directive:
#pragma omp taskloop num_tasks(N) lastprivate(j)
for( i=0; i<N*GRAIN*STRIDE-1; i+=STRIDE ) {
int th = omp_get_thread_num();
#pragma omp atomic
counter++;
#pragma omp atomic
th_counter[th]++;
j = i;
}
Generated code:
task = __kmpc_omp_task_alloc(NULL,gtid,1,sizeof(struct
task),sizeof(struct shar),&task_entry);
psh = task->shareds;
psh->pth_counter = &th_counter;
psh->pcounter = &counter;
psh->pj = &j;
task->lb = 0;
task->ub = N*GRAIN*STRIDE-2;
task->st = STRIDE;
__kmpc_taskloop(
NULL, // location
gtid, // gtid
task, // task structure
1, // if clause value
&task->lb, // lower bound
&task->ub, // upper bound
STRIDE, // loop increment
0, // 1 if nogroup specified
2, // schedule type: 0-none, 1-grainsize, 2-num_tasks
N, // schedule value (ignored for type 0)
(void*)&__task_dup_entry // tasks duplication routine
);
llvm-svn: 267395
2016-04-25 20:22:29 +08:00
|
|
|
|
2015-04-10 12:50:10 +08:00
|
|
|
/// \brief Emit code for the directive that does not require outlining.
|
|
|
|
///
|
2015-07-03 17:56:58 +08:00
|
|
|
/// \param InnermostKind Kind of innermost directive (for simple directives it
|
|
|
|
/// is a directive itself, for combined - its innermost directive).
|
2015-04-10 12:50:10 +08:00
|
|
|
/// \param CodeGen Code generation sequence for the \a D directive.
|
2015-09-15 20:52:43 +08:00
|
|
|
/// \param HasCancel true if region has inner cancel directive, false
|
|
|
|
/// otherwise.
|
2015-04-10 12:50:10 +08:00
|
|
|
virtual void emitInlinedDirective(CodeGenFunction &CGF,
|
2015-07-03 17:56:58 +08:00
|
|
|
OpenMPDirectiveKind InnermostKind,
|
2015-09-15 20:52:43 +08:00
|
|
|
const RegionCodeGenTy &CodeGen,
|
|
|
|
bool HasCancel = false);
|
2017-02-17 00:20:16 +08:00
|
|
|
|
|
|
|
/// Emits reduction function.
|
|
|
|
/// \param ArgsType Array type containing pointers to reduction variables.
|
|
|
|
/// \param Privates List of private copies for original reduction arguments.
|
|
|
|
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
|
|
|
|
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
|
|
|
|
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
|
|
|
|
/// or 'operator binop(LHS, RHS)'.
|
|
|
|
llvm::Value *emitReductionFunction(CodeGenModule &CGM, llvm::Type *ArgsType,
|
|
|
|
ArrayRef<const Expr *> Privates,
|
|
|
|
ArrayRef<const Expr *> LHSExprs,
|
|
|
|
ArrayRef<const Expr *> RHSExprs,
|
|
|
|
ArrayRef<const Expr *> ReductionOps);
|
|
|
|
|
|
|
|
/// Emits single reduction combiner
|
|
|
|
void emitSingleReductionCombiner(CodeGenFunction &CGF,
|
|
|
|
const Expr *ReductionOp,
|
|
|
|
const Expr *PrivateRef,
|
|
|
|
const DeclRefExpr *LHS,
|
|
|
|
const DeclRefExpr *RHS);
|
|
|
|
|
|
|
|
struct ReductionOptionsTy {
|
|
|
|
bool WithNowait;
|
|
|
|
bool SimpleReduction;
|
|
|
|
OpenMPDirectiveKind ReductionKind;
|
|
|
|
};
|
[OPENMP] Codegen for 'reduction' clause in 'parallel' directive.
Emit a code for reduction clause. Next code should be emitted for reductions:
static kmp_critical_name lock = { 0 };
void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
...
*(Type<i> *)lhs[i] = RedOp<i>(*(Type<i> *)lhs[i], *(Type<i> *)rhs[i]);
...
}
... void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n> - 1]};
switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>)) {
case 1:
...
<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
...
__kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
break;
case 2:
...
Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
...
break;
default:
;
}
Reduction variables are a kind of a private variables, they have private copies, but initial values are chosen in accordance with the reduction operation.
Differential Revision: http://reviews.llvm.org/D8915
llvm-svn: 234583
2015-04-10 18:43:45 +08:00
|
|
|
/// \brief Emit a code for reduction clause. Next code should be emitted for
|
|
|
|
/// reduction:
|
|
|
|
/// \code
|
|
|
|
///
|
|
|
|
/// static kmp_critical_name lock = { 0 };
|
|
|
|
///
|
|
|
|
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
|
|
|
|
/// ...
|
|
|
|
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
|
|
|
|
/// ...
|
|
|
|
/// }
|
|
|
|
///
|
|
|
|
/// ...
|
|
|
|
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
|
|
|
|
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
|
|
|
|
/// RedList, reduce_func, &<lock>)) {
|
|
|
|
/// case 1:
|
|
|
|
/// ...
|
|
|
|
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
|
|
|
|
/// ...
|
|
|
|
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
|
|
|
|
/// break;
|
|
|
|
/// case 2:
|
|
|
|
/// ...
|
|
|
|
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
|
|
|
|
/// ...
|
|
|
|
/// break;
|
|
|
|
/// default:;
|
|
|
|
/// }
|
|
|
|
/// \endcode
|
|
|
|
///
|
2015-10-08 17:10:53 +08:00
|
|
|
/// \param Privates List of private copies for original reduction arguments.
|
[OPENMP] Codegen for 'reduction' clause in 'parallel' directive.
Emit a code for reduction clause. Next code should be emitted for reductions:
static kmp_critical_name lock = { 0 };
void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
...
*(Type<i> *)lhs[i] = RedOp<i>(*(Type<i> *)lhs[i], *(Type<i> *)rhs[i]);
...
}
... void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n> - 1]};
switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>)) {
case 1:
...
<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
...
__kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
break;
case 2:
...
Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
...
break;
default:
;
}
Reduction variables are a kind of a private variables, they have private copies, but initial values are chosen in accordance with the reduction operation.
Differential Revision: http://reviews.llvm.org/D8915
llvm-svn: 234583
2015-04-10 18:43:45 +08:00
|
|
|
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
|
|
|
|
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
|
|
|
|
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
|
|
|
|
/// or 'operator binop(LHS, RHS)'.
|
2017-02-17 00:20:16 +08:00
|
|
|
/// \param Options List of options for reduction codegen:
|
|
|
|
/// WithNowait true if parent directive has also nowait clause, false
|
|
|
|
/// otherwise.
|
|
|
|
/// SimpleReduction Emit reduction operation only. Used for omp simd
|
|
|
|
/// directive on the host.
|
|
|
|
/// ReductionKind The kind of reduction to perform.
|
[OPENMP] Codegen for 'reduction' clause in 'parallel' directive.
Emit a code for reduction clause. Next code should be emitted for reductions:
static kmp_critical_name lock = { 0 };
void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
...
*(Type<i> *)lhs[i] = RedOp<i>(*(Type<i> *)lhs[i], *(Type<i> *)rhs[i]);
...
}
... void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n> - 1]};
switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>)) {
case 1:
...
<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
...
__kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
break;
case 2:
...
Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
...
break;
default:
;
}
Reduction variables are a kind of a private variables, they have private copies, but initial values are chosen in accordance with the reduction operation.
Differential Revision: http://reviews.llvm.org/D8915
llvm-svn: 234583
2015-04-10 18:43:45 +08:00
|
|
|
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
|
2015-10-08 17:10:53 +08:00
|
|
|
ArrayRef<const Expr *> Privates,
|
[OPENMP] Codegen for 'reduction' clause in 'parallel' directive.
Emit a code for reduction clause. Next code should be emitted for reductions:
static kmp_critical_name lock = { 0 };
void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
...
*(Type<i> *)lhs[i] = RedOp<i>(*(Type<i> *)lhs[i], *(Type<i> *)rhs[i]);
...
}
... void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n> - 1]};
switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>)) {
case 1:
...
<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
...
__kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
break;
case 2:
...
Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
...
break;
default:
;
}
Reduction variables are a kind of a private variables, they have private copies, but initial values are chosen in accordance with the reduction operation.
Differential Revision: http://reviews.llvm.org/D8915
llvm-svn: 234583
2015-04-10 18:43:45 +08:00
|
|
|
ArrayRef<const Expr *> LHSExprs,
|
|
|
|
ArrayRef<const Expr *> RHSExprs,
|
|
|
|
ArrayRef<const Expr *> ReductionOps,
|
2017-02-17 00:20:16 +08:00
|
|
|
ReductionOptionsTy Options);
|
2015-04-27 13:22:09 +08:00
|
|
|
|
2017-07-17 21:30:36 +08:00
|
|
|
/// Emit a code for initialization of task reduction clause. Next code
|
|
|
|
/// should be emitted for reduction:
|
|
|
|
/// \code
|
|
|
|
///
|
|
|
|
/// _task_red_item_t red_data[n];
|
|
|
|
/// ...
|
|
|
|
/// red_data[i].shar = &origs[i];
|
|
|
|
/// red_data[i].size = sizeof(origs[i]);
|
|
|
|
/// red_data[i].f_init = (void*)RedInit<i>;
|
|
|
|
/// red_data[i].f_fini = (void*)RedDest<i>;
|
|
|
|
/// red_data[i].f_comb = (void*)RedOp<i>;
|
|
|
|
/// red_data[i].flags = <Flag_i>;
|
|
|
|
/// ...
|
|
|
|
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
|
|
|
|
/// \endcode
|
|
|
|
///
|
|
|
|
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
|
|
|
|
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
|
|
|
|
/// \param Data Additional data for task generation like tiedness, final
|
|
|
|
/// state, list of privates, reductions etc.
|
|
|
|
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
|
|
|
|
SourceLocation Loc,
|
|
|
|
ArrayRef<const Expr *> LHSExprs,
|
|
|
|
ArrayRef<const Expr *> RHSExprs,
|
|
|
|
const OMPTaskDataTy &Data);
|
|
|
|
|
|
|
|
/// Required to resolve existing problems in the runtime. Emits threadprivate
|
|
|
|
/// variables to store the size of the VLAs/array sections for
|
|
|
|
/// initializer/combiner/finalizer functions + emits threadprivate variable to
|
|
|
|
/// store the pointer to the original reduction item for the custom
|
|
|
|
/// initializer defined by declare reduction construct.
|
|
|
|
/// \param RCG Allows to reuse an existing data for the reductions.
|
|
|
|
/// \param N Reduction item for which fixups must be emitted.
|
|
|
|
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
ReductionCodeGen &RCG, unsigned N);
|
|
|
|
|
|
|
|
/// Get the address of `void *` type of the private copy of the reduction
|
|
|
|
/// item specified by the \p SharedLVal.
|
|
|
|
/// \param ReductionsPtr Pointer to the reduction data returned by the
|
|
|
|
/// emitTaskReductionInit function.
|
|
|
|
/// \param SharedLVal Address of the original reduction item.
|
|
|
|
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
|
|
|
|
llvm::Value *ReductionsPtr,
|
|
|
|
LValue SharedLVal);
|
|
|
|
|
2015-04-27 13:22:09 +08:00
|
|
|
/// \brief Emit code for 'taskwait' directive.
|
|
|
|
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
|
2015-07-02 12:17:07 +08:00
|
|
|
|
|
|
|
/// \brief Emit code for 'cancellation point' construct.
|
|
|
|
/// \param CancelRegion Region kind for which the cancellation point must be
|
|
|
|
/// emitted.
|
|
|
|
///
|
|
|
|
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
|
|
|
|
SourceLocation Loc,
|
|
|
|
OpenMPDirectiveKind CancelRegion);
|
2015-07-06 13:50:32 +08:00
|
|
|
|
|
|
|
/// \brief Emit code for 'cancel' construct.
|
2015-09-18 16:07:34 +08:00
|
|
|
/// \param IfCond Condition in the associated 'if' clause, if it was
|
|
|
|
/// specified, nullptr otherwise.
|
2015-07-06 13:50:32 +08:00
|
|
|
/// \param CancelRegion Region kind for which the cancel must be emitted.
|
|
|
|
///
|
|
|
|
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
|
2015-09-18 16:07:34 +08:00
|
|
|
const Expr *IfCond,
|
2015-07-06 13:50:32 +08:00
|
|
|
OpenMPDirectiveKind CancelRegion);
|
2015-10-03 00:14:20 +08:00
|
|
|
|
|
|
|
/// \brief Emit outlined function for 'target' directive.
|
|
|
|
/// \param D Directive to emit.
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \param ParentName Name of the function that encloses the target region.
|
|
|
|
/// \param OutlinedFn Outlined function value to be defined by this call.
|
|
|
|
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
|
|
|
|
/// \param IsOffloadEntry True if the outlined function is an offload entry.
|
2016-03-29 13:34:15 +08:00
|
|
|
/// \param CodeGen Code generation sequence for the \a D directive.
|
2017-07-14 01:34:44 +08:00
|
|
|
/// An outlined function may not be an entry if, e.g. the if clause always
|
2016-01-06 21:42:12 +08:00
|
|
|
/// evaluates to false.
|
|
|
|
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
|
|
|
|
StringRef ParentName,
|
|
|
|
llvm::Function *&OutlinedFn,
|
|
|
|
llvm::Constant *&OutlinedFnID,
|
2016-03-29 13:34:15 +08:00
|
|
|
bool IsOffloadEntry,
|
|
|
|
const RegionCodeGenTy &CodeGen);
|
2015-10-03 00:14:20 +08:00
|
|
|
|
|
|
|
/// \brief Emit the target offloading code associated with \a D. The emitted
|
|
|
|
/// code attempts offloading the execution to the device, and in the event of
|
|
|
|
/// a failure it executes the host version outlined in \a OutlinedFn.
|
|
|
|
/// \param D Directive to emit.
|
|
|
|
/// \param OutlinedFn Host version of the code to be offloaded.
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \param OutlinedFnID ID of host version of the code to be offloaded.
|
2015-10-03 00:14:20 +08:00
|
|
|
/// \param IfCond Expression evaluated in if clause associated with the target
|
|
|
|
/// directive, or null if no if clause is used.
|
|
|
|
/// \param Device Expression evaluated in device clause associated with the
|
|
|
|
/// target directive, or null if no device clause is used.
|
|
|
|
/// \param CapturedVars Values captured in the current region.
|
|
|
|
virtual void emitTargetCall(CodeGenFunction &CGF,
|
|
|
|
const OMPExecutableDirective &D,
|
2016-01-06 21:42:12 +08:00
|
|
|
llvm::Value *OutlinedFn,
|
|
|
|
llvm::Value *OutlinedFnID, const Expr *IfCond,
|
2015-10-03 00:14:20 +08:00
|
|
|
const Expr *Device,
|
|
|
|
ArrayRef<llvm::Value *> CapturedVars);
|
2016-01-06 21:42:12 +08:00
|
|
|
|
|
|
|
/// \brief Emit the target regions enclosed in \a GD function definition or
|
|
|
|
/// the function itself in case it is a valid device function. Returns true if
|
|
|
|
/// \a GD was dealt with successfully.
|
2016-01-07 03:13:49 +08:00
|
|
|
/// \param GD Function to scan.
|
2016-01-06 21:42:12 +08:00
|
|
|
virtual bool emitTargetFunctions(GlobalDecl GD);
|
|
|
|
|
|
|
|
/// \brief Emit the global variable if it is a valid device global variable.
|
|
|
|
/// Returns true if \a GD was dealt with successfully.
|
|
|
|
/// \param GD Variable declaration to emit.
|
|
|
|
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
|
|
|
|
|
|
|
|
/// \brief Emit the global \a GD if it is meaningful for the target. Returns
|
2017-03-30 22:13:19 +08:00
|
|
|
/// true if it was emitted successfully.
|
2016-01-06 21:42:12 +08:00
|
|
|
/// \param GD Global to scan.
|
|
|
|
virtual bool emitTargetGlobal(GlobalDecl GD);
|
|
|
|
|
|
|
|
/// \brief Creates the offloading descriptor in the event any target region
|
|
|
|
/// was emitted in the current module and return the function that registers
|
|
|
|
/// it.
|
|
|
|
virtual llvm::Function *emitRegistrationFunction();
|
2016-03-04 04:34:23 +08:00
|
|
|
|
|
|
|
/// \brief Emits code for teams call of the \a OutlinedFn with
|
|
|
|
/// variables captured in a record which address is stored in \a
|
|
|
|
/// CapturedStruct.
|
|
|
|
/// \param OutlinedFn Outlined function to be run by team masters. Type of
|
|
|
|
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
|
|
|
|
/// \param CapturedVars A pointer to the record with the references to
|
|
|
|
/// variables used in \a OutlinedFn function.
|
|
|
|
///
|
|
|
|
virtual void emitTeamsCall(CodeGenFunction &CGF,
|
|
|
|
const OMPExecutableDirective &D,
|
|
|
|
SourceLocation Loc, llvm::Value *OutlinedFn,
|
|
|
|
ArrayRef<llvm::Value *> CapturedVars);
|
|
|
|
|
|
|
|
/// \brief Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
|
|
|
|
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
|
|
|
|
/// for num_teams clause.
|
2016-04-04 23:55:02 +08:00
|
|
|
/// \param NumTeams An integer expression of teams.
|
|
|
|
/// \param ThreadLimit An integer expression of threads.
|
|
|
|
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
|
|
|
|
const Expr *ThreadLimit, SourceLocation Loc);
|
2016-04-28 06:58:19 +08:00
|
|
|
|
2016-07-28 22:23:26 +08:00
|
|
|
/// Struct that keeps all the relevant information that should be kept
|
|
|
|
/// throughout a 'target data' region.
|
|
|
|
class TargetDataInfo {
|
|
|
|
/// Set to true if device pointer information have to be obtained.
|
|
|
|
bool RequiresDevicePointerInfo = false;
|
|
|
|
|
|
|
|
public:
|
|
|
|
/// The array of base pointer passed to the runtime library.
|
|
|
|
llvm::Value *BasePointersArray = nullptr;
|
|
|
|
/// The array of section pointers passed to the runtime library.
|
|
|
|
llvm::Value *PointersArray = nullptr;
|
|
|
|
/// The array of sizes passed to the runtime library.
|
|
|
|
llvm::Value *SizesArray = nullptr;
|
|
|
|
/// The array of map types passed to the runtime library.
|
|
|
|
llvm::Value *MapTypesArray = nullptr;
|
|
|
|
/// The total number of pointers passed to the runtime library.
|
|
|
|
unsigned NumberOfPtrs = 0u;
|
|
|
|
/// Map between the a declaration of a capture and the corresponding base
|
|
|
|
/// pointer address where the runtime returns the device pointers.
|
|
|
|
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
|
|
|
|
|
|
|
|
explicit TargetDataInfo() {}
|
|
|
|
explicit TargetDataInfo(bool RequiresDevicePointerInfo)
|
|
|
|
: RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
|
|
|
|
/// Clear information about the data arrays.
|
|
|
|
void clearArrayInfo() {
|
|
|
|
BasePointersArray = nullptr;
|
|
|
|
PointersArray = nullptr;
|
|
|
|
SizesArray = nullptr;
|
|
|
|
MapTypesArray = nullptr;
|
|
|
|
NumberOfPtrs = 0u;
|
|
|
|
}
|
|
|
|
/// Return true if the current target data information has valid arrays.
|
|
|
|
bool isValid() {
|
|
|
|
return BasePointersArray && PointersArray && SizesArray &&
|
|
|
|
MapTypesArray && NumberOfPtrs;
|
|
|
|
}
|
|
|
|
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
|
|
|
|
};
|
|
|
|
|
2016-04-28 06:58:19 +08:00
|
|
|
/// \brief Emit the target data mapping code associated with \a D.
|
|
|
|
/// \param D Directive to emit.
|
2016-07-28 22:23:26 +08:00
|
|
|
/// \param IfCond Expression evaluated in if clause associated with the
|
|
|
|
/// target directive, or null if no if clause is used.
|
2016-04-28 06:58:19 +08:00
|
|
|
/// \param Device Expression evaluated in device clause associated with the
|
|
|
|
/// target directive, or null if no device clause is used.
|
2016-07-28 22:23:26 +08:00
|
|
|
/// \param Info A record used to store information that needs to be preserved
|
|
|
|
/// until the region is closed.
|
2016-04-28 06:58:19 +08:00
|
|
|
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
|
|
|
|
const OMPExecutableDirective &D,
|
|
|
|
const Expr *IfCond, const Expr *Device,
|
2016-07-28 22:23:26 +08:00
|
|
|
const RegionCodeGenTy &CodeGen,
|
|
|
|
TargetDataInfo &Info);
|
2016-04-28 07:07:29 +08:00
|
|
|
|
2016-05-27 02:30:22 +08:00
|
|
|
/// \brief Emit the data mapping/movement code associated with the directive
|
|
|
|
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
|
2016-04-28 07:07:29 +08:00
|
|
|
/// \param D Directive to emit.
|
|
|
|
/// \param IfCond Expression evaluated in if clause associated with the target
|
|
|
|
/// directive, or null if no if clause is used.
|
|
|
|
/// \param Device Expression evaluated in device clause associated with the
|
|
|
|
/// target directive, or null if no device clause is used.
|
2016-05-27 02:30:22 +08:00
|
|
|
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
|
|
|
|
const OMPExecutableDirective &D,
|
|
|
|
const Expr *IfCond,
|
|
|
|
const Expr *Device);
|
2016-05-06 17:40:08 +08:00
|
|
|
|
|
|
|
/// Marks function \a Fn with properly mangled versions of vector functions.
|
|
|
|
/// \param FD Function marked as 'declare simd'.
|
|
|
|
/// \param Fn LLVM function that must be marked with 'declare simd'
|
|
|
|
/// attributes.
|
|
|
|
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
|
|
|
|
llvm::Function *Fn);
|
2016-05-25 20:36:08 +08:00
|
|
|
|
|
|
|
/// Emit initialization for doacross loop nesting support.
|
|
|
|
/// \param D Loop-based construct used in doacross nesting construct.
|
|
|
|
virtual void emitDoacrossInit(CodeGenFunction &CGF,
|
|
|
|
const OMPLoopDirective &D);
|
|
|
|
|
|
|
|
/// Emit code for doacross ordered directive with 'depend' clause.
|
|
|
|
/// \param C 'depend' clause with 'sink|source' dependency kind.
|
|
|
|
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
|
|
|
|
const OMPDependClause *C);
|
2017-08-05 03:10:54 +08:00
|
|
|
|
2017-08-09 02:04:06 +08:00
|
|
|
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
/// The base implementation performs no translation and returns
/// \p NativeParam unchanged; targets that need a different parameter
/// representation override this.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                          const VarDecl *NativeParam) const {
  return NativeParam;
}
|
|
|
|
|
|
|
|
/// Gets the address of the native argument basing on the address of the
|
|
|
|
/// target-specific parameter.
|
|
|
|
/// \param NativeParam Parameter itself.
|
|
|
|
/// \param TargetParam Corresponding target-specific parameter.
|
|
|
|
virtual Address getParameterAddress(CodeGenFunction &CGF,
|
|
|
|
const VarDecl *NativeParam,
|
|
|
|
const VarDecl *TargetParam) const;
|
|
|
|
|
2017-08-05 03:10:54 +08:00
|
|
|
/// Emits call of the outlined function with the provided arguments,
|
|
|
|
/// translating these arguments to correct target-specific arguments.
|
|
|
|
virtual void
|
|
|
|
emitOutlinedFunctionCall(CodeGenFunction &CGF, llvm::Value *OutlinedFn,
|
|
|
|
ArrayRef<llvm::Value *> Args = llvm::None) const;
|
2014-05-06 18:08:46 +08:00
|
|
|
};
|
2015-02-26 18:27:34 +08:00
|
|
|
|
2014-06-18 15:08:49 +08:00
|
|
|
} // namespace CodeGen
|
|
|
|
} // namespace clang
|
2014-05-06 18:08:46 +08:00
|
|
|
|
|
|
|
#endif
|