//===-- LLParser.h - Parser Class -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the parser class for .ll files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_ASMPARSER_LLPARSER_H
#define LLVM_LIB_ASMPARSER_LLPARSER_H
#include "LLLexer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include <map>
namespace llvm {
class Module;
class Function;
class Value;
class BasicBlock;
class Instruction;
class Constant;
class GlobalValue;
class Comdat;
class MDString;
class MDNode;
struct SlotMapping;
/// ValID - Represents a reference of a definition of some sort with no type.
/// There are several cases where we have to parse the value but where the
/// type can depend on later context. This may either be a numeric reference
/// or a symbolic (%var) reference. This is just a discriminated union.
struct ValID {
  // Discriminator for the union-like payload fields below; each enumerator's
  // comment names the field that carries its data.
  enum {
    t_LocalID, t_GlobalID,           // ID in UIntVal.
    t_LocalName, t_GlobalName,       // Name in StrVal.
    t_APSInt, t_APFloat,             // Value in APSIntVal/APFloatVal.
    t_Null, t_Undef, t_Zero, t_None, // No value.
    t_EmptyArray,                    // No value: []
    t_Constant,                      // Value in ConstantVal.
    t_InlineAsm,                     // Value in FTy/StrVal/StrVal2/UIntVal.
    t_ConstantStruct,                // Value in ConstantStructElts.
    t_PackedConstantStruct           // Value in ConstantStructElts.
  } Kind = t_LocalID;

  // Source location of the token this reference came from, for diagnostics.
  LLLexer::LocTy Loc;
  unsigned UIntVal;                                 // t_LocalID/t_GlobalID payload.
  FunctionType *FTy = nullptr;                      // t_InlineAsm payload.
  std::string StrVal, StrVal2;                      // Name / inline-asm payloads.
  APSInt APSIntVal;                                 // t_APSInt payload.
  APFloat APFloatVal{0.0};                          // t_APFloat payload.
  Constant *ConstantVal;                            // t_Constant payload.
  std::unique_ptr<Constant *[]> ConstantStructElts; // Struct-kind payloads.

  ValID() = default;
  // Copying is only defined for ValIDs without owned struct elements:
  // ConstantStructElts is a unique_ptr and is deliberately left null in the
  // copy, with the assert enforcing that nothing is silently dropped.
  ValID(const ValID &RHS)
      : Kind(RHS.Kind), Loc(RHS.Loc), UIntVal(RHS.UIntVal), FTy(RHS.FTy),
        StrVal(RHS.StrVal), StrVal2(RHS.StrVal2), APSIntVal(RHS.APSIntVal),
        APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal) {
    assert(!RHS.ConstantStructElts);
  }

  // Strict weak ordering so ValID can be used as a std::map key (e.g. in
  // ForwardRefBlockAddresses). Only ID-, name-, and struct-kinds are
  // comparable; the assert documents that other kinds are not ordered yet.
  bool operator<(const ValID &RHS) const {
    if (Kind == t_LocalID || Kind == t_GlobalID)
      return UIntVal < RHS.UIntVal;
    assert((Kind == t_LocalName || Kind == t_GlobalName ||
            Kind == t_ConstantStruct || Kind == t_PackedConstantStruct) &&
           "Ordering not defined for this ValID kind yet");
    return StrVal < RHS.StrVal;
  }
};
class LLParser {
public:
typedef LLLexer::LocTy LocTy;
private:
LLVMContext &Context;
LLLexer Lex;
// Module being parsed, null if we are only parsing summary index.
Module *M;
// Summary index being parsed, null if we are only parsing Module.
ModuleSummaryIndex *Index;
SlotMapping *Slots;
// Instruction metadata resolution. Each instruction can have a list of
// MDRef info associated with them.
//
// The simpler approach of just creating temporary MDNodes and then calling
// RAUW on them when the definition is processed doesn't work because some
// instruction metadata kinds, such as dbg, get stored in the IR in an
// "optimized" format which doesn't participate in the normal value use
// lists. This means that RAUW doesn't work, even on temporary MDNodes
// which otherwise support RAUW. Instead, we defer resolving MDNode
// references until the definitions have been processed.
struct MDRef {
SMLoc Loc;
unsigned MDKind, MDSlot;
};
SmallVector<Instruction*, 64> InstsWithTBAATag;
// Type resolution handling data structures. The location is set when we
// have processed a use of the type but not a definition yet.
StringMap<std::pair<Type*, LocTy> > NamedTypes;
std::map<unsigned, std::pair<Type*, LocTy> > NumberedTypes;
std::map<unsigned, TrackingMDNodeRef> NumberedMetadata;
std::map<unsigned, std::pair<TempMDTuple, LocTy>> ForwardRefMDNodes;
// Global Value reference information.
std::map<std::string, std::pair<GlobalValue*, LocTy> > ForwardRefVals;
std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
std::vector<GlobalValue*> NumberedVals;
// Comdat forward reference information.
std::map<std::string, LocTy> ForwardRefComdats;
// References to blockaddress. The key is the function ValID, the value is
// a list of references to blocks in that function.
std::map<ValID, std::map<ValID, GlobalValue *>> ForwardRefBlockAddresses;
class PerFunctionState;
/// Reference to per-function state to allow basic blocks to be
/// forward-referenced by blockaddress instructions within the same
/// function.
PerFunctionState *BlockAddressPFS;
// Attribute builder reference information.
std::map<Value*, std::vector<unsigned> > ForwardRefAttrGroups;
std::map<unsigned, AttrBuilder> NumberedAttrBuilders;
// Summary global value reference information.
std::map<unsigned, std::vector<std::pair<ValueInfo *, LocTy>>>
ForwardRefValueInfos;
std::map<unsigned, std::vector<std::pair<AliasSummary *, LocTy>>>
ForwardRefAliasees;
std::vector<ValueInfo> NumberedValueInfos;
// Summary type id reference information.
std::map<unsigned, std::vector<std::pair<GlobalValue::GUID *, LocTy>>>
ForwardRefTypeIds;
// Map of module ID to path.
std::map<unsigned, StringRef> ModuleIdMap;
/// Only the llvm-as tool may set this to false to bypass
/// UpgradeDebuginfo so it can generate broken bitcode.
bool UpgradeDebugInfo;
std::string SourceFileName;
public:
LLParser(StringRef F, SourceMgr &SM, SMDiagnostic &Err, Module *M,
ModuleSummaryIndex *Index, LLVMContext &Context,
Infer alignment of unmarked loads in IR/bitcode parsing. For IR generated by a compiler, this is really simple: you just take the datalayout from the beginning of the file, and apply it to all the IR later in the file. For optimization testcases that don't care about the datalayout, this is also really simple: we just use the default datalayout. The complexity here comes from the fact that some LLVM tools allow overriding the datalayout: some tools have an explicit flag for this, some tools will infer a datalayout based on the code generation target. Supporting this properly required plumbing through a bunch of new machinery: we want to allow overriding the datalayout after the datalayout is parsed from the file, but before we use any information from it. Therefore, IR/bitcode parsing now has a callback to allow tools to compute the datalayout at the appropriate time. Not sure if I covered all the LLVM tools that want to use the callback. (clang? lli? Misc IR manipulation tools like llvm-link?). But this is at least enough for all the LLVM regression tests, and IR without a datalayout is not something frontends should generate. This change had some sort of weird effects for certain CodeGen regression tests: if the datalayout is overridden with a datalayout with a different program or stack address space, we now parse IR based on the overridden datalayout, instead of the one written in the file (or the default one, if none is specified). This broke a few AVR tests, and one AMDGPU test. Outside the CodeGen tests I mentioned, the test changes are all just fixing CHECK lines and moving around datalayout lines in weird places. Differential Revision: https://reviews.llvm.org/D78403
2020-05-15 03:59:45 +08:00
SlotMapping *Slots = nullptr)
: Context(Context), Lex(F, SM, Err, Context), M(M), Index(Index),
Infer alignment of unmarked loads in IR/bitcode parsing. For IR generated by a compiler, this is really simple: you just take the datalayout from the beginning of the file, and apply it to all the IR later in the file. For optimization testcases that don't care about the datalayout, this is also really simple: we just use the default datalayout. The complexity here comes from the fact that some LLVM tools allow overriding the datalayout: some tools have an explicit flag for this, some tools will infer a datalayout based on the code generation target. Supporting this properly required plumbing through a bunch of new machinery: we want to allow overriding the datalayout after the datalayout is parsed from the file, but before we use any information from it. Therefore, IR/bitcode parsing now has a callback to allow tools to compute the datalayout at the appropriate time. Not sure if I covered all the LLVM tools that want to use the callback. (clang? lli? Misc IR manipulation tools like llvm-link?). But this is at least enough for all the LLVM regression tests, and IR without a datalayout is not something frontends should generate. This change had some sort of weird effects for certain CodeGen regression tests: if the datalayout is overridden with a datalayout with a different program or stack address space, we now parse IR based on the overridden datalayout, instead of the one written in the file (or the default one, if none is specified). This broke a few AVR tests, and one AMDGPU test. Outside the CodeGen tests I mentioned, the test changes are all just fixing CHECK lines and moving around datalayout lines in weird places. Differential Revision: https://reviews.llvm.org/D78403
2020-05-15 03:59:45 +08:00
Slots(Slots), BlockAddressPFS(nullptr) {}
bool Run(
bool UpgradeDebugInfo, DataLayoutCallbackTy DataLayoutCallback =
[](StringRef) { return None; });
bool parseStandaloneConstantValue(Constant *&C, const SlotMapping *Slots);
bool parseTypeAtBeginning(Type *&Ty, unsigned &Read,
const SlotMapping *Slots);
LLVMContext &getContext() { return Context; }
private:
/// Emit an error at location L and return the lexer's error result, so
/// callers can write `return error(Loc, ...)` (bool-true signals failure
/// throughout this parser).
bool error(LocTy L, const Twine &Msg) const { return Lex.Error(L, Msg); }
/// Emit an error at the current token's location.
bool tokError(const Twine &Msg) const { return error(Lex.getLoc(), Msg); }
/// Restore the internal name and slot mappings using the mappings that
/// were created at an earlier parsing stage.
void restoreParsingState(const SlotMapping *Slots);
/// getGlobalVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
GlobalValue *getGlobalVal(const std::string &N, Type *Ty, LocTy Loc,
bool IsCall);
GlobalValue *getGlobalVal(unsigned ID, Type *Ty, LocTy Loc, bool IsCall);
/// Get a Comdat with the specified name, creating a forward reference
/// record if needed.
Comdat *getComdat(const std::string &Name, LocTy Loc);
// Helper Routines.
bool parseToken(lltok::Kind T, const char *ErrMsg);
/// If the current token is T, consume it and return true; otherwise leave
/// the lexer untouched and return false.
bool EatIfPresent(lltok::Kind T) {
  if (Lex.getKind() == T) {
    Lex.Lex();
    return true;
  }
  return false;
}
FastMathFlags EatFastMathFlagsIfPresent() {
FastMathFlags FMF;
while (true)
switch (Lex.getKind()) {
[IR] redefine 'UnsafeAlgebra' / 'reassoc' fast-math-flags and add 'trans' fast-math-flag As discussed on llvm-dev: http://lists.llvm.org/pipermail/llvm-dev/2016-November/107104.html and again more recently: http://lists.llvm.org/pipermail/llvm-dev/2017-October/118118.html ...this is a step in cleaning up our fast-math-flags implementation in IR to better match the capabilities of both clang's user-visible flags and the backend's flags for SDNode. As proposed in the above threads, we're replacing the 'UnsafeAlgebra' bit (which had the 'umbrella' meaning that all flags are set) with a new bit that only applies to algebraic reassociation - 'AllowReassoc'. We're also adding a bit to allow approximations for library functions called 'ApproxFunc' (this was initially proposed as 'libm' or similar). ...and we're out of bits. 7 bits ought to be enough for anyone, right? :) FWIW, I did look at getting this out of SubclassOptionalData via SubclassData (spacious 16-bits), but that's apparently already used for other purposes. Also, I don't think we can just add a field to FPMathOperator because Operator is not intended to be instantiated. We'll defer movement of FMF to another day. We keep the 'fast' keyword. I thought about removing that, but seeing IR like this: %f.fast = fadd reassoc nnan ninf nsz arcp contract afn float %op1, %op2 ...made me think we want to keep the shortcut synonym. Finally, this change is binary incompatible with existing IR as seen in the compatibility tests. This statement: "Newer releases can ignore features from older releases, but they cannot miscompile them. For example, if nsw is ever replaced with something else, dropping it would be a valid way to upgrade the IR." ( http://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility ) ...provides the flexibility we want to make this change without requiring a new IR version. Ie, we're not loosening the FP strictness of existing IR. 
At worst, we will fail to optimize some previously 'fast' code because it's no longer recognized as 'fast'. This should get fixed as we audit/squash all of the uses of 'isFast()'. Note: an inter-dependent clang commit to use the new API name should closely follow commit. Differential Revision: https://reviews.llvm.org/D39304 llvm-svn: 317488
2017-11-07 00:27:15 +08:00
case lltok::kw_fast: FMF.setFast(); Lex.Lex(); continue;
case lltok::kw_nnan: FMF.setNoNaNs(); Lex.Lex(); continue;
case lltok::kw_ninf: FMF.setNoInfs(); Lex.Lex(); continue;
case lltok::kw_nsz: FMF.setNoSignedZeros(); Lex.Lex(); continue;
case lltok::kw_arcp: FMF.setAllowReciprocal(); Lex.Lex(); continue;
case lltok::kw_contract:
FMF.setAllowContract(true);
Lex.Lex();
continue;
[IR] redefine 'UnsafeAlgebra' / 'reassoc' fast-math-flags and add 'trans' fast-math-flag As discussed on llvm-dev: http://lists.llvm.org/pipermail/llvm-dev/2016-November/107104.html and again more recently: http://lists.llvm.org/pipermail/llvm-dev/2017-October/118118.html ...this is a step in cleaning up our fast-math-flags implementation in IR to better match the capabilities of both clang's user-visible flags and the backend's flags for SDNode. As proposed in the above threads, we're replacing the 'UnsafeAlgebra' bit (which had the 'umbrella' meaning that all flags are set) with a new bit that only applies to algebraic reassociation - 'AllowReassoc'. We're also adding a bit to allow approximations for library functions called 'ApproxFunc' (this was initially proposed as 'libm' or similar). ...and we're out of bits. 7 bits ought to be enough for anyone, right? :) FWIW, I did look at getting this out of SubclassOptionalData via SubclassData (spacious 16-bits), but that's apparently already used for other purposes. Also, I don't think we can just add a field to FPMathOperator because Operator is not intended to be instantiated. We'll defer movement of FMF to another day. We keep the 'fast' keyword. I thought about removing that, but seeing IR like this: %f.fast = fadd reassoc nnan ninf nsz arcp contract afn float %op1, %op2 ...made me think we want to keep the shortcut synonym. Finally, this change is binary incompatible with existing IR as seen in the compatibility tests. This statement: "Newer releases can ignore features from older releases, but they cannot miscompile them. For example, if nsw is ever replaced with something else, dropping it would be a valid way to upgrade the IR." ( http://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility ) ...provides the flexibility we want to make this change without requiring a new IR version. Ie, we're not loosening the FP strictness of existing IR. 
At worst, we will fail to optimize some previously 'fast' code because it's no longer recognized as 'fast'. This should get fixed as we audit/squash all of the uses of 'isFast()'. Note: an inter-dependent clang commit to use the new API name should closely follow commit. Differential Revision: https://reviews.llvm.org/D39304 llvm-svn: 317488
2017-11-07 00:27:15 +08:00
case lltok::kw_reassoc: FMF.setAllowReassoc(); Lex.Lex(); continue;
case lltok::kw_afn: FMF.setApproxFunc(); Lex.Lex(); continue;
default: return FMF;
}
return FMF;
}
/// If the current token is T, consume it, set Present to true, and record
/// its location in *Loc when Loc is non-null; otherwise set Present to
/// false. Always returns false (this helper never reports an error).
bool parseOptionalToken(lltok::Kind T, bool &Present,
                        LocTy *Loc = nullptr) {
  Present = Lex.getKind() == T;
  if (Present) {
    if (Loc)
      *Loc = Lex.getLoc();
    Lex.Lex();
  }
  return false;
}
// Scalar-parsing helpers. Like the rest of the parser, they return true on
// error and false on success.
bool parseStringConstant(std::string &Result);
bool parseUInt32(unsigned &Val);
// Overload that also reports the location where the integer token started.
bool parseUInt32(unsigned &Val, LocTy &Loc) {
  Loc = Lex.getLoc();
  return parseUInt32(Val);
}
bool parseUInt64(uint64_t &Val);
// Overload that also reports the location where the integer token started.
bool parseUInt64(uint64_t &Val, LocTy &Loc) {
  Loc = Lex.getLoc();
  return parseUInt64(Val);
}
bool parseFlag(unsigned &Val);
bool parseStringAttribute(AttrBuilder &B);
bool parseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
bool parseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
bool parseOptionalUnnamedAddr(GlobalVariable::UnnamedAddr &UnnamedAddr);
bool parseOptionalAddrSpace(unsigned &AddrSpace, unsigned DefaultAS = 0);
/// Parse an optional address space, defaulting to the program address space
/// from the module's datalayout when none is written.
/// (Dropped the stray ';' after the body — a redundant empty declaration
/// that warns under -Wextra-semi/-pedantic.)
bool parseOptionalProgramAddrSpace(unsigned &AddrSpace) {
  return parseOptionalAddrSpace(
      AddrSpace, M->getDataLayout().getProgramAddressSpace());
}
bool parseOptionalParamAttrs(AttrBuilder &B);
bool parseOptionalReturnAttrs(AttrBuilder &B);
bool parseOptionalLinkage(unsigned &Res, bool &HasLinkage,
unsigned &Visibility, unsigned &DLLStorageClass,
bool &DSOLocal);
void parseOptionalDSOLocal(bool &DSOLocal);
void parseOptionalVisibility(unsigned &Res);
void parseOptionalDLLStorageClass(unsigned &Res);
bool parseOptionalCallingConv(unsigned &CC);
bool parseOptionalAlignment(MaybeAlign &Alignment,
bool AllowParens = false);
bool parseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
bool parseScopeAndOrdering(bool IsAtomic, SyncScope::ID &SSID,
AtomicOrdering &Ordering);
bool parseScope(SyncScope::ID &SSID);
bool parseOrdering(AtomicOrdering &Ordering);
bool parseOptionalStackAlignment(unsigned &Alignment);
bool parseOptionalCommaAlign(MaybeAlign &Alignment, bool &AteExtraComma);
bool parseOptionalCommaAddrSpace(unsigned &AddrSpace, LocTy &Loc,
bool &AteExtraComma);
bool parseOptionalCommaInAlloca(bool &IsInAlloca);
bool parseAllocSizeArguments(unsigned &BaseSizeArg,
Optional<unsigned> &HowManyArg);
bool parseIndexList(SmallVectorImpl<unsigned> &Indices,
bool &AteExtraComma);
/// Convenience overload of parseIndexList that rejects a trailing comma:
/// an index list that "ate" an extra comma is reported as an error here.
bool parseIndexList(SmallVectorImpl<unsigned> &Indices) {
  bool TrailingComma = false;
  if (parseIndexList(Indices, TrailingComma))
    return true;
  return TrailingComma ? tokError("expected index") : false;
}
// Top-Level Entities
bool parseTopLevelEntities();
bool validateEndOfModule(bool UpgradeDebugInfo);
bool validateEndOfIndex();
bool parseTargetDefinitions();
bool parseTargetDefinition();
bool parseModuleAsm();
bool parseSourceFileName();
bool parseDepLibs(); // FIXME: Remove in 4.0.
bool parseUnnamedType();
bool parseNamedType();
bool parseDeclare();
bool parseDefine();
bool parseGlobalType(bool &IsConstant);
bool parseUnnamedGlobal();
bool parseNamedGlobal();
bool parseGlobal(const std::string &Name, LocTy NameLoc, unsigned Linkage,
bool HasLinkage, unsigned Visibility,
unsigned DLLStorageClass, bool DSOLocal,
GlobalVariable::ThreadLocalMode TLM,
GlobalVariable::UnnamedAddr UnnamedAddr);
bool parseIndirectSymbol(const std::string &Name, LocTy NameLoc,
unsigned L, unsigned Visibility,
unsigned DLLStorageClass, bool DSOLocal,
GlobalVariable::ThreadLocalMode TLM,
GlobalVariable::UnnamedAddr UnnamedAddr);
bool parseComdat();
bool parseStandaloneMetadata();
bool parseNamedMetadata();
bool parseMDString(MDString *&Result);
bool parseMDNodeID(MDNode *&Result);
bool parseUnnamedAttrGrp();
bool parseFnAttributeValuePairs(AttrBuilder &B,
std::vector<unsigned> &FwdRefAttrGrps,
bool inAttrGrp, LocTy &BuiltinLoc);
bool parseOptionalTypeAttr(Type *&Result, lltok::Kind AttrName);
bool parseRequiredTypeAttr(Type *&Result, lltok::Kind AttrName);
bool parsePreallocated(Type *&Result);
bool parseByRef(Type *&Result);
// Module Summary Index Parsing.
bool skipModuleSummaryEntry();
bool parseSummaryEntry();
bool parseModuleEntry(unsigned ID);
bool parseModuleReference(StringRef &ModulePath);
bool parseGVReference(ValueInfo &VI, unsigned &GVId);
bool parseSummaryIndexFlags();
bool parseBlockCount();
bool parseGVEntry(unsigned ID);
// Module summary index parsing: one parse* entry per summary record kind.
bool parseFunctionSummary(std::string Name, GlobalValue::GUID, unsigned ID);
bool parseVariableSummary(std::string Name, GlobalValue::GUID, unsigned ID);
bool parseAliasSummary(std::string Name, GlobalValue::GUID, unsigned ID);
// Flag blobs attached to global-value / global-variable summaries.
bool parseGVFlags(GlobalValueSummary::GVFlags &GVFlags);
bool parseGVarFlags(GlobalVarSummary::GVarFlags &GVarFlags);
bool parseOptionalFFlags(FunctionSummary::FFlags &FFlags);
// Call-graph edges of a function summary, with optional hotness annotation.
bool parseOptionalCalls(std::vector<FunctionSummary::EdgeTy> &Calls);
bool parseHotness(CalleeInfo::HotnessType &Hotness);
// Type-id (CFI / whole-program devirtualization) info in summaries.
bool parseOptionalTypeIdInfo(FunctionSummary::TypeIdInfo &TypeIdInfo);
bool parseTypeTests(std::vector<GlobalValue::GUID> &TypeTests);
bool parseVFuncIdList(lltok::Kind Kind,
                      std::vector<FunctionSummary::VFuncId> &VFuncIdList);
bool parseConstVCallList(
    lltok::Kind Kind,
    std::vector<FunctionSummary::ConstVCall> &ConstVCallList);
// Maps a summary slot number to the (element index, source location) pairs
// that forward-referenced it, so they can be patched up later.
using IdToIndexMapType =
    std::map<unsigned, std::vector<std::pair<unsigned, LocTy>>>;
bool parseConstVCall(FunctionSummary::ConstVCall &ConstVCall,
                     IdToIndexMapType &IdToIndexMap, unsigned Index);
bool parseVFuncId(FunctionSummary::VFuncId &VFuncId,
                  IdToIndexMapType &IdToIndexMap, unsigned Index);
bool parseOptionalVTableFuncs(VTableFuncList &VTableFuncs);
// Parameter-access (stack-safety) records of a function summary.
bool parseOptionalParamAccesses(
    std::vector<FunctionSummary::ParamAccess> &Params);
bool parseParamNo(uint64_t &ParamNo);
// (slot number, source location) pairs, used for forward references.
using IdLocListType = std::vector<std::pair<unsigned, LocTy>>;
bool parseParamAccess(FunctionSummary::ParamAccess &Param,
                      IdLocListType &IdLocList);
bool parseParamAccessCall(FunctionSummary::ParamAccess::Call &Call,
                          IdLocListType &IdLocList);
bool parseParamAccessOffset(ConstantRange &Range);
bool parseOptionalRefs(std::vector<ValueInfo> &Refs);
// Type-id entries and whole-program devirtualization resolutions.
bool parseTypeIdEntry(unsigned ID);
bool parseTypeIdSummary(TypeIdSummary &TIS);
bool parseTypeIdCompatibleVtableEntry(unsigned ID);
bool parseTypeTestResolution(TypeTestResolution &TTRes);
bool parseOptionalWpdResolutions(
    std::map<uint64_t, WholeProgramDevirtResolution> &WPDResMap);
bool parseWpdRes(WholeProgramDevirtResolution &WPDRes);
bool parseOptionalResByArg(
    std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg>
        &ResByArg);
bool parseArgs(std::vector<uint64_t> &Args);
// Registers a finished summary in the index under Name/GUID and slot ID.
void addGlobalValueToIndex(std::string Name, GlobalValue::GUID,
                           GlobalValue::LinkageTypes Linkage, unsigned ID,
                           std::unique_ptr<GlobalValueSummary> Summary);
// Type Parsing.
bool parseType(Type *&Result, const Twine &Msg, bool AllowVoid = false);
bool parseType(Type *&Result, bool AllowVoid = false) {
return parseType(Result, "expected type", AllowVoid);
IR: Make metadata typeless in assembly Now that `Metadata` is typeless, reflect that in the assembly. These are the matching assembly changes for the metadata/value split in r223802. - Only use the `metadata` type when referencing metadata from a call intrinsic -- i.e., only when it's used as a `Value`. - Stop pretending that `ValueAsMetadata` is wrapped in an `MDNode` when referencing it from call intrinsics. So, assembly like this: define @foo(i32 %v) { call void @llvm.foo(metadata !{i32 %v}, metadata !0) call void @llvm.foo(metadata !{i32 7}, metadata !0) call void @llvm.foo(metadata !1, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{metadata !3}, metadata !0) ret void, !bar !2 } !0 = metadata !{metadata !2} !1 = metadata !{i32* @global} !2 = metadata !{metadata !3} !3 = metadata !{} turns into this: define @foo(i32 %v) { call void @llvm.foo(metadata i32 %v, metadata !0) call void @llvm.foo(metadata i32 7, metadata !0) call void @llvm.foo(metadata i32* @global, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{!3}, metadata !0) ret void, !bar !2 } !0 = !{!2} !1 = !{i32* @global} !2 = !{!3} !3 = !{} I wrote an upgrade script that handled almost all of the tests in llvm and many of the tests in cfe (even handling many `CHECK` lines). I've attached it (or will attach it in a moment if you're speedy) to PR21532 to help everyone update their out-of-tree testcases. This is part of PR21532. llvm-svn: 224257
2014-12-16 03:07:53 +08:00
}
bool parseType(Type *&Result, const Twine &Msg, LocTy &Loc,
IR: Make metadata typeless in assembly Now that `Metadata` is typeless, reflect that in the assembly. These are the matching assembly changes for the metadata/value split in r223802. - Only use the `metadata` type when referencing metadata from a call intrinsic -- i.e., only when it's used as a `Value`. - Stop pretending that `ValueAsMetadata` is wrapped in an `MDNode` when referencing it from call intrinsics. So, assembly like this: define @foo(i32 %v) { call void @llvm.foo(metadata !{i32 %v}, metadata !0) call void @llvm.foo(metadata !{i32 7}, metadata !0) call void @llvm.foo(metadata !1, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{metadata !3}, metadata !0) ret void, !bar !2 } !0 = metadata !{metadata !2} !1 = metadata !{i32* @global} !2 = metadata !{metadata !3} !3 = metadata !{} turns into this: define @foo(i32 %v) { call void @llvm.foo(metadata i32 %v, metadata !0) call void @llvm.foo(metadata i32 7, metadata !0) call void @llvm.foo(metadata i32* @global, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{!3}, metadata !0) ret void, !bar !2 } !0 = !{!2} !1 = !{i32* @global} !2 = !{!3} !3 = !{} I wrote an upgrade script that handled almost all of the tests in llvm and many of the tests in cfe (even handling many `CHECK` lines). I've attached it (or will attach it in a moment if you're speedy) to PR21532 to help everyone update their out-of-tree testcases. This is part of PR21532. llvm-svn: 224257
2014-12-16 03:07:53 +08:00
bool AllowVoid = false) {
Loc = Lex.getLoc();
return parseType(Result, Msg, AllowVoid);
IR: Make metadata typeless in assembly Now that `Metadata` is typeless, reflect that in the assembly. These are the matching assembly changes for the metadata/value split in r223802. - Only use the `metadata` type when referencing metadata from a call intrinsic -- i.e., only when it's used as a `Value`. - Stop pretending that `ValueAsMetadata` is wrapped in an `MDNode` when referencing it from call intrinsics. So, assembly like this: define @foo(i32 %v) { call void @llvm.foo(metadata !{i32 %v}, metadata !0) call void @llvm.foo(metadata !{i32 7}, metadata !0) call void @llvm.foo(metadata !1, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{metadata !3}, metadata !0) ret void, !bar !2 } !0 = metadata !{metadata !2} !1 = metadata !{i32* @global} !2 = metadata !{metadata !3} !3 = metadata !{} turns into this: define @foo(i32 %v) { call void @llvm.foo(metadata i32 %v, metadata !0) call void @llvm.foo(metadata i32 7, metadata !0) call void @llvm.foo(metadata i32* @global, metadata !0) call void @llvm.foo(metadata !3, metadata !0) call void @llvm.foo(metadata !{!3}, metadata !0) ret void, !bar !2 } !0 = !{!2} !1 = !{i32* @global} !2 = !{!3} !3 = !{} I wrote an upgrade script that handled almost all of the tests in llvm and many of the tests in cfe (even handling many `CHECK` lines). I've attached it (or will attach it in a moment if you're speedy) to PR21532 to help everyone update their out-of-tree testcases. This is part of PR21532. llvm-svn: 224257
2014-12-16 03:07:53 +08:00
}
bool parseType(Type *&Result, LocTy &Loc, bool AllowVoid = false) {
Loc = Lex.getLoc();
return parseType(Result, AllowVoid);
}
// Aggregate-type parsing helpers (struct / array / vector / function types).
bool parseAnonStructType(Type *&Result, bool Packed);
bool parseStructBody(SmallVectorImpl<Type *> &Body);
// Handles named struct definitions; Entry is the (type, location) record for
// the name, TypeLoc is where the name appeared.
bool parseStructDefinition(SMLoc TypeLoc, StringRef Name,
                           std::pair<Type *, LocTy> &Entry,
                           Type *&ResultTy);
bool parseArrayVectorType(Type *&Result, bool IsVector);
bool parseFunctionType(Type *&Result);
// Function Semantic Analysis.
/// PerFunctionState - Parsing state scoped to a single function body:
/// tracks named and numbered values/blocks, including forward references
/// that are resolved once the definition is seen.
class PerFunctionState {
// The parser this state belongs to.
LLParser &P;
// The function currently being parsed.
Function &F;
// Placeholders for values referenced by name before being defined, with
// the location of the first reference.
std::map<std::string, std::pair<Value*, LocTy> > ForwardRefVals;
// Same, for values referenced by numeric slot ID.
std::map<unsigned, std::pair<Value*, LocTy> > ForwardRefValIDs;
// Unnamed (numbered) values defined so far, in slot order.
std::vector<Value*> NumberedVals;
/// FunctionNumber - If this is an unnamed function, this is the slot
/// number of it, otherwise it is -1.
int FunctionNumber;
public:
PerFunctionState(LLParser &p, Function &f, int functionNumber);
~PerFunctionState();
Function &getFunction() const { return F; }
// Called at the end of a function body; returns true on error.
bool finishFunction();
/// GetVal - Get a value with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// exists but does not have the right type.
Value *getVal(const std::string &Name, Type *Ty, LocTy Loc, bool IsCall);
Value *getVal(unsigned ID, Type *Ty, LocTy Loc, bool IsCall);
/// setInstName - After an instruction is parsed and inserted into its
/// basic block, this installs its name.
bool setInstName(int NameID, const std::string &NameStr, LocTy NameLoc,
                 Instruction *Inst);
/// GetBB - Get a basic block with the specified name or ID, creating a
/// forward reference record if needed. This can return null if the value
/// is not a BasicBlock.
BasicBlock *getBB(const std::string &Name, LocTy Loc);
BasicBlock *getBB(unsigned ID, LocTy Loc);
/// DefineBB - Define the specified basic block, which is either named or
/// unnamed. If there is an error, this returns null otherwise it returns
/// the block being defined.
BasicBlock *defineBB(const std::string &Name, int NameID, LocTy Loc);
// Resolves blockaddress constants that referenced blocks before they
// existed; returns true on error.
bool resolveForwardRefBlockAddresses();
};
bool convertValIDToValue(Type *Ty, ValID &ID, Value *&V,
PerFunctionState *PFS, bool IsCall);
Value *checkValidVariableType(LocTy Loc, const Twine &Name, Type *Ty,
Value *Val, bool IsCall);
bool parseConstantValue(Type *Ty, Constant *&C);
bool parseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
bool parseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
return parseValue(Ty, V, &PFS);
}
[IR] Reformulate LLVM's EH funclet IR While we have successfully implemented a funclet-oriented EH scheme on top of LLVM IR, our scheme has some notable deficiencies: - catchendpad and cleanupendpad are necessary in the current design but they are difficult to explain to others, even to seasoned LLVM experts. - catchendpad and cleanupendpad are optimization barriers. They cannot be split and force all potentially throwing call-sites to be invokes. This has a noticable effect on the quality of our code generation. - catchpad, while similar in some aspects to invoke, is fairly awkward. It is unsplittable, starts a funclet, and has control flow to other funclets. - The nesting relationship between funclets is currently a property of control flow edges. Because of this, we are forced to carefully analyze the flow graph to see if there might potentially exist illegal nesting among funclets. While we have logic to clone funclets when they are illegally nested, it would be nicer if we had a representation which forbade them upfront. Let's clean this up a bit by doing the following: - Instead, make catchpad more like cleanuppad and landingpad: no control flow, just a bunch of simple operands; catchpad would be splittable. - Introduce catchswitch, a control flow instruction designed to model the constraints of funclet oriented EH. - Make funclet scoping explicit by having funclet instructions consume the token produced by the funclet which contains them. - Remove catchendpad and cleanupendpad. Their presence can be inferred implicitly using coloring information. N.B. The state numbering code for the CLR has been updated but the veracity of it's output cannot be spoken for. An expert should take a look to make sure the results are reasonable. Reviewers: rnk, JosephTremoulet, andrew.w.kaylor Differential Revision: http://reviews.llvm.org/D15139 llvm-svn: 255422
2015-12-12 13:38:55 +08:00
bool parseValue(Type *Ty, Value *&V, LocTy &Loc, PerFunctionState &PFS) {
Loc = Lex.getLoc();
return parseValue(Ty, V, &PFS);
}
/// Parse a "type value" pair; defined out of line.
bool parseTypeAndValue(Value *&V, PerFunctionState *PFS);

/// Reference-taking convenience wrapper around the pointer overload.
bool parseTypeAndValue(Value *&V, PerFunctionState &PFS) {
  return parseTypeAndValue(V, &PFS);
}

/// Variant that additionally reports where the pair started.
bool parseTypeAndValue(Value *&V, LocTy &Loc, PerFunctionState &PFS) {
  Loc = Lex.getLoc();
  return parseTypeAndValue(V, &PFS);
}

/// Parse a "type label" pair naming a basic block; defined out of line.
bool parseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
                            PerFunctionState &PFS);

/// Overload for callers that do not care about the block's location.
bool parseTypeAndBasicBlock(BasicBlock *&BB, PerFunctionState &PFS) {
  LocTy IgnoredLoc;
  return parseTypeAndBasicBlock(BB, IgnoredLoc, PFS);
}
struct ParamInfo {
LocTy Loc;
Value *V;
AttributeSet Attrs;
ParamInfo(LocTy loc, Value *v, AttributeSet attrs)
: Loc(loc), V(v), Attrs(attrs) {}
};
// Call-site argument lists, operand bundles, and exception-pad arguments.
bool parseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
                        PerFunctionState &PFS, bool IsMustTailCall = false,
                        bool InVarArgsFunc = false);
bool
parseOptionalOperandBundles(SmallVectorImpl<OperandBundleDef> &BundleList,
                            PerFunctionState &PFS);
bool parseExceptionArgs(SmallVectorImpl<Value *> &Args,
                        PerFunctionState &PFS);
// Constant Parsing.
bool parseValID(ValID &ID, PerFunctionState *PFS = nullptr);
bool parseGlobalValue(Type *Ty, Constant *&C);
bool parseGlobalTypeAndValue(Constant *&V);
bool parseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
                            Optional<unsigned> *InRangeOp = nullptr);
bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
// Metadata parsing: values-as-metadata, MDNodes, and attachments.
bool parseMetadataAsValue(Value *&V, PerFunctionState &PFS);
bool parseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
                          PerFunctionState *PFS);
bool parseMetadata(Metadata *&MD, PerFunctionState *PFS);
bool parseMDTuple(MDNode *&MD, bool IsDistinct = false);
bool parseMDNode(MDNode *&N);
bool parseMDNodeTail(MDNode *&N);
bool parseMDNodeVector(SmallVectorImpl<Metadata *> &Elts);
bool parseMetadataAttachment(unsigned &Kind, MDNode *&MD);
bool parseInstructionMetadata(Instruction &Inst);
bool parseGlobalObjectMetadataAttachment(GlobalObject &GO);
bool parseOptionalFunctionMetadata(Function &F);
// Field-by-field parsing of specialized (DI*) metadata nodes.
template <class FieldTy>
bool parseMDField(LocTy Loc, StringRef Name, FieldTy &Result);
template <class FieldTy> bool parseMDField(StringRef Name, FieldTy &Result);
template <class ParserTy> bool parseMDFieldsImplBody(ParserTy ParseField);
template <class ParserTy>
bool parseMDFieldsImpl(ParserTy ParseField, LocTy &ClosingLoc);
bool parseSpecializedMDNode(MDNode *&N, bool IsDistinct = false);
// Declares one parse<Class> per specialized node listed in Metadata.def.
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
bool parse##CLASS(MDNode *&Result, bool IsDistinct);
#include "llvm/IR/Metadata.def"
// Function Parsing.
struct ArgInfo {
LocTy Loc;
Type *Ty;
AttributeSet Attrs;
std::string Name;
ArgInfo(LocTy L, Type *ty, AttributeSet Attr, const std::string &N)
: Loc(L), Ty(ty), Attrs(Attr), Name(N) {}
};
// Function header / body parsing.
bool parseArgumentList(SmallVectorImpl<ArgInfo> &ArgList, bool &IsVarArg);
bool parseFunctionHeader(Function *&Fn, bool IsDefine);
bool parseFunctionBody(Function &Fn);
bool parseBasicBlock(PerFunctionState &PFS);
enum TailCallType { TCT_None, TCT_Tail, TCT_MustTail };
// Instruction Parsing. Each instruction parsing routine can return with a
// normal result, an error result, or return having eaten an extra comma.
enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
// int-returning routines below use the InstResult convention; bool-returning
// ones just signal error with true.
int parseInstruction(Instruction *&Inst, BasicBlock *BB,
                     PerFunctionState &PFS);
bool parseCmpPredicate(unsigned &P, unsigned Opc);
// Terminators.
bool parseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS);
bool parseBr(Instruction *&Inst, PerFunctionState &PFS);
bool parseSwitch(Instruction *&Inst, PerFunctionState &PFS);
bool parseIndirectBr(Instruction *&Inst, PerFunctionState &PFS);
bool parseInvoke(Instruction *&Inst, PerFunctionState &PFS);
bool parseResume(Instruction *&Inst, PerFunctionState &PFS);
// Exception-handling (funclet) instructions.
bool parseCleanupRet(Instruction *&Inst, PerFunctionState &PFS);
bool parseCatchRet(Instruction *&Inst, PerFunctionState &PFS);
bool parseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS);
bool parseCatchPad(Instruction *&Inst, PerFunctionState &PFS);
bool parseCleanupPad(Instruction *&Inst, PerFunctionState &PFS);
bool parseCallBr(Instruction *&Inst, PerFunctionState &PFS);
// Unary/binary operators; Opc selects the opcode, IsFP the FP variant.
bool parseUnaryOp(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc,
                  bool IsFP);
bool parseArithmetic(Instruction *&Inst, PerFunctionState &PFS,
                     unsigned Opc, bool IsFP);
bool parseLogical(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
bool parseCompare(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
bool parseCast(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
bool parseSelect(Instruction *&Inst, PerFunctionState &PFS);
bool parseVAArg(Instruction *&Inst, PerFunctionState &PFS);
bool parseExtractElement(Instruction *&Inst, PerFunctionState &PFS);
bool parseInsertElement(Instruction *&Inst, PerFunctionState &PFS);
bool parseShuffleVector(Instruction *&Inst, PerFunctionState &PFS);
int parsePHI(Instruction *&Inst, PerFunctionState &PFS);
bool parseLandingPad(Instruction *&Inst, PerFunctionState &PFS);
bool parseCall(Instruction *&Inst, PerFunctionState &PFS,
               CallInst::TailCallKind TCK);
// Memory instructions.
int parseAlloc(Instruction *&Inst, PerFunctionState &PFS);
int parseLoad(Instruction *&Inst, PerFunctionState &PFS);
int parseStore(Instruction *&Inst, PerFunctionState &PFS);
int parseCmpXchg(Instruction *&Inst, PerFunctionState &PFS);
int parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS);
int parseFence(Instruction *&Inst, PerFunctionState &PFS);
int parseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS);
int parseExtractValue(Instruction *&Inst, PerFunctionState &PFS);
int parseInsertValue(Instruction *&Inst, PerFunctionState &PFS);
bool parseFreeze(Instruction *&I, PerFunctionState &PFS);
// Use-list order directives.
bool parseUseListOrder(PerFunctionState *PFS = nullptr);
bool parseUseListOrderBB();
bool parseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes);
bool sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes, SMLoc Loc);
};
} // End llvm namespace
#endif