forked from OSchip/llvm-project
[WebAssembly] clang-tidy (NFC)
Summary: This patch fixes clang-tidy warnings on wasm-only files. The list of checks used is: `-*,clang-diagnostic-*,llvm-*,misc-*,-misc-unused-parameters,readability-identifier-naming,modernize-*` (LLVM's default .clang-tidy list is the same except it does not have `modernize-*`. But I've seen in multiple CLs in LLVM the modernize style was recommended and code was fixed based on the style, so I added it as well.) The common fixes are: - Variable names start with an uppercase letter - Function names start with a lowercase letter - Use `auto` when you use casts so the type is evident - Use inline initialization for class member variables - Use `= default` for empty constructors / destructors - Use `using` in place of `typedef` Reviewers: sbc100, tlively, aardappel Subscribers: dschuff, sunfish, jgravelle-google, yurydelendik, kripken, MatzeB, mgorny, rupprecht, llvm-commits Differential Revision: https://reviews.llvm.org/D57500 llvm-svn: 353075
This commit is contained in:
parent
2e862c7555
commit
18c56a0762
|
@ -55,7 +55,7 @@ public:
|
|||
|
||||
/// Decides whether a '.section' directive should be printed before the
|
||||
/// section name
|
||||
bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
|
||||
bool shouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
|
||||
|
||||
StringRef getSectionName() const { return SectionName; }
|
||||
const MCSymbolWasm *getGroup() const { return Group; }
|
||||
|
|
|
@ -8,8 +8,8 @@
|
|||
|
||||
#include "llvm/BinaryFormat/Wasm.h"
|
||||
|
||||
std::string llvm::wasm::toString(wasm::WasmSymbolType type) {
|
||||
switch (type) {
|
||||
std::string llvm::wasm::toString(wasm::WasmSymbolType Type) {
|
||||
switch (Type) {
|
||||
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
|
||||
return "WASM_SYMBOL_TYPE_FUNCTION";
|
||||
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
|
||||
|
@ -24,8 +24,8 @@ std::string llvm::wasm::toString(wasm::WasmSymbolType type) {
|
|||
llvm_unreachable("unknown symbol type");
|
||||
}
|
||||
|
||||
std::string llvm::wasm::relocTypetoString(uint32_t type) {
|
||||
switch (type) {
|
||||
std::string llvm::wasm::relocTypetoString(uint32_t Type) {
|
||||
switch (Type) {
|
||||
#define WASM_RELOC(NAME, VALUE) \
|
||||
case VALUE: \
|
||||
return #NAME;
|
||||
|
|
|
@ -32,8 +32,8 @@ using namespace llvm;
|
|||
namespace {
|
||||
|
||||
class WasmAsmParser : public MCAsmParserExtension {
|
||||
MCAsmParser *Parser;
|
||||
MCAsmLexer *Lexer;
|
||||
MCAsmParser *Parser = nullptr;
|
||||
MCAsmLexer *Lexer = nullptr;
|
||||
|
||||
template<bool (WasmAsmParser::*HandlerMethod)(StringRef, SMLoc)>
|
||||
void addDirectiveHandler(StringRef Directive) {
|
||||
|
@ -44,9 +44,7 @@ class WasmAsmParser : public MCAsmParserExtension {
|
|||
}
|
||||
|
||||
public:
|
||||
WasmAsmParser() : Parser(nullptr), Lexer(nullptr) {
|
||||
BracketExpressionsSupported = true;
|
||||
}
|
||||
WasmAsmParser() { BracketExpressionsSupported = true; }
|
||||
|
||||
void Initialize(MCAsmParser &P) override {
|
||||
Parser = &P;
|
||||
|
@ -60,19 +58,20 @@ public:
|
|||
addDirectiveHandler<&WasmAsmParser::parseDirectiveType>(".type");
|
||||
}
|
||||
|
||||
bool Error(const StringRef &msg, const AsmToken &tok) {
|
||||
return Parser->Error(tok.getLoc(), msg + tok.getString());
|
||||
bool error(const StringRef &Msg, const AsmToken &Tok) {
|
||||
return Parser->Error(Tok.getLoc(), Msg + Tok.getString());
|
||||
}
|
||||
|
||||
bool IsNext(AsmToken::TokenKind Kind) {
|
||||
auto ok = Lexer->is(Kind);
|
||||
if (ok) Lex();
|
||||
return ok;
|
||||
bool isNext(AsmToken::TokenKind Kind) {
|
||||
auto Ok = Lexer->is(Kind);
|
||||
if (Ok)
|
||||
Lex();
|
||||
return Ok;
|
||||
}
|
||||
|
||||
bool Expect(AsmToken::TokenKind Kind, const char *KindName) {
|
||||
if (!IsNext(Kind))
|
||||
return Error(std::string("Expected ") + KindName + ", instead got: ",
|
||||
bool expect(AsmToken::TokenKind Kind, const char *KindName) {
|
||||
if (!isNext(Kind))
|
||||
return error(std::string("Expected ") + KindName + ", instead got: ",
|
||||
Lexer->getTok());
|
||||
return false;
|
||||
}
|
||||
|
@ -87,9 +86,9 @@ public:
|
|||
if (Parser->parseIdentifier(Name))
|
||||
return TokError("expected identifier in directive");
|
||||
// FIXME: currently requiring this very fixed format.
|
||||
if (Expect(AsmToken::Comma, ",") || Expect(AsmToken::String, "string") ||
|
||||
Expect(AsmToken::Comma, ",") || Expect(AsmToken::At, "@") ||
|
||||
Expect(AsmToken::EndOfStatement, "eol"))
|
||||
if (expect(AsmToken::Comma, ",") || expect(AsmToken::String, "string") ||
|
||||
expect(AsmToken::Comma, ",") || expect(AsmToken::At, "@") ||
|
||||
expect(AsmToken::EndOfStatement, "eol"))
|
||||
return true;
|
||||
// This is done automatically by the assembler for text sections currently,
|
||||
// so we don't need to emit that here. This is what it would do (and may
|
||||
|
@ -106,12 +105,12 @@ public:
|
|||
if (Parser->parseIdentifier(Name))
|
||||
return TokError("expected identifier in directive");
|
||||
auto Sym = getContext().getOrCreateSymbol(Name);
|
||||
if (Expect(AsmToken::Comma, ","))
|
||||
if (expect(AsmToken::Comma, ","))
|
||||
return true;
|
||||
const MCExpr *Expr;
|
||||
if (Parser->parseExpression(Expr))
|
||||
return true;
|
||||
if (Expect(AsmToken::EndOfStatement, "eol"))
|
||||
if (expect(AsmToken::EndOfStatement, "eol"))
|
||||
return true;
|
||||
// This is done automatically by the assembler for functions currently,
|
||||
// so we don't need to emit that here. This is what it would do:
|
||||
|
@ -124,24 +123,24 @@ public:
|
|||
// This could be the start of a function, check if followed by
|
||||
// "label,@function"
|
||||
if (!Lexer->is(AsmToken::Identifier))
|
||||
return Error("Expected label after .type directive, got: ",
|
||||
return error("Expected label after .type directive, got: ",
|
||||
Lexer->getTok());
|
||||
auto WasmSym = cast<MCSymbolWasm>(
|
||||
getStreamer().getContext().getOrCreateSymbol(
|
||||
Lexer->getTok().getString()));
|
||||
Lex();
|
||||
if (!(IsNext(AsmToken::Comma) && IsNext(AsmToken::At) &&
|
||||
if (!(isNext(AsmToken::Comma) && isNext(AsmToken::At) &&
|
||||
Lexer->is(AsmToken::Identifier)))
|
||||
return Error("Expected label,@type declaration, got: ", Lexer->getTok());
|
||||
return error("Expected label,@type declaration, got: ", Lexer->getTok());
|
||||
auto TypeName = Lexer->getTok().getString();
|
||||
if (TypeName == "function")
|
||||
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
|
||||
else if (TypeName == "global")
|
||||
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
|
||||
else
|
||||
return Error("Unknown WASM symbol type: ", Lexer->getTok());
|
||||
return error("Unknown WASM symbol type: ", Lexer->getTok());
|
||||
Lex();
|
||||
return Expect(AsmToken::EndOfStatement, "EOL");
|
||||
return expect(AsmToken::EndOfStatement, "EOL");
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -14,11 +14,11 @@
|
|||
|
||||
using namespace llvm;
|
||||
|
||||
MCSectionWasm::~MCSectionWasm() {} // anchor.
|
||||
MCSectionWasm::~MCSectionWasm() = default; // anchor.
|
||||
|
||||
// Decides whether a '.section' directive
|
||||
// should be printed before the section name.
|
||||
bool MCSectionWasm::ShouldOmitSectionDirective(StringRef Name,
|
||||
bool MCSectionWasm::shouldOmitSectionDirective(StringRef Name,
|
||||
const MCAsmInfo &MAI) const {
|
||||
return MAI.shouldOmitSectionDirective(Name);
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ void MCSectionWasm::PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
|
|||
raw_ostream &OS,
|
||||
const MCExpr *Subsection) const {
|
||||
|
||||
if (ShouldOmitSectionDirective(SectionName, MAI)) {
|
||||
if (shouldOmitSectionDirective(SectionName, MAI)) {
|
||||
OS << '\t' << getSectionName();
|
||||
if (Subsection) {
|
||||
OS << '\t';
|
||||
|
|
|
@ -10,8 +10,8 @@
|
|||
|
||||
using namespace llvm;
|
||||
|
||||
MCWasmObjectTargetWriter::MCWasmObjectTargetWriter(bool Is64Bit_)
|
||||
: Is64Bit(Is64Bit_) {}
|
||||
MCWasmObjectTargetWriter::MCWasmObjectTargetWriter(bool Is64Bit)
|
||||
: Is64Bit(Is64Bit) {}
|
||||
|
||||
// Pin the vtable to this object file
|
||||
MCWasmObjectTargetWriter::~MCWasmObjectTargetWriter() = default;
|
||||
|
|
|
@ -34,15 +34,15 @@
|
|||
|
||||
using namespace llvm;
|
||||
|
||||
MCWasmStreamer::~MCWasmStreamer() {}
|
||||
MCWasmStreamer::~MCWasmStreamer() = default; // anchor.
|
||||
|
||||
void MCWasmStreamer::mergeFragment(MCDataFragment *DF, MCDataFragment *EF) {
|
||||
flushPendingLabels(DF, DF->getContents().size());
|
||||
|
||||
for (unsigned i = 0, e = EF->getFixups().size(); i != e; ++i) {
|
||||
EF->getFixups()[i].setOffset(EF->getFixups()[i].getOffset() +
|
||||
for (unsigned I = 0, E = EF->getFixups().size(); I != E; ++I) {
|
||||
EF->getFixups()[I].setOffset(EF->getFixups()[I].getOffset() +
|
||||
DF->getContents().size());
|
||||
DF->getFixups().push_back(EF->getFixups()[i]);
|
||||
DF->getFixups().push_back(EF->getFixups()[I]);
|
||||
}
|
||||
if (DF->getSubtargetInfo() == nullptr && EF->getSubtargetInfo())
|
||||
DF->setHasInstructions(*EF->getSubtargetInfo());
|
||||
|
@ -179,9 +179,9 @@ void MCWasmStreamer::EmitInstToData(const MCInst &Inst,
|
|||
MCDataFragment *DF = getOrCreateDataFragment();
|
||||
|
||||
// Add the fixups and data.
|
||||
for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
|
||||
Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
|
||||
DF->getFixups().push_back(Fixups[i]);
|
||||
for (unsigned I = 0, E = Fixups.size(); I != E; ++I) {
|
||||
Fixups[I].setOffset(Fixups[I].getOffset() + DF->getContents().size());
|
||||
DF->getFixups().push_back(Fixups[I]);
|
||||
}
|
||||
DF->setHasInstructions(STI);
|
||||
DF->getContents().append(Code.begin(), Code.end());
|
||||
|
|
|
@ -40,7 +40,7 @@ namespace {
|
|||
|
||||
// Went we ceate the indirect function table we start at 1, so that there is
|
||||
// and emtpy slot at 0 and therefore calling a null function pointer will trap.
|
||||
static const uint32_t kInitialTableOffset = 1;
|
||||
static const uint32_t InitialTableOffset = 1;
|
||||
|
||||
// For patching purposes, we need to remember where each section starts, both
|
||||
// for patching up the section size field, and for patching up references to
|
||||
|
@ -60,7 +60,7 @@ struct SectionBookkeeping {
|
|||
// TODO: Consider using wasm::WasmSignature directly instead.
|
||||
struct WasmSignature {
|
||||
// Support empty and tombstone instances, needed by DenseMap.
|
||||
enum { Plain, Empty, Tombstone } State;
|
||||
enum { Plain, Empty, Tombstone } State = Plain;
|
||||
|
||||
// The return types of the function.
|
||||
SmallVector<wasm::ValType, 1> Returns;
|
||||
|
@ -68,8 +68,6 @@ struct WasmSignature {
|
|||
// The parameter types of the function.
|
||||
SmallVector<wasm::ValType, 4> Params;
|
||||
|
||||
WasmSignature() : State(Plain) {}
|
||||
|
||||
bool operator==(const WasmSignature &Other) const {
|
||||
return State == Other.State && Returns == Other.Returns &&
|
||||
Params == Other.Params;
|
||||
|
@ -172,7 +170,7 @@ struct WasmRelocationEntry {
|
|||
#endif
|
||||
};
|
||||
|
||||
static const uint32_t INVALID_INDEX = -1;
|
||||
static const uint32_t InvalidIndex = -1;
|
||||
|
||||
struct WasmCustomSection {
|
||||
|
||||
|
@ -184,7 +182,7 @@ struct WasmCustomSection {
|
|||
|
||||
WasmCustomSection(StringRef Name, MCSectionWasm *Section)
|
||||
: Name(Name), Section(Section), OutputContentsOffset(0),
|
||||
OutputIndex(INVALID_INDEX) {}
|
||||
OutputIndex(InvalidIndex) {}
|
||||
};
|
||||
|
||||
#if !defined(NDEBUG)
|
||||
|
@ -196,7 +194,7 @@ raw_ostream &operator<<(raw_ostream &OS, const WasmRelocationEntry &Rel) {
|
|||
|
||||
// Write X as an (unsigned) LEB value at offset Offset in Stream, padded
|
||||
// to allow patching.
|
||||
static void WritePatchableLEB(raw_pwrite_stream &Stream, uint32_t X,
|
||||
static void writePatchableLEB(raw_pwrite_stream &Stream, uint32_t X,
|
||||
uint64_t Offset) {
|
||||
uint8_t Buffer[5];
|
||||
unsigned SizeLen = encodeULEB128(X, Buffer, 5);
|
||||
|
@ -206,7 +204,7 @@ static void WritePatchableLEB(raw_pwrite_stream &Stream, uint32_t X,
|
|||
|
||||
// Write X as an signed LEB value at offset Offset in Stream, padded
|
||||
// to allow patching.
|
||||
static void WritePatchableSLEB(raw_pwrite_stream &Stream, int32_t X,
|
||||
static void writePatchableSLEB(raw_pwrite_stream &Stream, int32_t X,
|
||||
uint64_t Offset) {
|
||||
uint8_t Buffer[5];
|
||||
unsigned SizeLen = encodeSLEB128(X, Buffer, 5);
|
||||
|
@ -215,7 +213,7 @@ static void WritePatchableSLEB(raw_pwrite_stream &Stream, int32_t X,
|
|||
}
|
||||
|
||||
// Write X as a plain integer value at offset Offset in Stream.
|
||||
static void WriteI32(raw_pwrite_stream &Stream, uint32_t X, uint64_t Offset) {
|
||||
static void writeI32(raw_pwrite_stream &Stream, uint32_t X, uint64_t Offset) {
|
||||
uint8_t Buffer[4];
|
||||
support::endian::write32le(Buffer, X);
|
||||
Stream.pwrite((char *)Buffer, sizeof(Buffer), Offset);
|
||||
|
@ -282,8 +280,6 @@ public:
|
|||
raw_pwrite_stream &OS)
|
||||
: W(OS, support::little), TargetObjectWriter(std::move(MOTW)) {}
|
||||
|
||||
~WasmObjectWriter() override;
|
||||
|
||||
private:
|
||||
void reset() override {
|
||||
CodeRelocations.clear();
|
||||
|
@ -360,8 +356,6 @@ private:
|
|||
|
||||
} // end anonymous namespace
|
||||
|
||||
WasmObjectWriter::~WasmObjectWriter() {}
|
||||
|
||||
// Write out a section header and a patchable section size field.
|
||||
void WasmObjectWriter::startSection(SectionBookkeeping &Section,
|
||||
unsigned SectionId) {
|
||||
|
@ -412,7 +406,7 @@ void WasmObjectWriter::endSection(SectionBookkeeping &Section) {
|
|||
|
||||
// Write the final section size to the payload_len field, which follows
|
||||
// the section id byte.
|
||||
WritePatchableLEB(static_cast<raw_pwrite_stream &>(W.OS), Size,
|
||||
writePatchableLEB(static_cast<raw_pwrite_stream &>(W.OS), Size,
|
||||
Section.SizeOffset);
|
||||
}
|
||||
|
||||
|
@ -562,7 +556,7 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
|
|||
}
|
||||
}
|
||||
|
||||
static const MCSymbolWasm *ResolveSymbol(const MCSymbolWasm &Symbol) {
|
||||
static const MCSymbolWasm *resolveSymbol(const MCSymbolWasm &Symbol) {
|
||||
if (Symbol.isVariable()) {
|
||||
const MCExpr *Expr = Symbol.getVariableValue();
|
||||
auto *Inner = cast<MCSymbolRefExpr>(Expr);
|
||||
|
@ -581,7 +575,7 @@ WasmObjectWriter::getProvisionalValue(const WasmRelocationEntry &RelEntry) {
|
|||
case wasm::R_WASM_TABLE_INDEX_SLEB:
|
||||
case wasm::R_WASM_TABLE_INDEX_I32: {
|
||||
// Provisional value is table address of the resolved symbol itself
|
||||
const MCSymbolWasm *Sym = ResolveSymbol(*RelEntry.Symbol);
|
||||
const MCSymbolWasm *Sym = resolveSymbol(*RelEntry.Symbol);
|
||||
assert(Sym->isFunction());
|
||||
return TableIndices[Sym];
|
||||
}
|
||||
|
@ -606,7 +600,7 @@ WasmObjectWriter::getProvisionalValue(const WasmRelocationEntry &RelEntry) {
|
|||
case wasm::R_WASM_MEMORY_ADDR_I32:
|
||||
case wasm::R_WASM_MEMORY_ADDR_SLEB: {
|
||||
// Provisional value is address of the global
|
||||
const MCSymbolWasm *Sym = ResolveSymbol(*RelEntry.Symbol);
|
||||
const MCSymbolWasm *Sym = resolveSymbol(*RelEntry.Symbol);
|
||||
// For undefined symbols, use zero
|
||||
if (!Sym->isDefined())
|
||||
return 0;
|
||||
|
@ -689,17 +683,17 @@ void WasmObjectWriter::applyRelocations(
|
|||
case wasm::R_WASM_GLOBAL_INDEX_LEB:
|
||||
case wasm::R_WASM_MEMORY_ADDR_LEB:
|
||||
case wasm::R_WASM_EVENT_INDEX_LEB:
|
||||
WritePatchableLEB(Stream, Value, Offset);
|
||||
writePatchableLEB(Stream, Value, Offset);
|
||||
break;
|
||||
case wasm::R_WASM_TABLE_INDEX_I32:
|
||||
case wasm::R_WASM_MEMORY_ADDR_I32:
|
||||
case wasm::R_WASM_FUNCTION_OFFSET_I32:
|
||||
case wasm::R_WASM_SECTION_OFFSET_I32:
|
||||
WriteI32(Stream, Value, Offset);
|
||||
writeI32(Stream, Value, Offset);
|
||||
break;
|
||||
case wasm::R_WASM_TABLE_INDEX_SLEB:
|
||||
case wasm::R_WASM_MEMORY_ADDR_SLEB:
|
||||
WritePatchableSLEB(Stream, Value, Offset);
|
||||
writePatchableSLEB(Stream, Value, Offset);
|
||||
break;
|
||||
default:
|
||||
llvm_unreachable("invalid relocation type");
|
||||
|
@ -854,7 +848,7 @@ void WasmObjectWriter::writeElemSection(ArrayRef<uint32_t> TableElems) {
|
|||
|
||||
// init expr for starting offset
|
||||
W.OS << char(wasm::WASM_OPCODE_I32_CONST);
|
||||
encodeSLEB128(kInitialTableOffset, W.OS);
|
||||
encodeSLEB128(InitialTableOffset, W.OS);
|
||||
W.OS << char(wasm::WASM_OPCODE_END);
|
||||
|
||||
encodeULEB128(TableElems.size(), W.OS);
|
||||
|
@ -1087,7 +1081,7 @@ void WasmObjectWriter::registerFunctionType(const MCSymbolWasm &Symbol) {
|
|||
assert(Symbol.isFunction());
|
||||
|
||||
WasmSignature S;
|
||||
const MCSymbolWasm *ResolvedSym = ResolveSymbol(Symbol);
|
||||
const MCSymbolWasm *ResolvedSym = resolveSymbol(Symbol);
|
||||
if (auto *Sig = ResolvedSym->getSignature()) {
|
||||
S.Returns = Sig->Returns;
|
||||
S.Params = Sig->Params;
|
||||
|
@ -1164,7 +1158,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
// For now, always emit the memory import, since loads and stores are not
|
||||
// valid without it. In the future, we could perhaps be more clever and omit
|
||||
// it if there are no loads or stores.
|
||||
MCSymbolWasm *MemorySym =
|
||||
auto *MemorySym =
|
||||
cast<MCSymbolWasm>(Ctx.getOrCreateSymbol("__linear_memory"));
|
||||
wasm::WasmImport MemImport;
|
||||
MemImport.Module = MemorySym->getImportModule();
|
||||
|
@ -1175,7 +1169,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
// For now, always emit the table section, since indirect calls are not
|
||||
// valid without it. In the future, we could perhaps be more clever and omit
|
||||
// it if there are no indirect calls.
|
||||
MCSymbolWasm *TableSym =
|
||||
auto *TableSym =
|
||||
cast<MCSymbolWasm>(Ctx.getOrCreateSymbol("__indirect_function_table"));
|
||||
wasm::WasmImport TableImport;
|
||||
TableImport.Module = TableSym->getImportModule();
|
||||
|
@ -1325,7 +1319,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
report_fatal_error(
|
||||
"function sections must contain one function each");
|
||||
|
||||
if (WS.getSize() == 0)
|
||||
if (WS.getSize() == nullptr)
|
||||
report_fatal_error(
|
||||
"function symbols must have a size set with .size");
|
||||
|
||||
|
@ -1422,7 +1416,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
|
||||
// Find the target symbol of this weak alias and export that index
|
||||
const auto &WS = static_cast<const MCSymbolWasm &>(S);
|
||||
const MCSymbolWasm *ResolvedSym = ResolveSymbol(WS);
|
||||
const MCSymbolWasm *ResolvedSym = resolveSymbol(WS);
|
||||
LLVM_DEBUG(dbgs() << WS.getName() << ": weak alias of '" << *ResolvedSym
|
||||
<< "'\n");
|
||||
|
||||
|
@ -1446,7 +1440,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
for (const MCSymbol &S : Asm.symbols()) {
|
||||
const auto &WS = static_cast<const MCSymbolWasm &>(S);
|
||||
if (!isInSymtab(WS)) {
|
||||
WS.setIndex(INVALID_INDEX);
|
||||
WS.setIndex(InvalidIndex);
|
||||
continue;
|
||||
}
|
||||
LLVM_DEBUG(dbgs() << "adding to symtab: " << WS << "\n");
|
||||
|
@ -1485,9 +1479,9 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
Rel.Type != wasm::R_WASM_TABLE_INDEX_SLEB)
|
||||
return;
|
||||
assert(Rel.Symbol->isFunction());
|
||||
const MCSymbolWasm &WS = *ResolveSymbol(*Rel.Symbol);
|
||||
const MCSymbolWasm &WS = *resolveSymbol(*Rel.Symbol);
|
||||
uint32_t FunctionIndex = WasmIndices.find(&WS)->second;
|
||||
uint32_t TableIndex = TableElems.size() + kInitialTableOffset;
|
||||
uint32_t TableIndex = TableElems.size() + InitialTableOffset;
|
||||
if (TableIndices.try_emplace(&WS, TableIndex).second) {
|
||||
LLVM_DEBUG(dbgs() << " -> adding " << WS.getName()
|
||||
<< " to table: " << TableIndex << "\n");
|
||||
|
@ -1546,10 +1540,10 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
const auto &DataFrag = cast<MCDataFragment>(Frag);
|
||||
const SmallVectorImpl<char> &Contents = DataFrag.getContents();
|
||||
for (const uint8_t *
|
||||
p = (const uint8_t *)Contents.data(),
|
||||
*end = (const uint8_t *)Contents.data() + Contents.size();
|
||||
p != end; ++p) {
|
||||
if (*p != 0)
|
||||
P = (const uint8_t *)Contents.data(),
|
||||
*End = (const uint8_t *)Contents.data() + Contents.size();
|
||||
P != End; ++P) {
|
||||
if (*P != 0)
|
||||
report_fatal_error("non-symbolic data in .init_array section");
|
||||
}
|
||||
for (const MCFixup &Fixup : DataFrag.getFixups()) {
|
||||
|
@ -1561,7 +1555,7 @@ uint64_t WasmObjectWriter::writeObject(MCAssembler &Asm,
|
|||
report_fatal_error("fixups in .init_array should be symbol references");
|
||||
if (Sym->getKind() != MCSymbolRefExpr::VK_WebAssembly_FUNCTION)
|
||||
report_fatal_error("symbols in .init_array should be for functions");
|
||||
if (Sym->getSymbol().getIndex() == INVALID_INDEX)
|
||||
if (Sym->getSymbol().getIndex() == InvalidIndex)
|
||||
report_fatal_error("symbols in .init_array should exist in symbtab");
|
||||
InitFuncs.push_back(
|
||||
std::make_pair(Priority, Sym->getSymbol().getIndex()));
|
||||
|
|
|
@ -131,24 +131,24 @@ static int64_t readLEB128(WasmObjectFile::ReadContext &Ctx) {
|
|||
}
|
||||
|
||||
static uint8_t readVaruint1(WasmObjectFile::ReadContext &Ctx) {
|
||||
int64_t result = readLEB128(Ctx);
|
||||
if (result > VARUINT1_MAX || result < 0)
|
||||
int64_t Result = readLEB128(Ctx);
|
||||
if (Result > VARUINT1_MAX || Result < 0)
|
||||
report_fatal_error("LEB is outside Varuint1 range");
|
||||
return result;
|
||||
return Result;
|
||||
}
|
||||
|
||||
static int32_t readVarint32(WasmObjectFile::ReadContext &Ctx) {
|
||||
int64_t result = readLEB128(Ctx);
|
||||
if (result > INT32_MAX || result < INT32_MIN)
|
||||
int64_t Result = readLEB128(Ctx);
|
||||
if (Result > INT32_MAX || Result < INT32_MIN)
|
||||
report_fatal_error("LEB is outside Varint32 range");
|
||||
return result;
|
||||
return Result;
|
||||
}
|
||||
|
||||
static uint32_t readVaruint32(WasmObjectFile::ReadContext &Ctx) {
|
||||
uint64_t result = readULEB128(Ctx);
|
||||
if (result > UINT32_MAX)
|
||||
uint64_t Result = readULEB128(Ctx);
|
||||
if (Result > UINT32_MAX)
|
||||
report_fatal_error("LEB is outside Varuint32 range");
|
||||
return result;
|
||||
return Result;
|
||||
}
|
||||
|
||||
static int64_t readVarint64(WasmObjectFile::ReadContext &Ctx) {
|
||||
|
@ -418,17 +418,17 @@ Error WasmObjectFile::parseLinkingSection(ReadContext &Ctx) {
|
|||
if (Count > DataSegments.size())
|
||||
return make_error<GenericBinaryError>("Too many segment names",
|
||||
object_error::parse_failed);
|
||||
for (uint32_t i = 0; i < Count; i++) {
|
||||
DataSegments[i].Data.Name = readString(Ctx);
|
||||
DataSegments[i].Data.Alignment = readVaruint32(Ctx);
|
||||
DataSegments[i].Data.Flags = readVaruint32(Ctx);
|
||||
for (uint32_t I = 0; I < Count; I++) {
|
||||
DataSegments[I].Data.Name = readString(Ctx);
|
||||
DataSegments[I].Data.Alignment = readVaruint32(Ctx);
|
||||
DataSegments[I].Data.Flags = readVaruint32(Ctx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case wasm::WASM_INIT_FUNCS: {
|
||||
uint32_t Count = readVaruint32(Ctx);
|
||||
LinkingData.InitFunctions.reserve(Count);
|
||||
for (uint32_t i = 0; i < Count; i++) {
|
||||
for (uint32_t I = 0; I < Count; I++) {
|
||||
wasm::WasmInitFunc Init;
|
||||
Init.Priority = readVaruint32(Ctx);
|
||||
Init.Symbol = readVaruint32(Ctx);
|
||||
|
@ -662,7 +662,7 @@ Error WasmObjectFile::parseLinkingSectionComdat(ReadContext &Ctx) {
|
|||
Error WasmObjectFile::parseProducersSection(ReadContext &Ctx) {
|
||||
llvm::SmallSet<StringRef, 3> FieldsSeen;
|
||||
uint32_t Fields = readVaruint32(Ctx);
|
||||
for (size_t i = 0; i < Fields; ++i) {
|
||||
for (size_t I = 0; I < Fields; ++I) {
|
||||
StringRef FieldName = readString(Ctx);
|
||||
if (!FieldsSeen.insert(FieldName).second)
|
||||
return make_error<GenericBinaryError>(
|
||||
|
@ -683,7 +683,7 @@ Error WasmObjectFile::parseProducersSection(ReadContext &Ctx) {
|
|||
}
|
||||
uint32_t ValueCount = readVaruint32(Ctx);
|
||||
llvm::SmallSet<StringRef, 8> ProducersSeen;
|
||||
for (size_t j = 0; j < ValueCount; ++j) {
|
||||
for (size_t J = 0; J < ValueCount; ++J) {
|
||||
StringRef Name = readString(Ctx);
|
||||
StringRef Version = readString(Ctx);
|
||||
if (!ProducersSeen.insert(Name).second) {
|
||||
|
@ -843,7 +843,7 @@ Error WasmObjectFile::parseTypeSection(ReadContext &Ctx) {
|
|||
Error WasmObjectFile::parseImportSection(ReadContext &Ctx) {
|
||||
uint32_t Count = readVaruint32(Ctx);
|
||||
Imports.reserve(Count);
|
||||
for (uint32_t i = 0; i < Count; i++) {
|
||||
for (uint32_t I = 0; I < Count; I++) {
|
||||
wasm::WasmImport Im;
|
||||
Im.Module = readString(Ctx);
|
||||
Im.Field = readString(Ctx);
|
||||
|
@ -969,7 +969,7 @@ Error WasmObjectFile::parseEventSection(ReadContext &Ctx) {
|
|||
Error WasmObjectFile::parseExportSection(ReadContext &Ctx) {
|
||||
uint32_t Count = readVaruint32(Ctx);
|
||||
Exports.reserve(Count);
|
||||
for (uint32_t i = 0; i < Count; i++) {
|
||||
for (uint32_t I = 0; I < Count; I++) {
|
||||
wasm::WasmExport Ex;
|
||||
Ex.Name = readString(Ctx);
|
||||
Ex.Kind = readUint8(Ctx);
|
||||
|
|
|
@ -253,13 +253,13 @@ public:
|
|||
}
|
||||
|
||||
bool ensureEmptyNestingStack() {
|
||||
auto err = !NestingStack.empty();
|
||||
auto Err = !NestingStack.empty();
|
||||
while (!NestingStack.empty()) {
|
||||
error(Twine("Unmatched block construct(s) at function end: ") +
|
||||
nestingString(NestingStack.back()).first);
|
||||
NestingStack.pop_back();
|
||||
}
|
||||
return err;
|
||||
return Err;
|
||||
}
|
||||
|
||||
bool isNext(AsmToken::TokenKind Kind) {
|
||||
|
|
|
@ -52,15 +52,15 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
|
|||
// Print any additional variadic operands.
|
||||
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
|
||||
if (Desc.isVariadic())
|
||||
for (auto i = Desc.getNumOperands(), e = MI->getNumOperands(); i < e; ++i) {
|
||||
for (auto I = Desc.getNumOperands(), E = MI->getNumOperands(); I < E; ++I) {
|
||||
// FIXME: For CALL_INDIRECT_VOID, don't print a leading comma, because
|
||||
// we have an extra flags operand which is not currently printed, for
|
||||
// compatiblity reasons.
|
||||
if (i != 0 && ((MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID &&
|
||||
if (I != 0 && ((MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID &&
|
||||
MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID_S) ||
|
||||
i != Desc.getNumOperands()))
|
||||
I != Desc.getNumOperands()))
|
||||
OS << ", ";
|
||||
printOperand(MI, i, OS);
|
||||
printOperand(MI, I, OS);
|
||||
}
|
||||
|
||||
// Print any added annotation.
|
||||
|
@ -192,13 +192,13 @@ static std::string toString(const APFloat &FP) {
|
|||
|
||||
// Use C99's hexadecimal floating-point representation.
|
||||
static const size_t BufBytes = 128;
|
||||
char buf[BufBytes];
|
||||
char Buf[BufBytes];
|
||||
auto Written = FP.convertToHexString(
|
||||
buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
|
||||
Buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
|
||||
(void)Written;
|
||||
assert(Written != 0);
|
||||
assert(Written < BufBytes);
|
||||
return buf;
|
||||
return Buf;
|
||||
}
|
||||
|
||||
void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
|
||||
|
|
|
@ -35,7 +35,6 @@ class WebAssemblyAsmBackend final : public MCAsmBackend {
|
|||
public:
|
||||
explicit WebAssemblyAsmBackend(bool Is64Bit)
|
||||
: MCAsmBackend(support::little), Is64Bit(Is64Bit) {}
|
||||
~WebAssemblyAsmBackend() override {}
|
||||
|
||||
unsigned getNumFixupKinds() const override {
|
||||
return WebAssembly::NumTargetFixupKinds;
|
||||
|
@ -91,7 +90,7 @@ WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
|
|||
|
||||
bool WebAssemblyAsmBackend::writeNopData(raw_ostream &OS,
|
||||
uint64_t Count) const {
|
||||
for (uint64_t i = 0; i < Count; ++i)
|
||||
for (uint64_t I = 0; I < Count; ++I)
|
||||
OS << char(WebAssembly::Nop);
|
||||
|
||||
return true;
|
||||
|
@ -118,8 +117,8 @@ void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm,
|
|||
|
||||
// For each byte of the fragment that the fixup touches, mask in the
|
||||
// bits from the fixup value.
|
||||
for (unsigned i = 0; i != NumBytes; ++i)
|
||||
Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
|
||||
for (unsigned I = 0; I != NumBytes; ++I)
|
||||
Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
|
||||
}
|
||||
|
||||
std::unique_ptr<MCObjectTargetWriter>
|
||||
|
|
|
@ -19,7 +19,7 @@ using namespace llvm;
|
|||
|
||||
#define DEBUG_TYPE "wasm-mc-asm-info"
|
||||
|
||||
WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {}
|
||||
WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() = default; // anchor.
|
||||
|
||||
WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) {
|
||||
CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
|
||||
|
|
|
@ -48,7 +48,7 @@ class WebAssemblyMCCodeEmitter final : public MCCodeEmitter {
|
|||
const MCSubtargetInfo &STI) const override;
|
||||
|
||||
public:
|
||||
WebAssemblyMCCodeEmitter(const MCInstrInfo &mcii) : MCII(mcii) {}
|
||||
WebAssemblyMCCodeEmitter(const MCInstrInfo &MCII) : MCII(MCII) {}
|
||||
};
|
||||
} // end anonymous namespace
|
||||
|
||||
|
@ -81,14 +81,14 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
|
|||
encodeULEB128(MI.getNumOperands() - 2, OS);
|
||||
|
||||
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
|
||||
for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {
|
||||
const MCOperand &MO = MI.getOperand(i);
|
||||
for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
|
||||
const MCOperand &MO = MI.getOperand(I);
|
||||
if (MO.isReg()) {
|
||||
/* nothing to encode */
|
||||
|
||||
} else if (MO.isImm()) {
|
||||
if (i < Desc.getNumOperands()) {
|
||||
const MCOperandInfo &Info = Desc.OpInfo[i];
|
||||
if (I < Desc.getNumOperands()) {
|
||||
const MCOperandInfo &Info = Desc.OpInfo[I];
|
||||
LLVM_DEBUG(dbgs() << "Encoding immediate: type="
|
||||
<< int(Info.OperandType) << "\n");
|
||||
switch (Info.OperandType) {
|
||||
|
@ -126,20 +126,20 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
|
|||
}
|
||||
|
||||
} else if (MO.isFPImm()) {
|
||||
const MCOperandInfo &Info = Desc.OpInfo[i];
|
||||
const MCOperandInfo &Info = Desc.OpInfo[I];
|
||||
if (Info.OperandType == WebAssembly::OPERAND_F32IMM) {
|
||||
// TODO: MC converts all floating point immediate operands to double.
|
||||
// This is fine for numeric values, but may cause NaNs to change bits.
|
||||
float f = float(MO.getFPImm());
|
||||
support::endian::write<float>(OS, f, support::little);
|
||||
auto F = float(MO.getFPImm());
|
||||
support::endian::write<float>(OS, F, support::little);
|
||||
} else {
|
||||
assert(Info.OperandType == WebAssembly::OPERAND_F64IMM);
|
||||
double d = MO.getFPImm();
|
||||
support::endian::write<double>(OS, d, support::little);
|
||||
double D = MO.getFPImm();
|
||||
support::endian::write<double>(OS, D, support::little);
|
||||
}
|
||||
|
||||
} else if (MO.isExpr()) {
|
||||
const MCOperandInfo &Info = Desc.OpInfo[i];
|
||||
const MCOperandInfo &Info = Desc.OpInfo[I];
|
||||
llvm::MCFixupKind FixupKind;
|
||||
size_t PaddedSize = 5;
|
||||
switch (Info.OperandType) {
|
||||
|
|
|
@ -39,13 +39,13 @@ static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/,
|
|||
}
|
||||
|
||||
static MCInstrInfo *createMCInstrInfo() {
|
||||
MCInstrInfo *X = new MCInstrInfo();
|
||||
auto *X = new MCInstrInfo();
|
||||
InitWebAssemblyMCInstrInfo(X);
|
||||
return X;
|
||||
}
|
||||
|
||||
static MCRegisterInfo *createMCRegisterInfo(const Triple & /*T*/) {
|
||||
MCRegisterInfo *X = new MCRegisterInfo();
|
||||
auto *X = new MCRegisterInfo();
|
||||
InitWebAssemblyMCRegisterInfo(X, 0);
|
||||
return X;
|
||||
}
|
||||
|
|
|
@ -43,25 +43,25 @@ WebAssemblyWasmObjectWriter::WebAssemblyWasmObjectWriter(bool Is64Bit)
|
|||
: MCWasmObjectTargetWriter(Is64Bit) {}
|
||||
|
||||
// Test whether the given expression computes a function address.
|
||||
static bool IsFunctionExpr(const MCExpr *Expr) {
|
||||
static bool isFunctionExpr(const MCExpr *Expr) {
|
||||
if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr))
|
||||
return cast<MCSymbolWasm>(SyExp->getSymbol()).isFunction();
|
||||
|
||||
if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr))
|
||||
return IsFunctionExpr(BinOp->getLHS()) != IsFunctionExpr(BinOp->getRHS());
|
||||
return isFunctionExpr(BinOp->getLHS()) != isFunctionExpr(BinOp->getRHS());
|
||||
|
||||
if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr))
|
||||
return IsFunctionExpr(UnOp->getSubExpr());
|
||||
return isFunctionExpr(UnOp->getSubExpr());
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool IsFunctionType(const MCValue &Target) {
|
||||
static bool isFunctionType(const MCValue &Target) {
|
||||
const MCSymbolRefExpr *RefA = Target.getSymA();
|
||||
return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX;
|
||||
}
|
||||
|
||||
static const MCSection *GetFixupSection(const MCExpr *Expr) {
|
||||
static const MCSection *getFixupSection(const MCExpr *Expr) {
|
||||
if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr)) {
|
||||
if (SyExp->getSymbol().isInSection())
|
||||
return &SyExp->getSymbol().getSection();
|
||||
|
@ -69,23 +69,23 @@ static const MCSection *GetFixupSection(const MCExpr *Expr) {
|
|||
}
|
||||
|
||||
if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr)) {
|
||||
auto SectionLHS = GetFixupSection(BinOp->getLHS());
|
||||
auto SectionRHS = GetFixupSection(BinOp->getRHS());
|
||||
auto SectionLHS = getFixupSection(BinOp->getLHS());
|
||||
auto SectionRHS = getFixupSection(BinOp->getRHS());
|
||||
return SectionLHS == SectionRHS ? nullptr : SectionLHS;
|
||||
}
|
||||
|
||||
if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr))
|
||||
return GetFixupSection(UnOp->getSubExpr());
|
||||
return getFixupSection(UnOp->getSubExpr());
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static bool IsGlobalType(const MCValue &Target) {
|
||||
static bool isGlobalType(const MCValue &Target) {
|
||||
const MCSymbolRefExpr *RefA = Target.getSymA();
|
||||
return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_GLOBAL;
|
||||
}
|
||||
|
||||
static bool IsEventType(const MCValue &Target) {
|
||||
static bool isEventType(const MCValue &Target) {
|
||||
const MCSymbolRefExpr *RefA = Target.getSymA();
|
||||
return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_EVENT;
|
||||
}
|
||||
|
@ -94,7 +94,7 @@ unsigned WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target,
|
|||
const MCFixup &Fixup) const {
|
||||
// WebAssembly functions are not allocated in the data address space. To
|
||||
// resolve a pointer to a function, we must use a special relocation type.
|
||||
bool IsFunction = IsFunctionExpr(Fixup.getValue());
|
||||
bool IsFunction = isFunctionExpr(Fixup.getValue());
|
||||
|
||||
switch (unsigned(Fixup.getKind())) {
|
||||
case WebAssembly::fixup_code_sleb128_i32:
|
||||
|
@ -104,20 +104,20 @@ unsigned WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target,
|
|||
case WebAssembly::fixup_code_sleb128_i64:
|
||||
llvm_unreachable("fixup_sleb128_i64 not implemented yet");
|
||||
case WebAssembly::fixup_code_uleb128_i32:
|
||||
if (IsGlobalType(Target))
|
||||
if (isGlobalType(Target))
|
||||
return wasm::R_WASM_GLOBAL_INDEX_LEB;
|
||||
if (IsFunctionType(Target))
|
||||
if (isFunctionType(Target))
|
||||
return wasm::R_WASM_TYPE_INDEX_LEB;
|
||||
if (IsFunction)
|
||||
return wasm::R_WASM_FUNCTION_INDEX_LEB;
|
||||
if (IsEventType(Target))
|
||||
if (isEventType(Target))
|
||||
return wasm::R_WASM_EVENT_INDEX_LEB;
|
||||
return wasm::R_WASM_MEMORY_ADDR_LEB;
|
||||
case FK_Data_4:
|
||||
if (IsFunction)
|
||||
return wasm::R_WASM_TABLE_INDEX_I32;
|
||||
if (auto Section = static_cast<const MCSectionWasm *>(
|
||||
GetFixupSection(Fixup.getValue()))) {
|
||||
getFixupSection(Fixup.getValue()))) {
|
||||
if (Section->getKind().isText())
|
||||
return wasm::R_WASM_FUNCTION_OFFSET_I32;
|
||||
else if (!Section->isWasmData())
|
||||
|
|
|
@ -95,11 +95,11 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
|
|||
if (F.isDeclarationForLinker() && !F.isIntrinsic()) {
|
||||
SmallVector<MVT, 4> Results;
|
||||
SmallVector<MVT, 4> Params;
|
||||
ComputeSignatureVTs(F.getFunctionType(), F, TM, Params, Results);
|
||||
computeSignatureVTs(F.getFunctionType(), F, TM, Params, Results);
|
||||
auto *Sym = cast<MCSymbolWasm>(getSymbol(&F));
|
||||
Sym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
|
||||
if (!Sym->getSignature()) {
|
||||
auto Signature = SignatureFromMVTs(Results, Params);
|
||||
auto Signature = signatureFromMVTs(Results, Params);
|
||||
Sym->setSignature(Signature.get());
|
||||
addSignature(std::move(Signature));
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
|
|||
|
||||
if (const NamedMDNode *Named = M.getNamedMetadata("wasm.custom_sections")) {
|
||||
for (const Metadata *MD : Named->operands()) {
|
||||
const MDTuple *Tuple = dyn_cast<MDTuple>(MD);
|
||||
const auto *Tuple = dyn_cast<MDTuple>(MD);
|
||||
if (!Tuple || Tuple->getNumOperands() != 2)
|
||||
continue;
|
||||
const MDString *Name = dyn_cast<MDString>(Tuple->getOperand(0));
|
||||
|
@ -149,9 +149,9 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
|
|||
|
||||
OutStreamer->PushSection();
|
||||
std::string SectionName = (".custom_section." + Name->getString()).str();
|
||||
MCSectionWasm *mySection =
|
||||
MCSectionWasm *MySection =
|
||||
OutContext.getWasmSection(SectionName, SectionKind::getMetadata());
|
||||
OutStreamer->SwitchSection(mySection);
|
||||
OutStreamer->SwitchSection(MySection);
|
||||
OutStreamer->EmitBytes(Contents->getString());
|
||||
OutStreamer->PopSection();
|
||||
}
|
||||
|
@ -163,9 +163,9 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
|
|||
void WebAssemblyAsmPrinter::EmitProducerInfo(Module &M) {
|
||||
llvm::SmallVector<std::pair<std::string, std::string>, 4> Languages;
|
||||
if (const NamedMDNode *Debug = M.getNamedMetadata("llvm.dbg.cu")) {
|
||||
llvm::SmallSet<StringRef, 4> SeenLanguages;
|
||||
for (size_t i = 0, e = Debug->getNumOperands(); i < e; ++i) {
|
||||
const auto *CU = cast<DICompileUnit>(Debug->getOperand(i));
|
||||
llvm::SmallSet<StringRef, 4> SeenLanguages;
|
||||
for (size_t I = 0, E = Debug->getNumOperands(); I < E; ++I) {
|
||||
const auto *CU = cast<DICompileUnit>(Debug->getOperand(I));
|
||||
StringRef Language = dwarf::LanguageString(CU->getSourceLanguage());
|
||||
Language.consume_front("DW_LANG_");
|
||||
if (SeenLanguages.insert(Language).second)
|
||||
|
@ -176,8 +176,8 @@ void WebAssemblyAsmPrinter::EmitProducerInfo(Module &M) {
|
|||
llvm::SmallVector<std::pair<std::string, std::string>, 4> Tools;
|
||||
if (const NamedMDNode *Ident = M.getNamedMetadata("llvm.ident")) {
|
||||
llvm::SmallSet<StringRef, 4> SeenTools;
|
||||
for (size_t i = 0, e = Ident->getNumOperands(); i < e; ++i) {
|
||||
const auto *S = cast<MDString>(Ident->getOperand(i)->getOperand(0));
|
||||
for (size_t I = 0, E = Ident->getNumOperands(); I < E; ++I) {
|
||||
const auto *S = cast<MDString>(Ident->getOperand(I)->getOperand(0));
|
||||
std::pair<StringRef, StringRef> Field = S->getString().split("version");
|
||||
StringRef Name = Field.first.trim();
|
||||
StringRef Version = Field.second.trim();
|
||||
|
@ -224,8 +224,8 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
|
|||
const Function &F = MF->getFunction();
|
||||
SmallVector<MVT, 1> ResultVTs;
|
||||
SmallVector<MVT, 4> ParamVTs;
|
||||
ComputeSignatureVTs(F.getFunctionType(), F, TM, ParamVTs, ResultVTs);
|
||||
auto Signature = SignatureFromMVTs(ResultVTs, ParamVTs);
|
||||
computeSignatureVTs(F.getFunctionType(), F, TM, ParamVTs, ResultVTs);
|
||||
auto Signature = signatureFromMVTs(ResultVTs, ParamVTs);
|
||||
auto *WasmSym = cast<MCSymbolWasm>(CurrentFnSym);
|
||||
WasmSym->setSignature(Signature.get());
|
||||
addSignature(std::move(Signature));
|
||||
|
@ -243,7 +243,7 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
|
|||
}
|
||||
|
||||
SmallVector<wasm::ValType, 16> Locals;
|
||||
ValTypesFromMVTs(MFI->getLocals(), Locals);
|
||||
valTypesFromMVTs(MFI->getLocals(), Locals);
|
||||
getTargetStreamer()->emitLocal(Locals);
|
||||
|
||||
AsmPrinter::EmitFunctionBodyStart();
|
||||
|
@ -324,7 +324,7 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
|||
default: {
|
||||
WebAssemblyMCInstLower MCInstLowering(OutContext, *this);
|
||||
MCInst TmpInst;
|
||||
MCInstLowering.Lower(MI, TmpInst);
|
||||
MCInstLowering.lower(MI, TmpInst);
|
||||
EmitToStreamer(*OutStreamer, TmpInst);
|
||||
break;
|
||||
}
|
||||
|
@ -332,7 +332,7 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
|||
}
|
||||
|
||||
const MCExpr *WebAssemblyAsmPrinter::lowerConstant(const Constant *CV) {
|
||||
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
|
||||
if (const auto *GV = dyn_cast<GlobalValue>(CV))
|
||||
if (GV->getValueType()->isFunctionTy()) {
|
||||
return MCSymbolRefExpr::create(
|
||||
getSymbol(GV), MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext);
|
||||
|
|
|
@ -132,7 +132,7 @@ FunctionPass *llvm::createWebAssemblyCFGSort() {
|
|||
return new WebAssemblyCFGSort();
|
||||
}
|
||||
|
||||
static void MaybeUpdateTerminator(MachineBasicBlock *MBB) {
|
||||
static void maybeUpdateTerminator(MachineBasicBlock *MBB) {
|
||||
#ifndef NDEBUG
|
||||
bool AnyBarrier = false;
|
||||
#endif
|
||||
|
@ -227,7 +227,7 @@ struct Entry {
|
|||
/// interrupted by blocks not dominated by their header.
|
||||
/// TODO: There are many opportunities for improving the heuristics here.
|
||||
/// Explore them.
|
||||
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
|
||||
static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
|
||||
const WebAssemblyExceptionInfo &WEI,
|
||||
const MachineDominatorTree &MDT) {
|
||||
// Prepare for a topological sort: Record the number of predecessors each
|
||||
|
@ -319,7 +319,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
|
|||
if (!Next) {
|
||||
// If there are no more blocks to process, we're done.
|
||||
if (Ready.empty()) {
|
||||
MaybeUpdateTerminator(MBB);
|
||||
maybeUpdateTerminator(MBB);
|
||||
break;
|
||||
}
|
||||
for (;;) {
|
||||
|
@ -337,7 +337,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
|
|||
}
|
||||
// Move the next block into place and iterate.
|
||||
Next->moveAfter(MBB);
|
||||
MaybeUpdateTerminator(MBB);
|
||||
maybeUpdateTerminator(MBB);
|
||||
MBB = Next;
|
||||
}
|
||||
assert(Entries.empty() && "Active sort region list not finished");
|
||||
|
@ -403,7 +403,7 @@ bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
|
|||
MF.getRegInfo().invalidateLiveness();
|
||||
|
||||
// Sort the blocks, with contiguous sort regions.
|
||||
SortBlocks(MF, MLI, WEI, MDT);
|
||||
sortBlocks(MF, MLI, WEI, MDT);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ FunctionPass *llvm::createWebAssemblyCFGStackify() {
|
|||
/// code) for a branch instruction to both branch to a block and fallthrough
|
||||
/// to it, so we check the actual branch operands to see if there are any
|
||||
/// explicit mentions.
|
||||
static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
|
||||
static bool explicitlyBranchesTo(MachineBasicBlock *Pred,
|
||||
MachineBasicBlock *MBB) {
|
||||
for (MachineInstr &MI : Pred->terminators())
|
||||
for (MachineOperand &MO : MI.explicit_operands())
|
||||
|
@ -123,7 +123,7 @@ static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
|
|||
// ones that should go after the marker. In this function, AfterSet is only
|
||||
// used for sanity checking.
|
||||
static MachineBasicBlock::iterator
|
||||
GetEarliestInsertPos(MachineBasicBlock *MBB,
|
||||
getEarliestInsertPos(MachineBasicBlock *MBB,
|
||||
const SmallPtrSet<const MachineInstr *, 4> &BeforeSet,
|
||||
const SmallPtrSet<const MachineInstr *, 4> &AfterSet) {
|
||||
auto InsertPos = MBB->end();
|
||||
|
@ -147,7 +147,7 @@ GetEarliestInsertPos(MachineBasicBlock *MBB,
|
|||
// ones that should go after the marker. In this function, BeforeSet is only
|
||||
// used for sanity checking.
|
||||
static MachineBasicBlock::iterator
|
||||
GetLatestInsertPos(MachineBasicBlock *MBB,
|
||||
getLatestInsertPos(MachineBasicBlock *MBB,
|
||||
const SmallPtrSet<const MachineInstr *, 4> &BeforeSet,
|
||||
const SmallPtrSet<const MachineInstr *, 4> &AfterSet) {
|
||||
auto InsertPos = MBB->begin();
|
||||
|
@ -222,7 +222,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
|
|||
for (MachineBasicBlock *Pred : MBB.predecessors()) {
|
||||
if (Pred->getNumber() < MBBNumber) {
|
||||
Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
|
||||
if (ExplicitlyBranchesTo(Pred, &MBB)) {
|
||||
if (explicitlyBranchesTo(Pred, &MBB)) {
|
||||
IsBranchedTo = true;
|
||||
if (Pred->getFirstTerminator()->getOpcode() == WebAssembly::BR_ON_EXN) {
|
||||
IsBrOnExn = true;
|
||||
|
@ -322,7 +322,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
|
|||
ReturnType = WebAssembly::ExprType::I32;
|
||||
}
|
||||
|
||||
auto InsertPos = GetLatestInsertPos(Header, BeforeSet, AfterSet);
|
||||
auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet);
|
||||
MachineInstr *Begin =
|
||||
BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos),
|
||||
TII.get(WebAssembly::BLOCK))
|
||||
|
@ -355,7 +355,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
|
|||
}
|
||||
|
||||
// Mark the end of the block.
|
||||
InsertPos = GetEarliestInsertPos(&MBB, BeforeSet, AfterSet);
|
||||
InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet);
|
||||
MachineInstr *End = BuildMI(MBB, InsertPos, MBB.findPrevDebugLoc(InsertPos),
|
||||
TII.get(WebAssembly::END_BLOCK));
|
||||
registerScope(Begin, End);
|
||||
|
@ -405,7 +405,7 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) {
|
|||
}
|
||||
|
||||
// Mark the beginning of the loop.
|
||||
auto InsertPos = GetEarliestInsertPos(&MBB, BeforeSet, AfterSet);
|
||||
auto InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet);
|
||||
MachineInstr *Begin = BuildMI(MBB, InsertPos, MBB.findDebugLoc(InsertPos),
|
||||
TII.get(WebAssembly::LOOP))
|
||||
.addImm(int64_t(WebAssembly::ExprType::Void));
|
||||
|
@ -422,7 +422,7 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) {
|
|||
|
||||
// Mark the end of the loop (using arbitrary debug location that branched to
|
||||
// the loop end as its location).
|
||||
InsertPos = GetEarliestInsertPos(AfterLoop, BeforeSet, AfterSet);
|
||||
InsertPos = getEarliestInsertPos(AfterLoop, BeforeSet, AfterSet);
|
||||
DebugLoc EndDL = (*AfterLoop->pred_rbegin())->findBranchDebugLoc();
|
||||
MachineInstr *End =
|
||||
BuildMI(*AfterLoop, InsertPos, EndDL, TII.get(WebAssembly::END_LOOP));
|
||||
|
@ -451,7 +451,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) {
|
|||
for (auto *Pred : MBB.predecessors()) {
|
||||
if (Pred->getNumber() < MBBNumber) {
|
||||
Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
|
||||
assert(!ExplicitlyBranchesTo(Pred, &MBB) &&
|
||||
assert(!explicitlyBranchesTo(Pred, &MBB) &&
|
||||
"Explicit branch to an EH pad!");
|
||||
}
|
||||
}
|
||||
|
@ -559,7 +559,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) {
|
|||
}
|
||||
|
||||
// Add the TRY.
|
||||
auto InsertPos = GetLatestInsertPos(Header, BeforeSet, AfterSet);
|
||||
auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet);
|
||||
MachineInstr *Begin =
|
||||
BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos),
|
||||
TII.get(WebAssembly::TRY))
|
||||
|
@ -595,7 +595,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) {
|
|||
}
|
||||
|
||||
// Mark the end of the TRY.
|
||||
InsertPos = GetEarliestInsertPos(AfterTry, BeforeSet, AfterSet);
|
||||
InsertPos = getEarliestInsertPos(AfterTry, BeforeSet, AfterSet);
|
||||
MachineInstr *End =
|
||||
BuildMI(*AfterTry, InsertPos, Bottom->findBranchDebugLoc(),
|
||||
TII.get(WebAssembly::END_TRY));
|
||||
|
@ -609,7 +609,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) {
|
|||
}
|
||||
|
||||
static unsigned
|
||||
GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
|
||||
getDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
|
||||
const MachineBasicBlock *MBB) {
|
||||
unsigned Depth = 0;
|
||||
for (auto X : reverse(Stack)) {
|
||||
|
@ -635,19 +635,19 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
|
|||
if (MFI.getResults().empty())
|
||||
return;
|
||||
|
||||
WebAssembly::ExprType retType;
|
||||
WebAssembly::ExprType RetType;
|
||||
switch (MFI.getResults().front().SimpleTy) {
|
||||
case MVT::i32:
|
||||
retType = WebAssembly::ExprType::I32;
|
||||
RetType = WebAssembly::ExprType::I32;
|
||||
break;
|
||||
case MVT::i64:
|
||||
retType = WebAssembly::ExprType::I64;
|
||||
RetType = WebAssembly::ExprType::I64;
|
||||
break;
|
||||
case MVT::f32:
|
||||
retType = WebAssembly::ExprType::F32;
|
||||
RetType = WebAssembly::ExprType::F32;
|
||||
break;
|
||||
case MVT::f64:
|
||||
retType = WebAssembly::ExprType::F64;
|
||||
RetType = WebAssembly::ExprType::F64;
|
||||
break;
|
||||
case MVT::v16i8:
|
||||
case MVT::v8i16:
|
||||
|
@ -655,10 +655,10 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
|
|||
case MVT::v2i64:
|
||||
case MVT::v4f32:
|
||||
case MVT::v2f64:
|
||||
retType = WebAssembly::ExprType::V128;
|
||||
RetType = WebAssembly::ExprType::V128;
|
||||
break;
|
||||
case MVT::ExceptRef:
|
||||
retType = WebAssembly::ExprType::ExceptRef;
|
||||
RetType = WebAssembly::ExprType::ExceptRef;
|
||||
break;
|
||||
default:
|
||||
llvm_unreachable("unexpected return type");
|
||||
|
@ -669,11 +669,11 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
|
|||
if (MI.isPosition() || MI.isDebugInstr())
|
||||
continue;
|
||||
if (MI.getOpcode() == WebAssembly::END_BLOCK) {
|
||||
EndToBegin[&MI]->getOperand(0).setImm(int32_t(retType));
|
||||
EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType));
|
||||
continue;
|
||||
}
|
||||
if (MI.getOpcode() == WebAssembly::END_LOOP) {
|
||||
EndToBegin[&MI]->getOperand(0).setImm(int32_t(retType));
|
||||
EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType));
|
||||
continue;
|
||||
}
|
||||
// Something other than an `end`. We're done.
|
||||
|
@ -684,7 +684,7 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
|
|||
|
||||
// WebAssembly functions end with an end instruction, as if the function body
|
||||
// were a block.
|
||||
static void AppendEndToFunction(MachineFunction &MF,
|
||||
static void appendEndToFunction(MachineFunction &MF,
|
||||
const WebAssemblyInstrInfo &TII) {
|
||||
BuildMI(MF.back(), MF.back().end(),
|
||||
MF.back().findPrevDebugLoc(MF.back().end()),
|
||||
|
@ -753,7 +753,7 @@ void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) {
|
|||
MI.RemoveOperand(MI.getNumOperands() - 1);
|
||||
for (auto MO : Ops) {
|
||||
if (MO.isMBB())
|
||||
MO = MachineOperand::CreateImm(GetDepth(Stack, MO.getMBB()));
|
||||
MO = MachineOperand::CreateImm(getDepth(Stack, MO.getMBB()));
|
||||
MI.addOperand(MF, MO);
|
||||
}
|
||||
}
|
||||
|
@ -798,7 +798,7 @@ bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
|
|||
if (!MF.getSubtarget<WebAssemblySubtarget>()
|
||||
.getTargetTriple()
|
||||
.isOSBinFormatELF())
|
||||
AppendEndToFunction(MF, TII);
|
||||
appendEndToFunction(MF, TII);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ FunctionPass *llvm::createWebAssemblyCallIndirectFixup() {
|
|||
return new WebAssemblyCallIndirectFixup();
|
||||
}
|
||||
|
||||
static unsigned GetNonPseudoCallIndirectOpcode(const MachineInstr &MI) {
|
||||
static unsigned getNonPseudoCallIndirectOpcode(const MachineInstr &MI) {
|
||||
switch (MI.getOpcode()) {
|
||||
using namespace WebAssembly;
|
||||
case PCALL_INDIRECT_VOID:
|
||||
|
@ -90,8 +90,8 @@ static unsigned GetNonPseudoCallIndirectOpcode(const MachineInstr &MI) {
|
|||
}
|
||||
}
|
||||
|
||||
static bool IsPseudoCallIndirect(const MachineInstr &MI) {
|
||||
return GetNonPseudoCallIndirectOpcode(MI) !=
|
||||
static bool isPseudoCallIndirect(const MachineInstr &MI) {
|
||||
return getNonPseudoCallIndirectOpcode(MI) !=
|
||||
WebAssembly::INSTRUCTION_LIST_END;
|
||||
}
|
||||
|
||||
|
@ -105,11 +105,11 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
|
|||
|
||||
for (MachineBasicBlock &MBB : MF) {
|
||||
for (MachineInstr &MI : MBB) {
|
||||
if (IsPseudoCallIndirect(MI)) {
|
||||
if (isPseudoCallIndirect(MI)) {
|
||||
LLVM_DEBUG(dbgs() << "Found call_indirect: " << MI << '\n');
|
||||
|
||||
// Rewrite pseudo to non-pseudo
|
||||
const MCInstrDesc &Desc = TII->get(GetNonPseudoCallIndirectOpcode(MI));
|
||||
const MCInstrDesc &Desc = TII->get(getNonPseudoCallIndirectOpcode(MI));
|
||||
MI.setDesc(Desc);
|
||||
|
||||
// Rewrite argument order
|
||||
|
|
|
@ -49,22 +49,22 @@ class WebAssemblyFastISel final : public FastISel {
|
|||
// All possible address modes.
|
||||
class Address {
|
||||
public:
|
||||
typedef enum { RegBase, FrameIndexBase } BaseKind;
|
||||
using BaseKind = enum { RegBase, FrameIndexBase };
|
||||
|
||||
private:
|
||||
BaseKind Kind;
|
||||
BaseKind Kind = RegBase;
|
||||
union {
|
||||
unsigned Reg;
|
||||
int FI;
|
||||
} Base;
|
||||
|
||||
int64_t Offset;
|
||||
int64_t Offset = 0;
|
||||
|
||||
const GlobalValue *GV;
|
||||
const GlobalValue *GV = nullptr;
|
||||
|
||||
public:
|
||||
// Innocuous defaults for our address.
|
||||
Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; }
|
||||
Address() { Base.Reg = 0; }
|
||||
void setKind(BaseKind K) {
|
||||
assert(!isSet() && "Can't change kind with non-zero base");
|
||||
Kind = K;
|
||||
|
@ -91,9 +91,9 @@ class WebAssemblyFastISel final : public FastISel {
return Base.FI;
}
void setOffset(int64_t Offset_) {
assert(Offset_ >= 0 && "Offsets must be non-negative");
Offset = Offset_;
void setOffset(int64_t NewOffset) {
assert(NewOffset >= 0 && "Offsets must be non-negative");
Offset = NewOffset;
}
int64_t getOffset() const { return Offset; }
void setGlobalValue(const GlobalValue *G) { GV = G; }
@ -210,7 +210,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
if (const auto *I = dyn_cast<Instruction>(Obj)) {
// Don't walk into other basic blocks unless the object is an alloca from
// another block, otherwise it may not have a virtual register assigned.
if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
@ -218,7 +218,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
Opcode = I->getOpcode();
U = I;
}
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
} else if (const auto *C = dyn_cast<ConstantExpr>(Obj)) {
Opcode = C->getOpcode();
U = C;
}
@ -229,7 +229,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
// address spaces.
return false;
if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
if (const auto *GV = dyn_cast<GlobalValue>(Obj)) {
if (Addr.getGlobalValue())
return false;
Addr.setGlobalValue(GV);
@ -274,7 +274,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
} else {
uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (const auto *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
TmpOffset += CI->getSExtValue() * S;
break;
@ -289,8 +289,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
}
if (canFoldAddIntoGEP(U, Op)) {
// A compatible add with a constant operand. Fold the constant.
ConstantInt *CI =
cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
auto *CI = cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
TmpOffset += CI->getSExtValue() * S;
// Iterate on the other operand.
Op = cast<AddOperator>(Op)->getOperand(0);
@ -314,7 +313,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
break;
}
case Instruction::Alloca: {
const AllocaInst *AI = cast<AllocaInst>(Obj);
const auto *AI = cast<AllocaInst>(Obj);
DenseMap<const AllocaInst *, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
@ -335,7 +334,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
if (isa<ConstantInt>(LHS))
std::swap(LHS, RHS);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue();
if (int64_t(TmpOffset) >= 0) {
Addr.setOffset(TmpOffset);
@ -355,7 +354,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
const Value *LHS = U->getOperand(0);
const Value *RHS = U->getOperand(1);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue();
if (TmpOffset >= 0) {
Addr.setOffset(TmpOffset);
@ -415,7 +414,7 @@ unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
}
unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) {
if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(V))
if (const auto *ICmp = dyn_cast<ICmpInst>(V))
if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32)) {
Not = ICmp->isTrueWhenEqual();
@ -605,7 +604,7 @@ unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
}
unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
if (const auto *GV = dyn_cast<GlobalValue>(C)) {
unsigned ResultReg =
createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
: &WebAssembly::I32RegClass);
@ -628,14 +627,14 @@ bool WebAssemblyFastISel::fastLowerArguments() {
if (F->isVarArg())
return false;
unsigned i = 0;
unsigned I = 0;
for (auto const &Arg : F->args()) {
const AttributeList &Attrs = F->getAttributes();
if (Attrs.hasParamAttribute(i, Attribute::ByVal) ||
Attrs.hasParamAttribute(i, Attribute::SwiftSelf) ||
Attrs.hasParamAttribute(i, Attribute::SwiftError) ||
Attrs.hasParamAttribute(i, Attribute::InAlloca) ||
Attrs.hasParamAttribute(i, Attribute::Nest))
if (Attrs.hasParamAttribute(I, Attribute::ByVal) ||
Attrs.hasParamAttribute(I, Attribute::SwiftSelf) ||
Attrs.hasParamAttribute(I, Attribute::SwiftError) ||
Attrs.hasParamAttribute(I, Attribute::InAlloca) ||
Attrs.hasParamAttribute(I, Attribute::Nest))
return false;
Type *ArgTy = Arg.getType();
@ -699,10 +698,10 @@ bool WebAssemblyFastISel::fastLowerArguments() {
}
unsigned ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(i);
.addImm(I);
updateValueMap(&Arg, ResultReg);
++i;
++I;
}
MRI.addLiveIn(WebAssembly::ARGUMENTS);
@ -731,7 +730,7 @@ bool WebAssemblyFastISel::fastLowerArguments() {
}
bool WebAssemblyFastISel::selectCall(const Instruction *I) {
const CallInst *Call = cast<CallInst>(I);
const auto *Call = cast<CallInst>(I);
if (Call->isMustTailCall() || Call->isInlineAsm() ||
Call->getFunctionType()->isVarArg())
@ -817,25 +816,25 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
}
SmallVector<unsigned, 8> Args;
for (unsigned i = 0, e = Call->getNumArgOperands(); i < e; ++i) {
Value *V = Call->getArgOperand(i);
for (unsigned I = 0, E = Call->getNumArgOperands(); I < E; ++I) {
Value *V = Call->getArgOperand(I);
MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
return false;
const AttributeList &Attrs = Call->getAttributes();
if (Attrs.hasParamAttribute(i, Attribute::ByVal) ||
Attrs.hasParamAttribute(i, Attribute::SwiftSelf) ||
Attrs.hasParamAttribute(i, Attribute::SwiftError) ||
Attrs.hasParamAttribute(i, Attribute::InAlloca) ||
Attrs.hasParamAttribute(i, Attribute::Nest))
if (Attrs.hasParamAttribute(I, Attribute::ByVal) ||
Attrs.hasParamAttribute(I, Attribute::SwiftSelf) ||
Attrs.hasParamAttribute(I, Attribute::SwiftError) ||
Attrs.hasParamAttribute(I, Attribute::InAlloca) ||
Attrs.hasParamAttribute(I, Attribute::Nest))
return false;
unsigned Reg;
if (Attrs.hasParamAttribute(i, Attribute::SExt))
if (Attrs.hasParamAttribute(I, Attribute::SExt))
Reg = getRegForSignedValue(V);
else if (Attrs.hasParamAttribute(i, Attribute::ZExt))
else if (Attrs.hasParamAttribute(I, Attribute::ZExt))
Reg = getRegForUnsignedValue(V);
else
Reg = getRegForValue(V);
@ -869,7 +868,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
}
bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
const SelectInst *Select = cast<SelectInst>(I);
const auto *Select = cast<SelectInst>(I);
bool Not;
unsigned CondReg = getRegForI1Value(Select->getCondition(), Not);
@ -928,7 +927,7 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
}
bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
const TruncInst *Trunc = cast<TruncInst>(I);
const auto *Trunc = cast<TruncInst>(I);
unsigned Reg = getRegForValue(Trunc->getOperand(0));
if (Reg == 0)
@ -947,7 +946,7 @@ bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
}
bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
const ZExtInst *ZExt = cast<ZExtInst>(I);
const auto *ZExt = cast<ZExtInst>(I);
const Value *Op = ZExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
@ -964,7 +963,7 @@ bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
}
bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
const SExtInst *SExt = cast<SExtInst>(I);
const auto *SExt = cast<SExtInst>(I);
const Value *Op = SExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
@ -981,11 +980,11 @@ bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
}
bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
const ICmpInst *ICmp = cast<ICmpInst>(I);
const auto *ICmp = cast<ICmpInst>(I);
bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
unsigned Opc;
bool isSigned = false;
bool IsSigned = false;
switch (ICmp->getPredicate()) {
case ICmpInst::ICMP_EQ:
Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
@ -1007,29 +1006,29 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
break;
case ICmpInst::ICMP_SGT:
Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
isSigned = true;
IsSigned = true;
break;
case ICmpInst::ICMP_SGE:
Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
isSigned = true;
IsSigned = true;
break;
case ICmpInst::ICMP_SLT:
Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
isSigned = true;
IsSigned = true;
break;
case ICmpInst::ICMP_SLE:
Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
isSigned = true;
IsSigned = true;
break;
default:
return false;
}
unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned);
unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), IsSigned);
if (LHS == 0)
return false;
unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned);
unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), IsSigned);
if (RHS == 0)
return false;
@ -1042,7 +1041,7 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
}
bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
const FCmpInst *FCmp = cast<FCmpInst>(I);
const auto *FCmp = cast<FCmpInst>(I);
unsigned LHS = getRegForValue(FCmp->getOperand(0));
if (LHS == 0)
@ -1138,7 +1137,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
}
bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
const LoadInst *Load = cast<LoadInst>(I);
const auto *Load = cast<LoadInst>(I);
if (Load->isAtomic())
return false;
if (!Subtarget->hasSIMD128() && Load->getType()->isVectorTy())
@ -1195,7 +1194,7 @@ bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
}
bool WebAssemblyFastISel::selectStore(const Instruction *I) {
const StoreInst *Store = cast<StoreInst>(I);
const auto *Store = cast<StoreInst>(I);
if (Store->isAtomic())
return false;
if (!Subtarget->hasSIMD128() &&
@ -1251,7 +1250,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) {
}
bool WebAssemblyFastISel::selectBr(const Instruction *I) {
const BranchInst *Br = cast<BranchInst>(I);
const auto *Br = cast<BranchInst>(I);
if (Br->isUnconditional()) {
MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
fastEmitBranch(MSucc, Br->getDebugLoc());
@ -1282,7 +1281,7 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
const ReturnInst *Ret = cast<ReturnInst>(I);
const auto *Ret = cast<ReturnInst>(I);
if (Ret->getNumOperands() == 0) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@ -64,12 +64,12 @@ ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() {
// Recursively descend the def-use lists from V to find non-bitcast users of
// bitcasts of V.
static void FindUses(Value *V, Function &F,
static void findUses(Value *V, Function &F,
SmallVectorImpl<std::pair<Use *, Function *>> &Uses,
SmallPtrSetImpl<Constant *> &ConstantBCs) {
for (Use &U : V->uses()) {
if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser()))
FindUses(BC, F, Uses, ConstantBCs);
if (auto *BC = dyn_cast<BitCastOperator>(U.getUser()))
findUses(BC, F, Uses, ConstantBCs);
else if (U.get()->getType() != F.getType()) {
CallSite CS(U.getUser());
if (!CS)
@ -81,8 +81,8 @@ static void FindUses(Value *V, Function &F,
continue;
if (isa<Constant>(U.get())) {
// Only add constant bitcasts to the list once; they get RAUW'd
auto c = ConstantBCs.insert(cast<Constant>(U.get()));
if (!c.second)
auto C = ConstantBCs.insert(cast<Constant>(U.get()));
if (!C.second)
continue;
}
Uses.push_back(std::make_pair(&U, &F));
@ -113,7 +113,7 @@ static void FindUses(Value *V, Function &F,
// For bitcasts that involve struct types we don't know at this stage if they
// would be equivalent at the wasm level and so we can't know if we need to
// generate a wrapper.
static Function *CreateWrapper(Function *F, FunctionType *Ty) {
static Function *createWrapper(Function *F, FunctionType *Ty) {
Module *M = F->getParent();
Function *Wrapper = Function::Create(Ty, Function::PrivateLinkage,
@ -151,11 +151,11 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) {
BB->getInstList().push_back(PtrCast);
Args.push_back(PtrCast);
} else if (ArgType->isStructTy() || ParamType->isStructTy()) {
LLVM_DEBUG(dbgs() << "CreateWrapper: struct param type in bitcast: "
LLVM_DEBUG(dbgs() << "createWrapper: struct param type in bitcast: "
<< F->getName() << "\n");
WrapperNeeded = false;
} else {
LLVM_DEBUG(dbgs() << "CreateWrapper: arg type mismatch calling: "
LLVM_DEBUG(dbgs() << "createWrapper: arg type mismatch calling: "
<< F->getName() << "\n");
LLVM_DEBUG(dbgs() << "Arg[" << Args.size() << "] Expected: "
<< *ParamType << " Got: " << *ArgType << "\n");
@ -191,11 +191,11 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) {
BB->getInstList().push_back(Cast);
ReturnInst::Create(M->getContext(), Cast, BB);
} else if (RtnType->isStructTy() || ExpectedRtnType->isStructTy()) {
LLVM_DEBUG(dbgs() << "CreateWrapper: struct return type in bitcast: "
LLVM_DEBUG(dbgs() << "createWrapper: struct return type in bitcast: "
<< F->getName() << "\n");
WrapperNeeded = false;
} else {
LLVM_DEBUG(dbgs() << "CreateWrapper: return type mismatch calling: "
LLVM_DEBUG(dbgs() << "createWrapper: return type mismatch calling: "
<< F->getName() << "\n");
LLVM_DEBUG(dbgs() << "Expected: " << *ExpectedRtnType
<< " Got: " << *RtnType << "\n");
@ -212,18 +212,18 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) {
new UnreachableInst(M->getContext(), BB);
Wrapper->setName(F->getName() + "_bitcast_invalid");
} else if (!WrapperNeeded) {
LLVM_DEBUG(dbgs() << "CreateWrapper: no wrapper needed: " << F->getName()
LLVM_DEBUG(dbgs() << "createWrapper: no wrapper needed: " << F->getName()
<< "\n");
Wrapper->eraseFromParent();
return nullptr;
}
LLVM_DEBUG(dbgs() << "CreateWrapper: " << F->getName() << "\n");
LLVM_DEBUG(dbgs() << "createWrapper: " << F->getName() << "\n");
return Wrapper;
}
// Test whether a main function with type FuncTy should be rewritten to have
// type MainTy.
bool ShouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) {
bool shouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) {
// Only fix the main function if it's the standard zero-arg form. That way,
// the standard cases will work as expected, and users will see signature
// mismatches from the linker for non-standard cases.
@ -242,7 +242,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
// Collect all the places that need wrappers.
for (Function &F : M) {
FindUses(&F, F, Uses, ConstantBCs);
findUses(&F, F, Uses, ConstantBCs);
// If we have a "main" function, and its type isn't
// "int main(int argc, char *argv[])", create an artificial call with it
@ -255,7 +255,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
PointerType::get(Type::getInt8PtrTy(C), 0)};
FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys,
/*isVarArg=*/false);
if (ShouldFixMainFunction(F.getFunctionType(), MainTy)) {
if (shouldFixMainFunction(F.getFunctionType(), MainTy)) {
LLVM_DEBUG(dbgs() << "Found `main` function with incorrect type: "
<< *F.getFunctionType() << "\n");
Value *Args[] = {UndefValue::get(MainArgTys[0]),
@ -274,8 +274,8 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
for (auto &UseFunc : Uses) {
Use *U = UseFunc.first;
Function *F = UseFunc.second;
PointerType *PTy = cast<PointerType>(U->get()->getType());
FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType());
auto *PTy = cast<PointerType>(U->get()->getType());
auto *Ty = dyn_cast<FunctionType>(PTy->getElementType());
// If the function is casted to something like i8* as a "generic pointer"
// to be later casted to something else, we can't generate a wrapper for it.
@ -285,7 +285,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
if (Pair.second)
Pair.first->second = CreateWrapper(F, Ty);
Pair.first->second = createWrapper(F, Ty);
Function *Wrapper = Pair.first->second;
if (!Wrapper)
@ -301,7 +301,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
// one that gets called from startup.
if (CallMain) {
Main->setName("__original_main");
Function *MainWrapper =
auto *MainWrapper =
cast<Function>(CallMain->getCalledValue()->stripPointerCasts());
delete CallMain;
if (Main->isDeclaration()) {
@ -37,9 +37,9 @@ class WebAssemblyDAGToDAGISel final : public SelectionDAGISel {
bool ForCodeSize;
public:
WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &tm,
WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &TM,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), ForCodeSize(false) {
: SelectionDAGISel(TM, OptLevel), Subtarget(nullptr), ForCodeSize(false) {
}
StringRef getPassName() const override {
@ -319,11 +319,11 @@ static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
auto &Context = BB->getParent()->getFunction().getContext();
Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
const BasicBlock *LLVM_BB = BB->getBasicBlock();
const BasicBlock *LLVMBB = BB->getBasicBlock();
MachineFunction *F = BB->getParent();
MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
MachineFunction::iterator It = ++BB->getIterator();
F->insert(It, FalseMBB);
@ -573,14 +573,14 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
// Lowering Code
//===----------------------------------------------------------------------===//
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
DiagnosticInfoUnsupported(MF.getFunction(), msg, DL.getDebugLoc()));
DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}
// Test whether the given calling convention is supported.
static bool CallingConvSupported(CallingConv::ID CallConv) {
static bool callingConvSupported(CallingConv::ID CallConv) {
// We currently support the language-independent target-independent
// conventions. We don't yet have a way to annotate calls with properties like
// "cold", and we don't have any call-clobbered registers, so these are mostly
@ -603,7 +603,7 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
auto Layout = MF.getDataLayout();
CallingConv::ID CallConv = CLI.CallConv;
if (!CallingConvSupported(CallConv))
if (!callingConvSupported(CallConv))
fail(DL, DAG,
"WebAssembly doesn't support language-specific or target-specific "
"calling conventions yet");
@ -625,9 +625,9 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
unsigned NumFixedArgs = 0;
for (unsigned i = 0; i < Outs.size(); ++i) {
const ISD::OutputArg &Out = Outs[i];
SDValue &OutVal = OutVals[i];
for (unsigned I = 0; I < Outs.size(); ++I) {
const ISD::OutputArg &Out = Outs[I];
SDValue &OutVal = OutVals[I];
if (Out.Flags.isNest())
fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
if (Out.Flags.isInAlloca())
@ -763,7 +763,7 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const {
assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
if (!CallingConvSupported(CallConv))
if (!callingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
SmallVector<SDValue, 4> RetOps(1, Chain);
@ -790,7 +790,7 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
if (!CallingConvSupported(CallConv))
if (!callingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
MachineFunction &MF = DAG.getMachineFunction();
@ -837,7 +837,7 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments(
// Record the number and types of arguments and results.
SmallVector<MVT, 4> Params;
SmallVector<MVT, 4> Results;
ComputeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
DAG.getTarget(), Params, Results);
for (MVT VT : Results)
MFI->addResult(VT);
@ -1054,7 +1054,7 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
switch (IntNo) {
default:
return {}; // Don't custom lower most intrinsics.
return SDValue(); // Don't custom lower most intrinsics.
case Intrinsic::wasm_lsda: {
EVT VT = Op.getValueType();
@ -1223,11 +1223,10 @@ WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
Ops[OpIdx++] = Op.getOperand(1);
// Expand mask indices to byte indices and materialize them as operands
for (size_t I = 0, Lanes = Mask.size(); I < Lanes; ++I) {
for (int M : Mask) {
for (size_t J = 0; J < LaneBytes; ++J) {
// Lower undefs (represented by -1 in mask) to zero
uint64_t ByteIndex =
Mask[I] == -1 ? 0 : (uint64_t)Mask[I] * LaneBytes + J;
uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
}
}
@ -1247,7 +1246,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
return SDValue();
}
static SDValue UnrollVectorShift(SDValue Op, SelectionDAG &DAG) {
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
EVT LaneT = Op.getSimpleValueType().getVectorElementType();
// 32-bit and 64-bit unrolled shifts will have proper semantics
if (LaneT.bitsGE(MVT::i32))
@ -1282,17 +1281,17 @@ SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
// Expand all vector shifts until V8 fixes its implementation
// TODO: remove this once V8 is fixed
if (!Subtarget->hasUnimplementedSIMD128())
return UnrollVectorShift(Op, DAG);
return unrollVectorShift(Op, DAG);
// Unroll non-splat vector shifts
BuildVectorSDNode *ShiftVec;
SDValue SplatVal;
if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
!(SplatVal = ShiftVec->getSplatValue()))
return UnrollVectorShift(Op, DAG);
return unrollVectorShift(Op, DAG);
// All splats except i64x2 const splats are handled by patterns
ConstantSDNode *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
return Op;
@ -239,16 +239,16 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
bool EnableEH; // Enable exception handling
bool EnableSjLj; // Enable setjmp/longjmp handling
GlobalVariable *ThrewGV;
GlobalVariable *ThrewValueGV;
Function *GetTempRet0Func;
Function *SetTempRet0Func;
Function *ResumeF;
Function *EHTypeIDF;
Function *EmLongjmpF;
Function *EmLongjmpJmpbufF;
Function *SaveSetjmpF;
Function *TestSetjmpF;
GlobalVariable *ThrewGV = nullptr;
GlobalVariable *ThrewValueGV = nullptr;
Function *GetTempRet0Func = nullptr;
Function *SetTempRet0Func = nullptr;
Function *ResumeF = nullptr;
Function *EHTypeIDF = nullptr;
Function *EmLongjmpF = nullptr;
Function *EmLongjmpJmpbufF = nullptr;
Function *SaveSetjmpF = nullptr;
Function *TestSetjmpF = nullptr;
// __cxa_find_matching_catch_N functions.
// Indexed by the number of clauses in an original landingpad instruction.
@ -281,11 +281,7 @@ public:
static char ID;
WebAssemblyLowerEmscriptenEHSjLj(bool EnableEH = true, bool EnableSjLj = true)
: ModulePass(ID), EnableEH(EnableEH), EnableSjLj(EnableSjLj),
ThrewGV(nullptr), ThrewValueGV(nullptr), GetTempRet0Func(nullptr),
SetTempRet0Func(nullptr), ResumeF(nullptr), EHTypeIDF(nullptr),
EmLongjmpF(nullptr), EmLongjmpJmpbufF(nullptr), SaveSetjmpF(nullptr),
TestSetjmpF(nullptr) {
: ModulePass(ID), EnableEH(EnableEH), EnableSjLj(EnableSjLj) {
EHWhitelistSet.insert(EHWhitelist.begin(), EHWhitelist.end());
}
bool runOnModule(Module &M) override;
@ -432,8 +428,8 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
// No attributes for the callee pointer.
ArgAttributes.push_back(AttributeSet());
// Copy the argument attributes from the original
for (unsigned i = 0, e = CI->getNumArgOperands(); i < e; ++i)
ArgAttributes.push_back(InvokeAL.getParamAttributes(i));
for (unsigned I = 0, E = CI->getNumArgOperands(); I < E; ++I)
ArgAttributes.push_back(InvokeAL.getParamAttributes(I));
// Reconstruct the AttributesList based on the vector we constructed.
AttributeList NewCallAL =
@ -606,11 +602,11 @@ void WebAssemblyLowerEmscriptenEHSjLj::rebuildSSA(Function &F) {
++UI;
SSA.Initialize(I.getType(), I.getName());
SSA.AddAvailableValue(&BB, &I);
Instruction *User = cast<Instruction>(U.getUser());
auto *User = cast<Instruction>(U.getUser());
if (User->getParent() == &BB)
continue;
if (PHINode *UserPN = dyn_cast<PHINode>(User))
if (auto *UserPN = dyn_cast<PHINode>(User))
if (UserPN->getIncomingBlock(U) == &BB)
continue;
@ -837,15 +833,15 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
for (LandingPadInst *LPI : LandingPads) {
IRB.SetInsertPoint(LPI);
SmallVector<Value *, 16> FMCArgs;
for (unsigned i = 0, e = LPI->getNumClauses(); i < e; ++i) {
Constant *Clause = LPI->getClause(i);
for (unsigned I = 0, E = LPI->getNumClauses(); I < E; ++I) {
Constant *Clause = LPI->getClause(I);
// As a temporary workaround for the lack of aggregate varargs support
// in the interface between JS and wasm, break out filter operands into
// their component elements.
if (LPI->isFilter(i)) {
if (LPI->isFilter(I)) {
auto *ATy = cast<ArrayType>(Clause->getType());
for (unsigned j = 0, e = ATy->getNumElements(); j < e; ++j) {
Value *EV = IRB.CreateExtractValue(Clause, makeArrayRef(j), "filter");
for (unsigned J = 0, E = ATy->getNumElements(); J < E; ++J) {
Value *EV = IRB.CreateExtractValue(Clause, makeArrayRef(J), "filter");
FMCArgs.push_back(EV);
}
} else
@ -955,8 +951,8 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
BBs.push_back(&BB);
// BBs.size() will change within the loop, so we query it every time
for (unsigned i = 0; i < BBs.size(); i++) {
BasicBlock *BB = BBs[i];
for (unsigned I = 0; I < BBs.size(); I++) {
BasicBlock *BB = BBs[I];
for (Instruction &I : *BB) {
assert(!isa<InvokeInst>(&I));
auto *CI = dyn_cast<CallInst>(&I);
@ -1029,9 +1025,9 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// switch case). 0 means a longjmp that is not ours to handle, needs a
// rethrow. Otherwise the index is the same as the index in P+1 (to avoid
// 0).
for (unsigned i = 0; i < SetjmpRetPHIs.size(); i++) {
SI->addCase(IRB.getInt32(i + 1), SetjmpRetPHIs[i]->getParent());
SetjmpRetPHIs[i]->addIncoming(LongjmpResult, EndBB);
for (unsigned I = 0; I < SetjmpRetPHIs.size(); I++) {
SI->addCase(IRB.getInt32(I + 1), SetjmpRetPHIs[I]->getParent());
SetjmpRetPHIs[I]->addIncoming(LongjmpResult, EndBB);
}
// We are splitting the block here, and must continue to find other calls
@ -1078,7 +1074,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
Use &U = *UI;
// Increment the iterator before removing the use from the list.
++UI;
if (Instruction *I = dyn_cast<Instruction>(U.getUser()))
if (auto *I = dyn_cast<Instruction>(U.getUser()))
if (I->getParent() != &EntryBB)
SetjmpTableSSA.RewriteUse(U);
}
@ -1086,7 +1082,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
UI != UE;) {
Use &U = *UI;
++UI;
if (Instruction *I = dyn_cast<Instruction>(U.getUser()))
if (auto *I = dyn_cast<Instruction>(U.getUser()))
if (I->getParent() != &EntryBB)
SetjmpTableSizeSSA.RewriteUse(U);
}
@ -69,7 +69,7 @@ bool LowerGlobalDtors::runOnModule(Module &M) {
return false;
// Sanity-check @llvm.global_dtor's type.
StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
auto *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
if (!ETy || ETy->getNumElements() != 3 ||
!ETy->getTypeAtIndex(0U)->isIntegerTy() ||
!ETy->getTypeAtIndex(1U)->isPointerTy() ||
@ -80,11 +80,11 @@ bool LowerGlobalDtors::runOnModule(Module &M) {
// associated symbol.
std::map<uint16_t, MapVector<Constant *, std::vector<Constant *>>> DtorFuncs;
for (Value *O : InitList->operands()) {
ConstantStruct *CS = dyn_cast<ConstantStruct>(O);
auto *CS = dyn_cast<ConstantStruct>(O);
if (!CS)
continue; // Malformed.
ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
auto *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
if (!Priority)
continue; // Malformed.
uint16_t PriorityValue = Priority->getLimitedValue(UINT16_MAX);
@ -47,7 +47,7 @@ static void removeRegisterOperands(const MachineInstr *MI, MCInst &OutMI);
MCSymbol *
WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
const GlobalValue *Global = MO.getGlobal();
MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global));
auto *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global));
if (const auto *FuncTy = dyn_cast<FunctionType>(Global->getValueType())) {
const MachineFunction &MF = *MO.getParent()->getParent()->getParent();
@ -56,9 +56,9 @@ WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
SmallVector<MVT, 1> ResultMVTs;
SmallVector<MVT, 4> ParamMVTs;
ComputeSignatureVTs(FuncTy, CurrentFunc, TM, ParamMVTs, ResultMVTs);
computeSignatureVTs(FuncTy, CurrentFunc, TM, ParamMVTs, ResultMVTs);
auto Signature = SignatureFromMVTs(ResultMVTs, ParamMVTs);
auto Signature = signatureFromMVTs(ResultMVTs, ParamMVTs);
WasmSym->setSignature(Signature.get());
Printer.addSignature(std::move(Signature));
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
@ -70,8 +70,7 @@ WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol(
const MachineOperand &MO) const {
const char *Name = MO.getSymbolName();
MCSymbolWasm *WasmSym =
cast<MCSymbolWasm>(Printer.GetExternalSymbolSymbol(Name));
auto *WasmSym = cast<MCSymbolWasm>(Printer.GetExternalSymbolSymbol(Name));
const WebAssemblySubtarget &Subtarget = Printer.getSubtarget();
// Except for the two exceptions (__stack_pointer and __cpp_exception), all
@ -109,7 +108,7 @@ MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol(
: wasm::ValType::I32);
} else { // Function symbols
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
GetLibcallSignature(Subtarget, Name, Returns, Params);
getLibcallSignature(Subtarget, Name, Returns, Params);
}
auto Signature =
make_unique<wasm::WasmSignature>(std::move(Returns), std::move(Params));
@ -119,7 +118,7 @@ MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol(
return WasmSym;
}
MCOperand WebAssemblyMCInstLower::LowerSymbolOperand(MCSymbol *Sym,
MCOperand WebAssemblyMCInstLower::lowerSymbolOperand(MCSymbol *Sym,
int64_t Offset,
bool IsFunc, bool IsGlob,
bool IsEvent) const {
@ -160,13 +159,13 @@ static wasm::ValType getType(const TargetRegisterClass *RC) {
llvm_unreachable("Unexpected register class");
}
void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
void WebAssemblyMCInstLower::lower(const MachineInstr *MI,
MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
const MCInstrDesc &Desc = MI->getDesc();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI->getOperand(I);
MCOperand MCOp;
switch (MO.getType()) {
@ -187,8 +186,8 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
break;
}
case MachineOperand::MO_Immediate:
if (i < Desc.NumOperands) {
const MCOperandInfo &Info = Desc.OpInfo[i];
if (I < Desc.NumOperands) {
const MCOperandInfo &Info = Desc.OpInfo[I];
if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
MCSymbol *Sym = Printer.createTempSymbol("typeindex");
@ -208,7 +207,7 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
if (WebAssembly::isCallIndirect(*MI))
Params.pop_back();
MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym);
auto *WasmSym = cast<MCSymbolWasm>(Sym);
auto Signature = make_unique<wasm::WasmSignature>(std::move(Returns),
std::move(Params));
WasmSym->setSignature(Signature.get());
@ -238,7 +237,7 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
case MachineOperand::MO_GlobalAddress:
assert(MO.getTargetFlags() == WebAssemblyII::MO_NO_FLAG &&
"WebAssembly does not use target flags on GlobalAddresses");
MCOp = LowerSymbolOperand(GetGlobalAddressSymbol(MO), MO.getOffset(),
MCOp = lowerSymbolOperand(GetGlobalAddressSymbol(MO), MO.getOffset(),
MO.getGlobal()->getValueType()->isFunctionTy(),
false, false);
break;
@ -247,7 +246,7 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
// variable or a function.
assert((MO.getTargetFlags() & ~WebAssemblyII::MO_SYMBOL_MASK) == 0 &&
"WebAssembly uses only symbol flags on ExternalSymbols");
MCOp = LowerSymbolOperand(
MCOp = lowerSymbolOperand(
GetExternalSymbolSymbol(MO), /*Offset=*/0,
(MO.getTargetFlags() & WebAssemblyII::MO_SYMBOL_FUNCTION) != 0,
(MO.getTargetFlags() & WebAssemblyII::MO_SYMBOL_GLOBAL) != 0,
@ -258,7 +257,7 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
// because global addresses or other external symbols are handled above.
assert(MO.getTargetFlags() == 0 &&
"WebAssembly does not use target flags on MCSymbol");
MCOp = LowerSymbolOperand(MO.getMCSymbol(), /*Offset=*/0, false, false,
MCOp = lowerSymbolOperand(MO.getMCSymbol(), /*Offset=*/0, false, false,
false);
break;
}
@ -32,13 +32,13 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyMCInstLower {
MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
MCOperand LowerSymbolOperand(MCSymbol *Sym, int64_t Offset, bool IsFunc,
MCOperand lowerSymbolOperand(MCSymbol *Sym, int64_t Offset, bool IsFunc,
bool IsGlob, bool IsEvent) const;
public:
WebAssemblyMCInstLower(MCContext &ctx, WebAssemblyAsmPrinter &printer)
: Ctx(ctx), Printer(printer) {}
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
void lower(const MachineInstr *MI, MCInst &OutMI) const;
};
} // end namespace llvm
@ -18,7 +18,7 @@
#include "llvm/CodeGen/Analysis.h"
using namespace llvm;
WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() {}
WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() = default; // anchor.
void WebAssemblyFunctionInfo::initWARegs() {
assert(WARegs.empty());
@ -26,7 +26,7 @@ void WebAssemblyFunctionInfo::initWARegs() {
WARegs.resize(MF.getRegInfo().getNumVirtRegs(), Reg);
}
void llvm::ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
void llvm::computeLegalValueVTs(const Function &F, const TargetMachine &TM,
Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
const DataLayout &DL(F.getParent()->getDataLayout());
const WebAssemblyTargetLowering &TLI =
@ -37,16 +37,16 @@ void llvm::ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
for (EVT VT : VTs) {
unsigned NumRegs = TLI.getNumRegisters(F.getContext(), VT);
MVT RegisterVT = TLI.getRegisterType(F.getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i)
for (unsigned I = 0; I != NumRegs; ++I)
ValueVTs.push_back(RegisterVT);
}
}
void llvm::ComputeSignatureVTs(const FunctionType *Ty, const Function &F,
void llvm::computeSignatureVTs(const FunctionType *Ty, const Function &F,
const TargetMachine &TM,
SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results) {
ComputeLegalValueVTs(F, TM, Ty->getReturnType(), Results);
computeLegalValueVTs(F, TM, Ty->getReturnType(), Results);
MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits());
if (Results.size() > 1) {
@ -58,22 +58,22 @@ void llvm::ComputeSignatureVTs(const FunctionType *Ty, const Function &F,
}
for (auto *Param : Ty->params())
ComputeLegalValueVTs(F, TM, Param, Params);
computeLegalValueVTs(F, TM, Param, Params);
if (Ty->isVarArg())
Params.push_back(PtrVT);
}
void llvm::ValTypesFromMVTs(const ArrayRef<MVT> &In,
void llvm::valTypesFromMVTs(const ArrayRef<MVT> &In,
SmallVectorImpl<wasm::ValType> &Out) {
for (MVT Ty : In)
Out.push_back(WebAssembly::toValType(Ty));
}
std::unique_ptr<wasm::WasmSignature>
llvm::SignatureFromMVTs(const SmallVectorImpl<MVT> &Results,
llvm::signatureFromMVTs(const SmallVectorImpl<MVT> &Results,
const SmallVectorImpl<MVT> &Params) {
auto Sig = make_unique<wasm::WasmSignature>();
ValTypesFromMVTs(Results, Sig->Returns);
ValTypesFromMVTs(Params, Sig->Params);
valTypesFromMVTs(Results, Sig->Returns);
valTypesFromMVTs(Params, Sig->Params);
return Sig;
}
@ -119,20 +119,20 @@ public:
}
};
void ComputeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty,
void computeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty,
SmallVectorImpl<MVT> &ValueVTs);
// Compute the signature for a given FunctionType (Ty). Note that it's not the
// signature for F (F is just used to get varous context)
void ComputeSignatureVTs(const FunctionType *Ty, const Function &F,
void computeSignatureVTs(const FunctionType *Ty, const Function &F,
const TargetMachine &TM, SmallVectorImpl<MVT> &Params,
SmallVectorImpl<MVT> &Results);
void ValTypesFromMVTs(const ArrayRef<MVT> &In,
void valTypesFromMVTs(const ArrayRef<MVT> &In,
SmallVectorImpl<wasm::ValType> &Out);
std::unique_ptr<wasm::WasmSignature>
SignatureFromMVTs(const SmallVectorImpl<MVT> &Results,
signatureFromMVTs(const SmallVectorImpl<MVT> &Results,
const SmallVectorImpl<MVT> &Params);
} // end namespace llvm
@ -81,7 +81,7 @@ FunctionPass *llvm::createWebAssemblyMemIntrinsicResults() {
}
// Replace uses of FromReg with ToReg if they are dominated by MI.
static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
static bool replaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
unsigned FromReg, unsigned ToReg,
const MachineRegisterInfo &MRI,
MachineDominatorTree &MDT,
@ -156,10 +156,10 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
return false;
StringRef Name(Op1.getSymbolName());
bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
bool CallReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
Name == TLI.getLibcallName(RTLIB::MEMSET);
if (!callReturnsInput)
if (!CallReturnsInput)
return false;
LibFunc Func;
@ -171,7 +171,7 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
report_fatal_error("Memory Intrinsic results: call to builtin function "
"with wrong signature, from/to mismatch");
return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
return replaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
}
bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) {
@ -181,11 +181,11 @@ bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) {
});
MachineRegisterInfo &MRI = MF.getRegInfo();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
auto &MDT = getAnalysis<MachineDominatorTree>();
const WebAssemblyTargetLowering &TLI =
*MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
auto &LIS = getAnalysis<LiveIntervals>();
bool Changed = false;
// We don't preserve SSA form.
@ -71,7 +71,7 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
<< MF.getName() << '\n');
MachineRegisterInfo &MRI = MF.getRegInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
auto &LIS = getAnalysis<LiveIntervals>();
// We don't preserve SSA form.
MRI.leaveSSA();
@ -80,8 +80,8 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
// Split multiple-VN LiveIntervals into multiple LiveIntervals.
SmallVector<LiveInterval *, 4> SplitLIs;
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
if (MRI.reg_nodbg_empty(Reg))
continue;
@ -36,11 +36,11 @@ class OptimizeReturned final : public FunctionPass,
bool runOnFunction(Function &F) override;
DominatorTree *DT;
DominatorTree *DT = nullptr;
public:
static char ID;
OptimizeReturned() : FunctionPass(ID), DT(nullptr) {}
OptimizeReturned() : FunctionPass(ID) {}
void visitCallSite(CallSite CS);
};
@ -56,10 +56,10 @@ FunctionPass *llvm::createWebAssemblyOptimizeReturned() {
}
void OptimizeReturned::visitCallSite(CallSite CS) {
for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i)
if (CS.paramHasAttr(i, Attribute::Returned)) {
for (unsigned I = 0, E = CS.getNumArgOperands(); I < E; ++I)
if (CS.paramHasAttr(I, Attribute::Returned)) {
Instruction *Inst = CS.getInstruction();
Value *Arg = CS.getArgOperand(i);
Value *Arg = CS.getArgOperand(I);
// Ignore constants, globals, undef, etc.
if (isa<Constant>(Arg))
continue;
@ -57,7 +57,7 @@ FunctionPass *llvm::createWebAssemblyPeephole() {
}
/// If desirable, rewrite NewReg to a drop register.
static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
static bool maybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
MachineOperand &MO, WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI) {
bool Changed = false;
@ -71,7 +71,7 @@ static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
return Changed;
}
static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
const MachineFunction &MF,
WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI,
@ -149,7 +149,7 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
if (MRI.getRegClass(NewReg) != MRI.getRegClass(OldReg))
report_fatal_error("Peephole: call to builtin function with "
"wrong signature, from/to mismatch");
Changed |= MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
Changed |= maybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
}
}
}
@ -157,57 +157,57 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
}
// Optimize away an explicit void return at the end of the function.
case WebAssembly::RETURN_I32:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I32,
WebAssembly::COPY_I32);
break;
case WebAssembly::RETURN_I64:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I64,
WebAssembly::COPY_I64);
break;
case WebAssembly::RETURN_F32:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F32,
WebAssembly::COPY_F32);
break;
case WebAssembly::RETURN_F64:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F64,
WebAssembly::COPY_F64);
break;
case WebAssembly::RETURN_v16i8:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v16i8,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v8i16:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v8i16,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v4i32:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4i32,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v2i64:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2i64,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v4f32:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4f32,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v2f64:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2f64,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_VOID:
Changed |= MaybeRewriteToFallthrough(
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_VOID,
WebAssembly::INSTRUCTION_LIST_END);
break;
|
@ -62,7 +62,7 @@ FunctionPass *llvm::createWebAssemblyPrepareForLiveIntervals() {
|
|||
}
|
||||
|
||||
// Test whether the given register has an ARGUMENT def.
|
||||
static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
|
||||
static bool hasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
|
||||
for (const auto &Def : MRI.def_instructions(Reg))
|
||||
if (WebAssembly::isArgument(Def))
|
||||
return true;
|
||||
|
@ -94,15 +94,15 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(
|
|||
//
|
||||
// TODO: This is fairly heavy-handed; find a better approach.
|
||||
//
|
||||
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
|
||||
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
|
||||
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
|
||||
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
|
||||
|
||||
// Skip unused registers.
|
||||
if (MRI.use_nodbg_empty(Reg))
|
||||
continue;
|
||||
|
||||
// Skip registers that have an ARGUMENT definition.
|
||||
if (HasArgumentDef(Reg, MRI))
|
||||
if (hasArgumentDef(Reg, MRI))
|
||||
continue;
|
||||
|
||||
BuildMI(Entry, Entry.begin(), DebugLoc(),
|
||||
|
|
|
@ -65,11 +65,11 @@ FunctionPass *llvm::createWebAssemblyRegColoring() {
|
|||
static float computeWeight(const MachineRegisterInfo *MRI,
|
||||
const MachineBlockFrequencyInfo *MBFI,
|
||||
unsigned VReg) {
|
||||
float weight = 0.0f;
|
||||
float Weight = 0.0f;
|
||||
for (MachineOperand &MO : MRI->reg_nodbg_operands(VReg))
|
||||
weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI,
|
||||
Weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI,
|
||||
*MO.getParent());
|
||||
return weight;
|
||||
return Weight;
|
||||
}
|
||||
|
||||
bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
|
||||
|
@ -97,8 +97,8 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
|
|||
SortedIntervals.reserve(NumVRegs);
|
||||
|
||||
LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
|
||||
for (unsigned i = 0; i < NumVRegs; ++i) {
|
||||
unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
|
||||
for (unsigned I = 0; I < NumVRegs; ++I) {
|
||||
unsigned VReg = TargetRegisterInfo::index2VirtReg(I);
|
||||
if (MFI.isVRegStackified(VReg))
|
||||
continue;
|
||||
// Skip unused registers, which can use $drop.
|
||||
|
@ -133,10 +133,10 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
|
|||
SortedIntervals.size());
|
||||
BitVector UsedColors(SortedIntervals.size());
|
||||
bool Changed = false;
|
||||
for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) {
|
||||
LiveInterval *LI = SortedIntervals[i];
|
||||
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
|
||||
LiveInterval *LI = SortedIntervals[I];
|
||||
unsigned Old = LI->reg;
|
||||
size_t Color = i;
|
||||
size_t Color = I;
|
||||
const TargetRegisterClass *RC = MRI->getRegClass(Old);
|
||||
|
||||
// Check if it's possible to reuse any of the used colors.
|
||||
|
@ -153,7 +153,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
|
|||
}
|
||||
|
||||
unsigned New = SortedIntervals[Color]->reg;
|
||||
SlotMapping[i] = New;
|
||||
SlotMapping[I] = New;
|
||||
Changed |= Old != New;
|
||||
UsedColors.set(Color);
|
||||
Assignments[Color].push_back(LI);
|
||||
|
@ -165,9 +165,9 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
|
|||
return false;
|
||||
|
||||
// Rewrite register operands.
|
||||
for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) {
|
||||
unsigned Old = SortedIntervals[i]->reg;
|
||||
unsigned New = SlotMapping[i];
|
||||
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
|
||||
unsigned Old = SortedIntervals[I]->reg;
|
||||
unsigned New = SlotMapping[I];
|
||||
if (Old != New)
|
||||
MRI->replaceRegWith(Old, New);
|
||||
}
|
||||
|
|
|
@@ -79,7 +79,7 @@ FunctionPass *llvm::createWebAssemblyRegStackify() {
// Decorate the given instruction with implicit operands that enforce the
// expression stack ordering constraints for an instruction which is on
// the expression stack.
static void ImposeStackOrdering(MachineInstr *MI) {
static void imposeStackOrdering(MachineInstr *MI) {
// Write the opaque VALUE_STACK register.
if (!MI->definesRegister(WebAssembly::VALUE_STACK))
MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,

@@ -95,7 +95,7 @@ static void ImposeStackOrdering(MachineInstr *MI) {

// Convert an IMPLICIT_DEF instruction into an instruction which defines
// a constant zero value.
static void ConvertImplicitDefToConstZero(MachineInstr *MI,
static void convertImplicitDefToConstZero(MachineInstr *MI,
MachineRegisterInfo &MRI,
const TargetInstrInfo *TII,
MachineFunction &MF,

@@ -111,12 +111,12 @@ static void ConvertImplicitDefToConstZero(MachineInstr *MI,
MI->addOperand(MachineOperand::CreateImm(0));
} else if (RegClass == &WebAssembly::F32RegClass) {
MI->setDesc(TII->get(WebAssembly::CONST_F32));
ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue(
auto *Val = cast<ConstantFP>(Constant::getNullValue(
Type::getFloatTy(MF.getFunction().getContext())));
MI->addOperand(MachineOperand::CreateFPImm(Val));
} else if (RegClass == &WebAssembly::F64RegClass) {
MI->setDesc(TII->get(WebAssembly::CONST_F64));
ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue(
auto *Val = cast<ConstantFP>(Constant::getNullValue(
Type::getDoubleTy(MF.getFunction().getContext())));
MI->addOperand(MachineOperand::CreateFPImm(Val));
} else if (RegClass == &WebAssembly::V128RegClass) {

@@ -135,7 +135,7 @@ static void ConvertImplicitDefToConstZero(MachineInstr *MI,
// Determine whether a call to the callee referenced by
// MI->getOperand(CalleeOpNo) reads memory, writes memory, and/or has side
// effects.
static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,
static void queryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,
bool &Write, bool &Effects, bool &StackPointer) {
// All calls can use the stack pointer.
StackPointer = true;

@@ -143,11 +143,11 @@ static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,
const MachineOperand &MO = MI.getOperand(CalleeOpNo);
if (MO.isGlobal()) {
const Constant *GV = MO.getGlobal();
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
if (const auto *GA = dyn_cast<GlobalAlias>(GV))
if (!GA->isInterposable())
GV = GA->getAliasee();

if (const Function *F = dyn_cast<Function>(GV)) {
if (const auto *F = dyn_cast<Function>(GV)) {
if (!F->doesNotThrow())
Effects = true;
if (F->doesNotAccessMemory())

@@ -167,7 +167,7 @@ static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,

// Determine whether MI reads memory, writes memory, has side effects,
// and/or uses the stack pointer value.
static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read,
static void query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read,
bool &Write, bool &Effects, bool &StackPointer) {
assert(!MI.isTerminator());

@@ -253,12 +253,12 @@ static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read,
// Analyze calls.
if (MI.isCall()) {
unsigned CalleeOpNo = WebAssembly::getCalleeOpNo(MI);
QueryCallee(MI, CalleeOpNo, Read, Write, Effects, StackPointer);
queryCallee(MI, CalleeOpNo, Read, Write, Effects, StackPointer);
}
}

// Test whether Def is safe and profitable to rematerialize.
static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
static bool shouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
const WebAssemblyInstrInfo *TII) {
return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def, &AA);
}

@@ -266,7 +266,7 @@ static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
// Identify the definition for this register at this point. This is a
// generalization of MachineRegisterInfo::getUniqueVRegDef that uses
// LiveIntervals to handle complex cases.
static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert,
static MachineInstr *getVRegDef(unsigned Reg, const MachineInstr *Insert,
const MachineRegisterInfo &MRI,
const LiveIntervals &LIS) {
// Most registers are in SSA form here so we try a quick MRI query first.

@@ -284,7 +284,7 @@ static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert,
// Test whether Reg, as defined at Def, has exactly one use. This is a
// generalization of MachineRegisterInfo::hasOneUse that uses LiveIntervals
// to handle complex cases.
static bool HasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI,
static bool hasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI,
MachineDominatorTree &MDT, LiveIntervals &LIS) {
// Most registers are in SSA form here so we try a quick MRI query first.
if (MRI.hasOneUse(Reg))

@@ -313,7 +313,7 @@ static bool HasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI,
// walking the block.
// TODO: Compute memory dependencies in a way that uses AliasAnalysis to be
// more precise.
static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
static bool isSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
AliasAnalysis &AA, const MachineRegisterInfo &MRI) {
assert(Def->getParent() == Insert->getParent());

@@ -361,7 +361,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
}

bool Read = false, Write = false, Effects = false, StackPointer = false;
Query(*Def, AA, Read, Write, Effects, StackPointer);
query(*Def, AA, Read, Write, Effects, StackPointer);

// If the instruction does not access memory and has no side effects, it has
// no additional dependencies.

@@ -376,7 +376,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
bool InterveningWrite = false;
bool InterveningEffects = false;
bool InterveningStackPointer = false;
Query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects,
query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects,
InterveningStackPointer);
if (Effects && InterveningEffects)
return false;

@@ -397,7 +397,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
}

/// Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
const MachineBasicBlock &MBB,
const MachineRegisterInfo &MRI,
const MachineDominatorTree &MDT,

@@ -456,7 +456,7 @@ static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
}

/// Get the appropriate tee opcode for the given register class.
static unsigned GetTeeOpcode(const TargetRegisterClass *RC) {
static unsigned getTeeOpcode(const TargetRegisterClass *RC) {
if (RC == &WebAssembly::I32RegClass)
return WebAssembly::TEE_I32;
if (RC == &WebAssembly::I64RegClass)

@@ -471,7 +471,7 @@ static unsigned GetTeeOpcode(const TargetRegisterClass *RC) {
}

// Shrink LI to its uses, cleaning up LI.
static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {
if (LIS.shrinkToUses(&LI)) {
SmallVector<LiveInterval *, 4> SplitLIs;
LIS.splitSeparateComponents(LI, SplitLIs);

@@ -480,7 +480,7 @@ static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {

/// A single-use def in the same block with no intervening memory or register
/// dependencies; move the def down and nest it with the current instruction.
static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op,
static MachineInstr *moveForSingleUse(unsigned Reg, MachineOperand &Op,
MachineInstr *Def, MachineBasicBlock &MBB,
MachineInstr *Insert, LiveIntervals &LIS,
WebAssemblyFunctionInfo &MFI,

@@ -519,13 +519,13 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op,
LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
}

ImposeStackOrdering(Def);
imposeStackOrdering(Def);
return Def;
}

/// A trivially cloneable instruction; clone it and nest the new copy with the
/// current instruction.
static MachineInstr *RematerializeCheapDef(
static MachineInstr *rematerializeCheapDef(
unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,

@@ -542,7 +542,7 @@ static MachineInstr *RematerializeCheapDef(
LIS.InsertMachineInstrInMaps(*Clone);
LIS.createAndComputeVirtRegInterval(NewReg);
MFI.stackifyVReg(NewReg);
ImposeStackOrdering(Clone);
imposeStackOrdering(Clone);

LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump());

@@ -550,7 +550,7 @@ static MachineInstr *RematerializeCheapDef(
bool IsDead = MRI.use_empty(Reg);
if (!IsDead) {
LiveInterval &LI = LIS.getInterval(Reg);
ShrinkToUses(LI, LIS);
shrinkToUses(LI, LIS);
IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot());
}

@@ -593,7 +593,7 @@ static MachineInstr *RematerializeCheapDef(
///
/// with DefReg and TeeReg stackified. This eliminates a local.get from the
/// resulting code.
static MachineInstr *MoveAndTeeForMultiUse(
static MachineInstr *moveAndTeeForMultiUse(
unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {

@@ -611,7 +611,7 @@ static MachineInstr *MoveAndTeeForMultiUse(
unsigned DefReg = MRI.createVirtualRegister(RegClass);
MachineOperand &DefMO = Def->getOperand(0);
MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(),
TII->get(GetTeeOpcode(RegClass)), TeeReg)
TII->get(getTeeOpcode(RegClass)), TeeReg)
.addReg(Reg, RegState::Define)
.addReg(DefReg, getUndefRegState(DefMO.isDead()));
Op.setReg(TeeReg);

@@ -627,15 +627,15 @@ static MachineInstr *MoveAndTeeForMultiUse(
VNInfo *ValNo = LI.getVNInfoAt(DefIdx);
I->start = TeeIdx;
ValNo->def = TeeIdx;
ShrinkToUses(LI, LIS);
shrinkToUses(LI, LIS);

// Finish stackifying the new regs.
LIS.createAndComputeVirtRegInterval(TeeReg);
LIS.createAndComputeVirtRegInterval(DefReg);
MFI.stackifyVReg(DefReg);
MFI.stackifyVReg(TeeReg);
ImposeStackOrdering(Def);
ImposeStackOrdering(Tee);
imposeStackOrdering(Def);
imposeStackOrdering(Tee);

DefDIs.clone(Tee, DefReg);
DefDIs.clone(Insert, TeeReg);

@@ -649,9 +649,9 @@ namespace {
/// A stack for walking the tree of instructions being built, visiting the
/// MachineOperands in DFS order.
class TreeWalkerState {
typedef MachineInstr::mop_iterator mop_iterator;
typedef std::reverse_iterator<mop_iterator> mop_reverse_iterator;
typedef iterator_range<mop_reverse_iterator> RangeTy;
using mop_iterator = MachineInstr::mop_iterator;
using mop_reverse_iterator = std::reverse_iterator<mop_iterator>;
using RangeTy = iterator_range<mop_reverse_iterator>;
SmallVector<RangeTy, 4> Worklist;

public:

@@ -661,9 +661,9 @@ public:
Worklist.push_back(reverse(Range));
}

bool Done() const { return Worklist.empty(); }
bool done() const { return Worklist.empty(); }

MachineOperand &Pop() {
MachineOperand &pop() {
RangeTy &Range = Worklist.back();
MachineOperand &Op = *Range.begin();
Range = drop_begin(Range, 1);

@@ -676,7 +676,7 @@ public:
}

/// Push Instr's operands onto the stack to be visited.
void PushOperands(MachineInstr *Instr) {
void pushOperands(MachineInstr *Instr) {
const iterator_range<mop_iterator> &Range(Instr->explicit_uses());
if (Range.begin() != Range.end())
Worklist.push_back(reverse(Range));

@@ -684,8 +684,8 @@ public:

/// Some of Instr's operands are on the top of the stack; remove them and
/// re-insert them starting from the beginning (because we've commuted them).
void ResetTopOperands(MachineInstr *Instr) {
assert(HasRemainingOperands(Instr) &&
void resetTopOperands(MachineInstr *Instr) {
assert(hasRemainingOperands(Instr) &&
"Reseting operands should only be done when the instruction has "
"an operand still on the stack");
Worklist.back() = reverse(Instr->explicit_uses());

@@ -693,7 +693,7 @@ public:

/// Test whether Instr has operands remaining to be visited at the top of
/// the stack.
bool HasRemainingOperands(const MachineInstr *Instr) const {
bool hasRemainingOperands(const MachineInstr *Instr) const {
if (Worklist.empty())
return false;
const RangeTy &Range = Worklist.back();

@@ -706,7 +706,7 @@ public:
///
/// This is needed as a consequence of using implicit local.gets for
/// uses and implicit local.sets for defs.
bool IsOnStack(unsigned Reg) const {
bool isOnStack(unsigned Reg) const {
for (const RangeTy &Range : Worklist)
for (const MachineOperand &MO : Range)
if (MO.isReg() && MO.getReg() == Reg)

@@ -723,20 +723,18 @@ class CommutingState {
/// state where we've commuted the operands of the current instruction and are
/// revisiting it, and the declined state where we've reverted the operands
/// back to their original order and will no longer commute it further.
bool TentativelyCommuting;
bool Declined;
bool TentativelyCommuting = false;
bool Declined = false;

/// During the tentative state, these hold the operand indices of the commuted
/// operands.
unsigned Operand0, Operand1;

public:
CommutingState() : TentativelyCommuting(false), Declined(false) {}

/// Stackification for an operand was not successful due to ordering
/// constraints. If possible, and if we haven't already tried it and declined
/// it, commute Insert's operands and prepare to revisit it.
void MaybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
void maybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
const WebAssemblyInstrInfo *TII) {
if (TentativelyCommuting) {
assert(!Declined &&

@@ -745,13 +743,13 @@ public:
TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
TentativelyCommuting = false;
Declined = true;
} else if (!Declined && TreeWalker.HasRemainingOperands(Insert)) {
} else if (!Declined && TreeWalker.hasRemainingOperands(Insert)) {
Operand0 = TargetInstrInfo::CommuteAnyOperandIndex;
Operand1 = TargetInstrInfo::CommuteAnyOperandIndex;
if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
// Tentatively commute the operands and try again.
TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
TreeWalker.ResetTopOperands(Insert);
TreeWalker.resetTopOperands(Insert);
TentativelyCommuting = true;
Declined = false;
}

@@ -760,7 +758,7 @@ public:

/// Stackification for some operand was successful. Reset to the default
/// state.
void Reset() {
void reset() {
TentativelyCommuting = false;
Declined = false;
}

@@ -778,8 +776,8 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
auto &MDT = getAnalysis<MachineDominatorTree>();
auto &LIS = getAnalysis<LiveIntervals>();

// Walk the instructions from the bottom up. Currently we don't look past
// block boundaries, and the blocks aren't ordered so the block visitation

@@ -802,8 +800,8 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
// operands off the stack in LIFO order.
CommutingState Commuting;
TreeWalkerState TreeWalker(Insert);
while (!TreeWalker.Done()) {
MachineOperand &Op = TreeWalker.Pop();
while (!TreeWalker.done()) {
MachineOperand &Op = TreeWalker.pop();

// We're only interested in explicit virtual register operands.
if (!Op.isReg())

@@ -817,7 +815,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
continue;

// Identify the definition for this register at this point.
MachineInstr *Def = GetVRegDef(Reg, Insert, MRI, LIS);
MachineInstr *Def = getVRegDef(Reg, Insert, MRI, LIS);
if (!Def)
continue;

@@ -856,23 +854,23 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
// supports intra-block moves) and it's MachineSink's job to catch all
// the sinking opportunities anyway.
bool SameBlock = Def->getParent() == &MBB;
bool CanMove = SameBlock && IsSafeToMove(Def, Insert, AA, MRI) &&
!TreeWalker.IsOnStack(Reg);
if (CanMove && HasOneUse(Reg, Def, MRI, MDT, LIS)) {
Insert = MoveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI);
} else if (ShouldRematerialize(*Def, AA, TII)) {
bool CanMove = SameBlock && isSafeToMove(Def, Insert, AA, MRI) &&
!TreeWalker.isOnStack(Reg);
if (CanMove && hasOneUse(Reg, Def, MRI, MDT, LIS)) {
Insert = moveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI);
} else if (shouldRematerialize(*Def, AA, TII)) {
Insert =
RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(),
rematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(),
LIS, MFI, MRI, TII, TRI);
} else if (CanMove &&
OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) {
Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI,
oneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) {
Insert = moveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI,
MRI, TII);
} else {
// We failed to stackify the operand. If the problem was ordering
// constraints, Commuting may be able to help.
if (!CanMove && SameBlock)
Commuting.MaybeCommute(Insert, TreeWalker, TII);
Commuting.maybeCommute(Insert, TreeWalker, TII);
// Proceed to the next operand.
continue;
}

@@ -881,18 +879,18 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
// to a constant 0 so that the def is explicit, and the push/pop
// correspondence is maintained.
if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF)
ConvertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS);
convertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS);

// We stackified an operand. Add the defining instruction's operands to
// the worklist stack now to continue to build an ever deeper tree.
Commuting.Reset();
TreeWalker.PushOperands(Insert);
Commuting.reset();
TreeWalker.pushOperands(Insert);
}

// If we stackified any operands, skip over the tree to start looking for
// the next instruction we can build a tree on.
if (Insert != &*MII) {
ImposeStackOrdering(&*MII);
imposeStackOrdering(&*MII);
MII = MachineBasicBlock::iterator(Insert).getReverse();
Changed = true;
}
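The TreeWalkerState and CommutingState hunks above combine two of the modernize fixes: `using` aliases in place of `typedef`, and in-class member initializers in place of a constructor whose only job was to set defaults (which is why the old CommutingState constructor disappears). A hypothetical class, not taken from the patch, showing both styles:

#include <vector>

// Old style: typedef plus a constructor used only for initialization.
class WorklistOld {
  typedef std::vector<int> ItemList;
  ItemList Items;
  bool Dirty;

public:
  WorklistOld() : Dirty(false) {}
};

// Style after the clang-tidy fixes: a 'using' alias and a default member
// initializer make the hand-written constructor unnecessary.
class WorklistNew {
  using ItemList = std::vector<int>;
  ItemList Items;
  bool Dirty = false;
};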
@@ -484,14 +484,14 @@ struct StaticLibcallNameMap {

} // end anonymous namespace

void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
RTLIB::Libcall LC,
SmallVectorImpl<wasm::ValType> &Rets,
SmallVectorImpl<wasm::ValType> &Params) {
assert(Rets.empty());
assert(Params.empty());

wasm::ValType iPTR =
wasm::ValType PtrTy =
Subtarget.hasAddr64() ? wasm::ValType::I64 : wasm::ValType::I32;

auto &Table = RuntimeLibcallSignatures->Table;

@@ -599,13 +599,13 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
break;
case func_f32_iPTR_iPTR:
Params.push_back(wasm::ValType::F32);
Params.push_back(iPTR);
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(PtrTy);
break;
case func_f64_iPTR_iPTR:
Params.push_back(wasm::ValType::F64);
Params.push_back(iPTR);
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(PtrTy);
break;
case i16_func_i16_i16:
Rets.push_back(wasm::ValType::I32);

@@ -631,7 +631,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I32);
Params.push_back(wasm::ValType::I32);
Params.push_back(wasm::ValType::I32);
Params.push_back(iPTR);
Params.push_back(PtrTy);
break;
case i64_func_i64_i64:
Rets.push_back(wasm::ValType::I64);

@@ -642,14 +642,14 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(iPTR);
Params.push_back(PtrTy);
break;
case i64_i64_func_f32:
#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::F32);
break;

@@ -658,7 +658,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::F64);
break;

@@ -667,7 +667,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I32);
Params.push_back(wasm::ValType::I32);

@@ -677,7 +677,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I32);
Params.push_back(wasm::ValType::I32);

@@ -687,7 +687,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);

@@ -697,7 +697,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);

@@ -709,13 +709,13 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(iPTR);
Params.push_back(PtrTy);
break;
case i64_i64_i64_i64_func_i64_i64_i64_i64:
#if 0 // TODO: Enable this when wasm gets multiple-return-value support.

@@ -724,7 +724,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);

@@ -738,23 +738,23 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
#else
Params.push_back(iPTR);
Params.push_back(PtrTy);
#endif
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I32);
break;
case iPTR_func_iPTR_i32_iPTR:
Rets.push_back(iPTR);
Params.push_back(iPTR);
Rets.push_back(PtrTy);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I32);
Params.push_back(iPTR);
Params.push_back(PtrTy);
break;
case iPTR_func_iPTR_iPTR_iPTR:
Rets.push_back(iPTR);
Params.push_back(iPTR);
Params.push_back(iPTR);
Params.push_back(iPTR);
Rets.push_back(PtrTy);
Params.push_back(PtrTy);
Params.push_back(PtrTy);
Params.push_back(PtrTy);
break;
case f32_func_f32_f32_f32:
Rets.push_back(wasm::ValType::F32);

@@ -771,39 +771,39 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
case func_i64_i64_iPTR_iPTR:
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(iPTR);
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(PtrTy);
break;
case func_iPTR_f32:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::F32);
break;
case func_iPTR_f64:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::F64);
break;
case func_iPTR_i32:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I32);
break;
case func_iPTR_i64:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I64);
break;
case func_iPTR_i64_i64:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
break;
case func_iPTR_i64_i64_i64_i64:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
break;
case func_iPTR_i64_i64_i64_i64_i64_i64:
Params.push_back(iPTR);
Params.push_back(PtrTy);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);
Params.push_back(wasm::ValType::I64);

@@ -831,12 +831,12 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
static ManagedStatic<StaticLibcallNameMap> LibcallNameMap;
// TODO: If the RTLIB::Libcall-taking flavor of GetSignature remains unsed
// other than here, just roll its logic into this version.
void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
const char *Name,
SmallVectorImpl<wasm::ValType> &Rets,
SmallVectorImpl<wasm::ValType> &Params) {
auto &Map = LibcallNameMap->Map;
auto val = Map.find(Name);
assert(val != Map.end() && "unexpected runtime library name");
return GetLibcallSignature(Subtarget, val->second, Rets, Params);
auto Val = Map.find(Name);
assert(Val != Map.end() && "unexpected runtime library name");
return getLibcallSignature(Subtarget, Val->second, Rets, Params);
}
@@ -22,12 +22,12 @@ namespace llvm {

class WebAssemblySubtarget;

extern void GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget,
RTLIB::Libcall LC,
SmallVectorImpl<wasm::ValType> &Rets,
SmallVectorImpl<wasm::ValType> &Params);

extern void GetLibcallSignature(const WebAssemblySubtarget &Subtarget,
extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget,
const char *Name,
SmallVectorImpl<wasm::ValType> &Rets,
SmallVectorImpl<wasm::ValType> &Params);
@@ -16,4 +16,4 @@ using namespace llvm;

#define DEBUG_TYPE "wasm-selectiondag-info"

WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() {}
WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() = default; // anchor
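The destructor change above is the `= default` fix applied to an empty out-of-line destructor that exists only to anchor the class's vtable in one translation unit. A hypothetical header/source pair, not from the patch, showing the same idiom:

// Widget.h (hypothetical)
class Widget {
public:
  virtual ~Widget(); // defined out of line so the vtable is emitted once
  virtual void draw() {}
};

// Widget.cpp (hypothetical)
Widget::~Widget() = default; // replaces the old 'Widget::~Widget() {}'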
@@ -53,7 +53,7 @@ FunctionPass *llvm::createWebAssemblySetP2AlignOperands() {
return new WebAssemblySetP2AlignOperands();
}

static void RewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
static void rewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
assert(MI.getOperand(OperandNo).getImm() == 0 &&
"ISel should set p2align operands to 0");
assert(MI.hasOneMemOperand() &&

@@ -163,7 +163,7 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::ATOMIC_NOTIFY:
case WebAssembly::ATOMIC_WAIT_I32:
case WebAssembly::ATOMIC_WAIT_I64:
RewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo);
rewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo);
break;
case WebAssembly::STORE_I32:
case WebAssembly::STORE_I64:

@@ -187,7 +187,7 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::ATOMIC_STORE8_I64:
case WebAssembly::ATOMIC_STORE16_I64:
case WebAssembly::ATOMIC_STORE32_I64:
RewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo);
rewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo);
break;
default:
break;
@@ -120,7 +120,7 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine(
// splitting and tail merging.
}

WebAssemblyTargetMachine::~WebAssemblyTargetMachine() {}
WebAssemblyTargetMachine::~WebAssemblyTargetMachine() = default; // anchor.

const WebAssemblySubtarget *
WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const {
@@ -50,7 +50,7 @@ unsigned WebAssemblyTTIImpl::getArithmeticInstrCost(
unsigned Cost = BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost(
Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);

if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (auto *VTy = dyn_cast<VectorType>(Ty)) {
switch (Opcode) {
case Instruction::LShr:
case Instruction::AShr:
@@ -18,7 +18,7 @@ using namespace llvm;
using namespace object;

void llvm::printWasmFileHeader(const object::ObjectFile *Obj) {
const WasmObjectFile *File = dyn_cast<const WasmObjectFile>(Obj);
const auto *File = dyn_cast<const WasmObjectFile>(Obj);

outs() << "Program Header:\n";
outs() << "Version: 0x";
@@ -219,7 +219,7 @@ namespace llvm {
std::error_code createWasmDumper(const object::ObjectFile *Obj,
ScopedPrinter &Writer,
std::unique_ptr<ObjDumper> &Result) {
const WasmObjectFile *WasmObj = dyn_cast<WasmObjectFile>(Obj);
const auto *WasmObj = dyn_cast<WasmObjectFile>(Obj);
assert(WasmObj && "createWasmDumper called with non-wasm object");

Result.reset(new WasmDumper(WasmObj, Writer));
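The dyn_cast changes in the two hunks above apply the "use auto with casts" rule: when the right-hand side of the initialization already names the type, repeating it on the left adds nothing. A small sketch with an invented hierarchy; it uses standard dynamic_cast rather than LLVM's dyn_cast so it stays self-contained:

// Hypothetical types, not part of the patch.
struct Animal { virtual ~Animal() = default; };
struct Dog : Animal { void bark() {} };

void example(Animal *A) {
  // Before: Dog *D = dynamic_cast<Dog *>(A);
  // After:  'auto *' avoids restating a type that is already evident.
  if (auto *D = dynamic_cast<Dog *>(A))
    D->bark();
}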
@@ -31,7 +31,7 @@ public:

} // namespace

static WasmYAML::Table make_table(const wasm::WasmTable &Table) {
static WasmYAML::Table makeTable(const wasm::WasmTable &Table) {
WasmYAML::Table T;
T.ElemType = Table.ElemType;
T.TableLimits.Flags = Table.Limits.Flags;

@@ -40,7 +40,7 @@ static WasmYAML::Table make_table(const wasm::WasmTable &Table) {
return T;
}

static WasmYAML::Limits make_limits(const wasm::WasmLimits &Limits) {
static WasmYAML::Limits makeLimits(const wasm::WasmLimits &Limits) {
WasmYAML::Limits L;
L.Flags = Limits.Flags;
L.Initial = Limits.Initial;

@@ -194,7 +194,7 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
if (FunctionSig.Returns.size())
Sig.ReturnType = static_cast<uint32_t>(FunctionSig.Returns[0]);
for (const auto &ParamType : FunctionSig.Params)
Sig.ParamTypes.push_back(static_cast<uint32_t>(ParamType));
Sig.ParamTypes.emplace_back(static_cast<uint32_t>(ParamType));
TypeSec->Signatures.push_back(Sig);
}
S = std::move(TypeSec);

@@ -220,10 +220,10 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
Im.EventImport.SigIndex = Import.Event.SigIndex;
break;
case wasm::WASM_EXTERNAL_TABLE:
Im.TableImport = make_table(Import.Table);
Im.TableImport = makeTable(Import.Table);
break;
case wasm::WASM_EXTERNAL_MEMORY:
Im.Memory = make_limits(Import.Memory);
Im.Memory = makeLimits(Import.Memory);
break;
}
ImportSec->Imports.push_back(Im);

@@ -242,7 +242,7 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
case wasm::WASM_SEC_TABLE: {
auto TableSec = make_unique<WasmYAML::TableSection>();
for (const wasm::WasmTable &Table : Obj.tables()) {
TableSec->Tables.push_back(make_table(Table));
TableSec->Tables.push_back(makeTable(Table));
}
S = std::move(TableSec);
break;

@@ -250,7 +250,7 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
case wasm::WASM_SEC_MEMORY: {
auto MemorySec = make_unique<WasmYAML::MemorySection>();
for (const wasm::WasmLimits &Memory : Obj.memories()) {
MemorySec->Memories.push_back(make_limits(Memory));
MemorySec->Memories.push_back(makeLimits(Memory));
}
S = std::move(MemorySec);
break;
@@ -124,14 +124,14 @@ class SubSectionWriter {
public:
SubSectionWriter(raw_ostream &OS) : OS(OS), StringStream(OutString) {}

void Done() {
void done() {
StringStream.flush();
encodeULEB128(OutString.size(), OS);
OS << OutString;
OutString.clear();
}

raw_ostream &GetStream() { return StringStream; }
raw_ostream &getStream() { return StringStream; }
};

int WasmWriter::writeSectionContent(raw_ostream &OS,

@@ -159,78 +159,78 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,
if (Section.SymbolTable.size()) {
writeUint8(OS, wasm::WASM_SYMBOL_TABLE);

encodeULEB128(Section.SymbolTable.size(), SubSection.GetStream());
encodeULEB128(Section.SymbolTable.size(), SubSection.getStream());
#ifndef NDEBUG
uint32_t SymbolIndex = 0;
#endif
for (const WasmYAML::SymbolInfo &Info : Section.SymbolTable) {
assert(Info.Index == SymbolIndex++);
writeUint8(SubSection.GetStream(), Info.Kind);
encodeULEB128(Info.Flags, SubSection.GetStream());
writeUint8(SubSection.getStream(), Info.Kind);
encodeULEB128(Info.Flags, SubSection.getStream());
switch (Info.Kind) {
case wasm::WASM_SYMBOL_TYPE_FUNCTION:
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
case wasm::WASM_SYMBOL_TYPE_EVENT:
encodeULEB128(Info.ElementIndex, SubSection.GetStream());
encodeULEB128(Info.ElementIndex, SubSection.getStream());
if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0)
writeStringRef(Info.Name, SubSection.GetStream());
writeStringRef(Info.Name, SubSection.getStream());
break;
case wasm::WASM_SYMBOL_TYPE_DATA:
writeStringRef(Info.Name, SubSection.GetStream());
writeStringRef(Info.Name, SubSection.getStream());
if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0) {
encodeULEB128(Info.DataRef.Segment, SubSection.GetStream());
encodeULEB128(Info.DataRef.Offset, SubSection.GetStream());
encodeULEB128(Info.DataRef.Size, SubSection.GetStream());
encodeULEB128(Info.DataRef.Segment, SubSection.getStream());
encodeULEB128(Info.DataRef.Offset, SubSection.getStream());
encodeULEB128(Info.DataRef.Size, SubSection.getStream());
}
break;
case wasm::WASM_SYMBOL_TYPE_SECTION:
encodeULEB128(Info.ElementIndex, SubSection.GetStream());
encodeULEB128(Info.ElementIndex, SubSection.getStream());
break;
default:
llvm_unreachable("unexpected kind");
}
}

SubSection.Done();
SubSection.done();
}

// SEGMENT_NAMES subsection
if (Section.SegmentInfos.size()) {
writeUint8(OS, wasm::WASM_SEGMENT_INFO);
encodeULEB128(Section.SegmentInfos.size(), SubSection.GetStream());
encodeULEB128(Section.SegmentInfos.size(), SubSection.getStream());
for (const WasmYAML::SegmentInfo &SegmentInfo : Section.SegmentInfos) {
writeStringRef(SegmentInfo.Name, SubSection.GetStream());
encodeULEB128(SegmentInfo.Alignment, SubSection.GetStream());
encodeULEB128(SegmentInfo.Flags, SubSection.GetStream());
writeStringRef(SegmentInfo.Name, SubSection.getStream());
encodeULEB128(SegmentInfo.Alignment, SubSection.getStream());
encodeULEB128(SegmentInfo.Flags, SubSection.getStream());
}
SubSection.Done();
SubSection.done();
}

// INIT_FUNCS subsection
if (Section.InitFunctions.size()) {
writeUint8(OS, wasm::WASM_INIT_FUNCS);
encodeULEB128(Section.InitFunctions.size(), SubSection.GetStream());
encodeULEB128(Section.InitFunctions.size(), SubSection.getStream());
for (const WasmYAML::InitFunction &Func : Section.InitFunctions) {
encodeULEB128(Func.Priority, SubSection.GetStream());
encodeULEB128(Func.Symbol, SubSection.GetStream());
encodeULEB128(Func.Priority, SubSection.getStream());
encodeULEB128(Func.Symbol, SubSection.getStream());
}
SubSection.Done();
SubSection.done();
}

// COMDAT_INFO subsection
if (Section.Comdats.size()) {
writeUint8(OS, wasm::WASM_COMDAT_INFO);
encodeULEB128(Section.Comdats.size(), SubSection.GetStream());
encodeULEB128(Section.Comdats.size(), SubSection.getStream());
for (const auto &C : Section.Comdats) {
writeStringRef(C.Name, SubSection.GetStream());
encodeULEB128(0, SubSection.GetStream()); // flags for future use
encodeULEB128(C.Entries.size(), SubSection.GetStream());
writeStringRef(C.Name, SubSection.getStream());
encodeULEB128(0, SubSection.getStream()); // flags for future use
encodeULEB128(C.Entries.size(), SubSection.getStream());
for (const WasmYAML::ComdatEntry &Entry : C.Entries) {
writeUint8(SubSection.GetStream(), Entry.Kind);
encodeULEB128(Entry.Index, SubSection.GetStream());
writeUint8(SubSection.getStream(), Entry.Kind);
encodeULEB128(Entry.Index, SubSection.getStream());
}
}
SubSection.Done();
SubSection.done();
}

return 0;

@@ -244,13 +244,13 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,

SubSectionWriter SubSection(OS);

encodeULEB128(Section.FunctionNames.size(), SubSection.GetStream());
encodeULEB128(Section.FunctionNames.size(), SubSection.getStream());
for (const WasmYAML::NameEntry &NameEntry : Section.FunctionNames) {
encodeULEB128(NameEntry.Index, SubSection.GetStream());
writeStringRef(NameEntry.Name, SubSection.GetStream());
encodeULEB128(NameEntry.Index, SubSection.getStream());
writeStringRef(NameEntry.Name, SubSection.getStream());
}

SubSection.Done();
SubSection.done();
}
return 0;
}