llvm-project/lld/wasm/Writer.cpp

//===- Writer.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Writer.h"
#include "Config.h"
#include "InputChunks.h"
#include "InputElement.h"
#include "MapFile.h"
#include "OutputSections.h"
#include "OutputSegment.h"
#include "Relocations.h"
#include "SymbolTable.h"
#include "SyntheticSections.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/BinaryFormat/WasmTraits.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include <cstdarg>
#include <map>
#define DEBUG_TYPE "lld"
using namespace llvm;
using namespace llvm::wasm;
namespace lld {
namespace wasm {
static constexpr int stackAlignment = 16;
namespace {
// The writer writes a SymbolTable result to a file.
class Writer {
public:
void run();
private:
void openFile();
bool needsPassiveInitialization(const OutputSegment *segment);
bool hasPassiveInitializedSegments();
void createSyntheticInitFunctions();
void createInitMemoryFunction();
void createStartFunction();
void createApplyDataRelocationsFunction();
void createApplyGlobalRelocationsFunction();
void createCallCtorsFunction();
void createInitTLSFunction();
void createCommandExportWrappers();
void createCommandExportWrapper(uint32_t functionIndex, DefinedFunction *f);
void assignIndexes();
void populateSymtab();
void populateProducers();
void populateTargetFeatures();
void calculateInitFunctions();
void calculateImports();
void calculateExports();
void calculateCustomSections();
void calculateTypes();
void createOutputSegments();
void combineOutputSegments();
void layoutMemory();
void createHeader();
void addSection(OutputSection *sec);
void addSections();
void createCustomSections();
void createSyntheticSections();
void createSyntheticSectionsPostLayout();
void finalizeSections();
// Custom sections
void createRelocSections();
void writeHeader();
void writeSections();
uint64_t fileSize = 0;
std::vector<WasmInitEntry> initFunctions;
llvm::StringMap<std::vector<InputSection *>> customSectionMapping;
// Stable storage for command export wrapper function name strings.
std::list<std::string> commandExportWrapperNames;
// Elements that are used to construct the final output
std::string header;
std::vector<OutputSection *> outputSections;
std::unique_ptr<FileOutputBuffer> buffer;
std::vector<OutputSegment *> segments;
llvm::SmallDenseMap<StringRef, OutputSegment *> segmentMap;
};
} // anonymous namespace
void Writer::calculateCustomSections() {
log("calculateCustomSections");
bool stripDebug = config->stripDebug || config->stripAll;
for (ObjFile *file : symtab->objectFiles) {
for (InputSection *section : file->customSections) {
// Exclude COMDAT sections that are not selected for inclusion
if (section->discarded)
continue;
StringRef name = section->getName();
// These custom sections are known to the linker and synthesized rather than
// blindly copied.
if (name == "linking" || name == "name" || name == "producers" ||
name == "target_features" || name.startswith("reloc."))
continue;
// These custom sections are generated by `clang -fembed-bitcode`.
// These are used by the rust toolchain to ship LTO data along with
// compiled object code, but they don't want this included in the linker
// output.
if (name == ".llvmbc" || name == ".llvmcmd")
continue;
// Strip debug sections if that option was specified.
if (stripDebug && name.startswith(".debug_"))
continue;
// Otherwise include custom sections by default and concatenate their
// contents.
customSectionMapping[name].push_back(section);
}
}
}
void Writer::createCustomSections() {
log("createCustomSections");
for (auto &pair : customSectionMapping) {
StringRef name = pair.first();
LLVM_DEBUG(dbgs() << "createCustomSection: " << name << "\n");
OutputSection *sec = make<CustomSection>(std::string(name), pair.second);
if (config->relocatable || config->emitRelocs) {
auto *sym = make<OutputSectionSymbol>(sec);
out.linkingSec->addToSymtab(sym);
sec->sectionSym = sym;
}
addSection(sec);
}
}
// Create relocations sections in the final output.
// These are only created when relocatable output is requested.
void Writer::createRelocSections() {
log("createRelocSections");
// Don't use an iterator here since we are adding to outputSections
size_t origSize = outputSections.size();
for (size_t i = 0; i < origSize; i++) {
LLVM_DEBUG(dbgs() << "check section " << i << "\n");
OutputSection *sec = outputSections[i];
// Count the number of needed sections.
uint32_t count = sec->getNumRelocations();
if (!count)
continue;
StringRef name;
if (sec->type == WASM_SEC_DATA)
name = "reloc.DATA";
else if (sec->type == WASM_SEC_CODE)
name = "reloc.CODE";
else if (sec->type == WASM_SEC_CUSTOM)
name = saver.save("reloc." + sec->name);
else
llvm_unreachable(
"relocations only supported for code, data, or custom sections");
addSection(make<RelocSection>(name, sec));
}
}
void Writer::populateProducers() {
for (ObjFile *file : symtab->objectFiles) {
const WasmProducerInfo &info = file->getWasmObj()->getProducerInfo();
out.producersSec->addInfo(info);
}
}
void Writer::writeHeader() {
memcpy(buffer->getBufferStart(), header.data(), header.size());
}
void Writer::writeSections() {
uint8_t *buf = buffer->getBufferStart();
parallelForEach(outputSections, [buf](OutputSection *s) {
assert(s->isNeeded());
s->writeTo(buf);
});
}
static void setGlobalPtr(DefinedGlobal *g, uint64_t memoryPtr) {
g->global->setPointerValue(memoryPtr);
}
// Fix the memory layout of the output binary. This assigns memory offsets
// to each of the input data sections as well as the explicit stack region.
// The default memory layout is as follows, from low to high.
//
// - initialized data (starting at Config->globalBase)
// - BSS data (not currently implemented in llvm)
// - explicit stack (Config->ZStackSize)
// - heap start / unallocated
//
// The --stack-first option means that stack is placed before any static data.
// This can be useful since it means that stack overflow traps immediately
// rather than overwriting global data, but it also increases code size since
// all static data loads and stores require larger offsets.
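//
// For example, with the illustrative (not necessarily default) values
// --global-base=1024 and -z stack-size=65536, the non-stack-first layout
// would be roughly:
//
//   1024            first data segment (further segments follow, aligned)
//   <data end>      __data_end, end of static data
//   <stack base>    start of the 65536-byte stack (16-byte aligned)
//   <stack top>     initial value of __stack_pointer
//   <heap base>     __heap_base, start of the unallocated/heap region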
void Writer::layoutMemory() {
uint64_t memoryPtr = 0;
auto placeStack = [&]() {
if (config->relocatable || config->isPic)
return;
memoryPtr = alignTo(memoryPtr, stackAlignment);
if (config->zStackSize != alignTo(config->zStackSize, stackAlignment))
error("stack size must be " + Twine(stackAlignment) + "-byte aligned");
log("mem: stack size = " + Twine(config->zStackSize));
log("mem: stack base = " + Twine(memoryPtr));
memoryPtr += config->zStackSize;
setGlobalPtr(cast<DefinedGlobal>(WasmSym::stackPointer), memoryPtr);
log("mem: stack top = " + Twine(memoryPtr));
};
if (config->stackFirst) {
placeStack();
} else {
memoryPtr = config->globalBase;
log("mem: global base = " + Twine(config->globalBase));
}
if (WasmSym::globalBase)
WasmSym::globalBase->setVirtualAddress(memoryPtr);
uint64_t dataStart = memoryPtr;
// Arbitrarily set __dso_handle to point to the start of the data segments.
if (WasmSym::dsoHandle)
WasmSym::dsoHandle->setVirtualAddress(dataStart);
out.dylinkSec->memAlign = 0;
for (OutputSegment *seg : segments) {
out.dylinkSec->memAlign = std::max(out.dylinkSec->memAlign, seg->alignment);
memoryPtr = alignTo(memoryPtr, 1ULL << seg->alignment);
seg->startVA = memoryPtr;
log(formatv("mem: {0,-15} offset={1,-8} size={2,-8} align={3}", seg->name,
memoryPtr, seg->size, seg->alignment));
if (!config->relocatable && seg->name == ".tdata") {
if (config->sharedMemory) {
auto *tlsSize = cast<DefinedGlobal>(WasmSym::tlsSize);
setGlobalPtr(tlsSize, seg->size);
auto *tlsAlign = cast<DefinedGlobal>(WasmSym::tlsAlign);
setGlobalPtr(tlsAlign, int64_t{1} << seg->alignment);
} else {
auto *tlsBase = cast<DefinedGlobal>(WasmSym::tlsBase);
setGlobalPtr(tlsBase, memoryPtr);
}
}
memoryPtr += seg->size;
}
// Make space for the memory initialization flag
if (config->sharedMemory && hasPassiveInitializedSegments()) {
memoryPtr = alignTo(memoryPtr, 4);
WasmSym::initMemoryFlag = symtab->addSyntheticDataSymbol(
"__wasm_init_memory_flag", WASM_SYMBOL_VISIBILITY_HIDDEN);
WasmSym::initMemoryFlag->markLive();
WasmSym::initMemoryFlag->setVirtualAddress(memoryPtr);
log(formatv("mem: {0,-15} offset={1,-8} size={2,-8} align={3}",
"__wasm_init_memory_flag", memoryPtr, 4, 4));
memoryPtr += 4;
}
if (WasmSym::dataEnd)
WasmSym::dataEnd->setVirtualAddress(memoryPtr);
uint64_t staticDataSize = memoryPtr - dataStart;
log("mem: static data = " + Twine(staticDataSize));
if (config->isPic)
out.dylinkSec->memSize = staticDataSize;
if (!config->stackFirst)
placeStack();
if (WasmSym::heapBase) {
// Set `__heap_base` to directly follow the end of the stack or global data.
// The fact that this comes last means that a malloc/brk implementation
// can grow the heap at runtime.
log("mem: heap base = " + Twine(memoryPtr));
WasmSym::heapBase->setVirtualAddress(memoryPtr);
}
uint64_t maxMemorySetting = 1ULL
<< (config->is64.getValueOr(false) ? 48 : 32);
if (config->initialMemory != 0) {
if (config->initialMemory != alignTo(config->initialMemory, WasmPageSize))
error("initial memory must be " + Twine(WasmPageSize) + "-byte aligned");
if (memoryPtr > config->initialMemory)
error("initial memory too small, " + Twine(memoryPtr) + " bytes needed");
if (config->initialMemory > maxMemorySetting)
error("initial memory too large, cannot be greater than " +
Twine(maxMemorySetting));
memoryPtr = config->initialMemory;
}
out.memorySec->numMemoryPages =
alignTo(memoryPtr, WasmPageSize) / WasmPageSize;
log("mem: total pages = " + Twine(out.memorySec->numMemoryPages));
if (config->maxMemory != 0) {
if (config->maxMemory != alignTo(config->maxMemory, WasmPageSize))
error("maximum memory must be " + Twine(WasmPageSize) + "-byte aligned");
if (memoryPtr > config->maxMemory)
error("maximum memory too small, " + Twine(memoryPtr) + " bytes needed");
if (config->maxMemory > maxMemorySetting)
error("maximum memory too large, cannot be greater than " +
Twine(maxMemorySetting));
}
// Check max if explicitly supplied or required by shared memory
if (config->maxMemory != 0 || config->sharedMemory) {
uint64_t max = config->maxMemory;
if (max == 0) {
// If no maxMemory config was supplied but we are building with
// shared memory, we need to pick a sensible upper limit.
if (config->isPic)
max = maxMemorySetting;
else
max = alignTo(memoryPtr, WasmPageSize);
}
out.memorySec->maxMemoryPages = max / WasmPageSize;
log("mem: max pages = " + Twine(out.memorySec->maxMemoryPages));
}
}
void Writer::addSection(OutputSection *sec) {
if (!sec->isNeeded())
return;
log("addSection: " + toString(*sec));
sec->sectionIndex = outputSections.size();
outputSections.push_back(sec);
}
// If a section name is valid as a C identifier (which is rare because of
// the leading '.'), linkers are expected to define __start_<secname> and
// __stop_<secname> symbols. They are at the beginning and end of the section,
// respectively. This is not required by the ELF standard, but GNU ld and
// gold provide the feature, and it is used by many programs.
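// For example, a (hypothetical) live output segment named "protodata" would
// get the optional symbols
//
//   __start_protodata = seg->startVA
//   __stop_protodata  = seg->startVA + seg->size
//
// while a segment named ".rodata" gets neither, since the leading '.' makes
// the name an invalid C identifier.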
static void addStartStopSymbols(const OutputSegment *seg) {
StringRef name = seg->name;
if (!isValidCIdentifier(name))
return;
LLVM_DEBUG(dbgs() << "addStartStopSymbols: " << name << "\n");
uint64_t start = seg->startVA;
uint64_t stop = start + seg->size;
symtab->addOptionalDataSymbol(saver.save("__start_" + name), start);
symtab->addOptionalDataSymbol(saver.save("__stop_" + name), stop);
}
void Writer::addSections() {
addSection(out.dylinkSec);
addSection(out.typeSec);
addSection(out.importSec);
addSection(out.functionSec);
addSection(out.tableSec);
addSection(out.memorySec);
addSection(out.eventSec);
addSection(out.globalSec);
addSection(out.exportSec);
addSection(out.startSec);
addSection(out.elemSec);
addSection(out.dataCountSec);
addSection(make<CodeSection>(out.functionSec->inputFunctions));
addSection(make<DataSection>(segments));
createCustomSections();
addSection(out.linkingSec);
if (config->emitRelocs || config->relocatable) {
createRelocSections();
}
addSection(out.nameSec);
addSection(out.producersSec);
addSection(out.targetFeaturesSec);
}
void Writer::finalizeSections() {
for (OutputSection *s : outputSections) {
s->setOffset(fileSize);
s->finalizeContents();
fileSize += s->getSize();
}
}
void Writer::populateTargetFeatures() {
StringMap<std::string> used;
StringMap<std::string> required;
StringMap<std::string> disallowed;
SmallSet<std::string, 8> &allowed = out.targetFeaturesSec->features;
bool tlsUsed = false;
// Only infer used features if user did not specify features
bool inferFeatures = !config->features.hasValue();
if (!inferFeatures) {
auto &explicitFeatures = config->features.getValue();
allowed.insert(explicitFeatures.begin(), explicitFeatures.end());
if (!config->checkFeatures)
return;
}
// Find the sets of used, required, and disallowed features
for (ObjFile *file : symtab->objectFiles) {
StringRef fileName(file->getName());
for (auto &feature : file->getWasmObj()->getTargetFeatures()) {
switch (feature.Prefix) {
case WASM_FEATURE_PREFIX_USED:
used.insert({feature.Name, std::string(fileName)});
break;
case WASM_FEATURE_PREFIX_REQUIRED:
used.insert({feature.Name, std::string(fileName)});
required.insert({feature.Name, std::string(fileName)});
break;
case WASM_FEATURE_PREFIX_DISALLOWED:
disallowed.insert({feature.Name, std::string(fileName)});
break;
default:
error("Unrecognized feature policy prefix " +
std::to_string(feature.Prefix));
}
}
// Find TLS data segments
auto isTLS = [](InputSegment *segment) {
StringRef name = segment->getName();
return segment->live &&
(name.startswith(".tdata") || name.startswith(".tbss"));
};
tlsUsed = tlsUsed ||
std::any_of(file->segments.begin(), file->segments.end(), isTLS);
}
if (inferFeatures)
for (const auto &key : used.keys())
allowed.insert(std::string(key));
if (!config->checkFeatures)
return;
if (!config->relocatable && allowed.count("mutable-globals") == 0) {
for (const Symbol *sym : out.importSec->importedSymbols) {
if (auto *global = dyn_cast<GlobalSymbol>(sym)) {
if (global->getGlobalType()->Mutable) {
error(Twine("mutable global imported but 'mutable-globals' feature "
"not present in inputs: `") +
toString(*sym) + "`. Use --no-check-features to suppress.");
}
}
}
for (const Symbol *sym : out.exportSec->exportedSymbols) {
if (isa<GlobalSymbol>(sym)) {
error(Twine("mutable global exported but 'mutable-globals' feature "
"not present in inputs: `") +
toString(*sym) + "`. Use --no-check-features to suppress.");
}
}
}
if (config->sharedMemory) {
if (disallowed.count("shared-mem"))
error("--shared-memory is disallowed by " + disallowed["shared-mem"] +
" because it was not compiled with 'atomics' or 'bulk-memory' "
"features.");
for (auto feature : {"atomics", "bulk-memory"})
if (!allowed.count(feature))
error(StringRef("'") + feature +
"' feature must be used in order to use shared memory");
}
if (tlsUsed) {
for (auto feature : {"atomics", "bulk-memory"})
if (!allowed.count(feature))
error(StringRef("'") + feature +
"' feature must be used in order to use thread-local storage");
}
// Validate that used features are allowed in output
if (!inferFeatures) {
for (auto &feature : used.keys()) {
if (!allowed.count(std::string(feature)))
error(Twine("Target feature '") + feature + "' used by " +
used[feature] + " is not allowed.");
}
}
// Validate the required and disallowed constraints for each file
for (ObjFile *file : symtab->objectFiles) {
StringRef fileName(file->getName());
SmallSet<std::string, 8> objectFeatures;
for (auto &feature : file->getWasmObj()->getTargetFeatures()) {
if (feature.Prefix == WASM_FEATURE_PREFIX_DISALLOWED)
continue;
objectFeatures.insert(feature.Name);
if (disallowed.count(feature.Name))
error(Twine("Target feature '") + feature.Name + "' used in " +
fileName + " is disallowed by " + disallowed[feature.Name] +
". Use --no-check-features to suppress.");
}
for (auto &feature : required.keys()) {
if (!objectFeatures.count(std::string(feature)))
error(Twine("Missing target feature '") + feature + "' in " + fileName +
", required by " + required[feature] +
". Use --no-check-features to suppress.");
}
}
}
static bool shouldImport(Symbol *sym) {
// We don't generate imports for data symbols. They can, however, be imported
// as GOT entries.
if (isa<DataSymbol>(sym))
return false;
if (config->relocatable ||
config->unresolvedSymbols == UnresolvedPolicy::ImportFuncs)
return true;
if (config->allowUndefinedSymbols.count(sym->getName()) != 0)
return true;
if (auto *g = dyn_cast<UndefinedGlobal>(sym))
return g->importName.hasValue();
if (auto *f = dyn_cast<UndefinedFunction>(sym))
return f->importName.hasValue();
if (auto *t = dyn_cast<UndefinedTable>(sym))
return t->importName.hasValue();
return false;
}
void Writer::calculateImports() {
for (Symbol *sym : symtab->getSymbols()) {
if (!sym->isUndefined())
continue;
if (!sym->isLive())
continue;
if (!sym->isUsedInRegularObj)
continue;
if (shouldImport(sym)) {
LLVM_DEBUG(dbgs() << "import: " << sym->getName() << "\n");
out.importSec->addImport(sym);
}
}
}
void Writer::calculateExports() {
if (config->relocatable)
return;
if (!config->relocatable && !config->importMemory)
out.exportSec->exports.push_back(
WasmExport{"memory", WASM_EXTERNAL_MEMORY, 0});
unsigned globalIndex =
out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals();
for (Symbol *sym : symtab->getSymbols()) {
if (!sym->isExported())
continue;
if (!sym->isLive())
continue;
StringRef name = sym->getName();
WasmExport export_;
if (auto *f = dyn_cast<DefinedFunction>(sym)) {
if (Optional<StringRef> exportName = f->function->getExportName()) {
name = *exportName;
}
export_ = {name, WASM_EXTERNAL_FUNCTION, f->getFunctionIndex()};
} else if (auto *g = dyn_cast<DefinedGlobal>(sym)) {
if (g->getGlobalType()->Mutable && !g->getFile() && !g->forceExport) {
// Avoid exporting mutable globals that are linker synthesized (e.g.
// __stack_pointer or __tls_base) unless they are explicitly exported
// from the command line.
// Without this check `--export-all` would cause any program using the
// stack pointer to export a mutable global even if none of the input
// files were built with the `mutable-globals` feature.
continue;
}
export_ = {name, WASM_EXTERNAL_GLOBAL, g->getGlobalIndex()};
} else if (auto *e = dyn_cast<DefinedEvent>(sym)) {
export_ = {name, WASM_EXTERNAL_EVENT, e->getEventIndex()};
} else if (auto *d = dyn_cast<DefinedData>(sym)) {
out.globalSec->dataAddressGlobals.push_back(d);
export_ = {name, WASM_EXTERNAL_GLOBAL, globalIndex++};
} else {
auto *t = cast<DefinedTable>(sym);
export_ = {name, WASM_EXTERNAL_TABLE, t->getTableNumber()};
}
LLVM_DEBUG(dbgs() << "Export: " << name << "\n");
out.exportSec->exports.push_back(export_);
out.exportSec->exportedSymbols.push_back(sym);
}
}
void Writer::populateSymtab() {
if (!config->relocatable && !config->emitRelocs)
return;
for (Symbol *sym : symtab->getSymbols())
if (sym->isUsedInRegularObj && sym->isLive())
out.linkingSec->addToSymtab(sym);
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "Local symtab entries: " << file->getName() << "\n");
for (Symbol *sym : file->getSymbols())
if (sym->isLocal() && !isa<SectionSymbol>(sym) && sym->isLive())
out.linkingSec->addToSymtab(sym);
}
}
void Writer::calculateTypes() {
// The output type section is the union of the following sets:
// 1. Any signature used in the TYPE relocation
// 2. The signatures of all imported functions
// 3. The signatures of all defined functions
// 4. The signatures of all imported events
// 5. The signatures of all defined events
for (ObjFile *file : symtab->objectFiles) {
ArrayRef<WasmSignature> types = file->getWasmObj()->types();
for (uint32_t i = 0; i < types.size(); i++)
if (file->typeIsUsed[i])
file->typeMap[i] = out.typeSec->registerType(types[i]);
}
for (const Symbol *sym : out.importSec->importedSymbols) {
if (auto *f = dyn_cast<FunctionSymbol>(sym))
out.typeSec->registerType(*f->signature);
else if (auto *e = dyn_cast<EventSymbol>(sym))
out.typeSec->registerType(*e->signature);
}
for (const InputFunction *f : out.functionSec->inputFunctions)
out.typeSec->registerType(f->signature);
for (const InputEvent *e : out.eventSec->inputEvents)
out.typeSec->registerType(e->signature);
}
// In a command-style link, create a wrapper for each exported symbol
// which calls the constructors and destructors.
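// For example (a sketch of the intent, using a hypothetical exported function
// `foo`): the original `foo` is hidden and a wrapper is exported in its
// place, named `foo.command_export` internally, whose body is roughly
//
//   call $__wasm_call_ctors   ;; if it is live
//   call $foo                 ;; forwarding the original parameters
//   call $__wasm_call_dtors   ;; if libc provides it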
void Writer::createCommandExportWrappers() {
// This logic doesn't currently support Emscripten-style PIC mode.
assert(!config->isPic);
// If there are no ctors and there's no libc `__wasm_call_dtors` to
// call, don't wrap the exports.
if (initFunctions.empty() && WasmSym::callDtors == NULL)
return;
std::vector<DefinedFunction *> toWrap;
for (Symbol *sym : symtab->getSymbols())
if (sym->isExported())
if (auto *f = dyn_cast<DefinedFunction>(sym))
toWrap.push_back(f);
for (auto *f : toWrap) {
auto funcNameStr = (f->getName() + ".command_export").str();
commandExportWrapperNames.push_back(funcNameStr);
const std::string &funcName = commandExportWrapperNames.back();
auto func = make<SyntheticFunction>(*f->getSignature(), funcName);
if (f->function->getExportName().hasValue())
func->setExportName(f->function->getExportName()->str());
else
func->setExportName(f->getName().str());
DefinedFunction *def =
symtab->addSyntheticFunction(funcName, f->flags, func);
def->markLive();
def->flags |= WASM_SYMBOL_EXPORTED;
def->flags &= ~WASM_SYMBOL_VISIBILITY_HIDDEN;
def->forceExport = f->forceExport;
f->flags |= WASM_SYMBOL_VISIBILITY_HIDDEN;
f->flags &= ~WASM_SYMBOL_EXPORTED;
f->forceExport = false;
out.functionSec->addFunction(func);
createCommandExportWrapper(f->getFunctionIndex(), def);
}
}
static void finalizeIndirectFunctionTable() {
if (!WasmSym::indirectFunctionTable)
return;
uint32_t tableSize = config->tableBase + out.elemSec->numEntries();
WasmLimits limits = {0, tableSize, 0};
if (WasmSym::indirectFunctionTable->isDefined() && !config->growableTable) {
limits.Flags |= WASM_LIMITS_FLAG_HAS_MAX;
limits.Maximum = limits.Initial;
}
WasmSym::indirectFunctionTable->setLimits(limits);
}
static void scanRelocations() {
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "scanRelocations: " << file->getName() << "\n");
for (InputChunk *chunk : file->functions)
scanRelocations(chunk);
for (InputChunk *chunk : file->segments)
scanRelocations(chunk);
for (auto &p : file->customSections)
scanRelocations(p);
}
}
void Writer::assignIndexes() {
// Seal the import section, since other index spaces such as function and
// global are affected by the number of imports.
out.importSec->seal();
for (InputFunction *func : symtab->syntheticFunctions)
out.functionSec->addFunction(func);
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "Functions: " << file->getName() << "\n");
for (InputFunction *func : file->functions)
out.functionSec->addFunction(func);
}
for (InputGlobal *global : symtab->syntheticGlobals)
out.globalSec->addGlobal(global);
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "Globals: " << file->getName() << "\n");
for (InputGlobal *global : file->globals)
out.globalSec->addGlobal(global);
}
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "Events: " << file->getName() << "\n");
for (InputEvent *event : file->events)
out.eventSec->addEvent(event);
}
for (ObjFile *file : symtab->objectFiles) {
LLVM_DEBUG(dbgs() << "Tables: " << file->getName() << "\n");
for (InputTable *table : file->tables)
out.tableSec->addTable(table);
}
for (InputTable *table : symtab->syntheticTables)
out.tableSec->addTable(table);
out.globalSec->assignIndexes();
}
static StringRef getOutputDataSegmentName(StringRef name) {
// We only support one thread-local segment, so we must merge the segments
// despite --no-merge-data-segments.
// We also need to merge .tbss into .tdata so they share the same offsets.
if (name.startswith(".tdata") || name.startswith(".tbss"))
return ".tdata";
if (!config->mergeDataSegments)
return name;
if (name.startswith(".text."))
return ".text";
if (name.startswith(".data."))
return ".data";
if (name.startswith(".bss."))
return ".bss";
if (name.startswith(".rodata."))
return ".rodata";
return name;
}
void Writer::createOutputSegments() {
for (ObjFile *file : symtab->objectFiles) {
for (InputSegment *segment : file->segments) {
if (!segment->live)
continue;
StringRef name = getOutputDataSegmentName(segment->getName());
OutputSegment *&s = segmentMap[name];
if (s == nullptr) {
LLVM_DEBUG(dbgs() << "new segment: " << name << "\n");
s = make<OutputSegment>(name);
if (config->sharedMemory)
s->initFlags = WASM_DATA_SEGMENT_IS_PASSIVE;
// Exported memories are guaranteed to be zero-initialized, so no need
// to emit data segments for bss sections.
// TODO: consider initializing bss sections with memory.fill
// instructions when memory is imported and bulk-memory is available.
if (!config->importMemory && !config->relocatable &&
name.startswith(".bss"))
s->isBss = true;
segments.push_back(s);
}
s->addInputSegment(segment);
LLVM_DEBUG(dbgs() << "added data: " << name << ": " << s->size << "\n");
}
}
// Sort segments by type, placing .bss last
std::stable_sort(segments.begin(), segments.end(),
[](const OutputSegment *a, const OutputSegment *b) {
auto order = [](StringRef name) {
return StringSwitch<int>(name)
.StartsWith(".tdata", 0)
.StartsWith(".rodata", 1)
.StartsWith(".data", 2)
.StartsWith(".bss", 4)
.Default(3);
};
return order(a->name) < order(b->name);
});
for (size_t i = 0; i < segments.size(); ++i)
segments[i]->index = i;
}
void Writer::combineOutputSegments() {
// With PIC code we currently only support a single data segment since
// we only have a single __memory_base to use as our base address.
// This pass combines all non-TLS data segments into a single .data
// segment.
// This restriction can be relaxed once we have extended constant
// expressions available:
// https://github.com/WebAssembly/extended-const
assert(config->isPic);
if (segments.size() <= 1)
return;
OutputSegment *combined = nullptr;
std::vector<OutputSegment *> new_segments;
for (OutputSegment *s : segments) {
if (s->name == ".tdata") {
new_segments.push_back(s);
} else {
if (!combined) {
combined = make<OutputSegment>(".data");
combined->startVA = s->startVA;
if (config->sharedMemory)
combined->initFlags = WASM_DATA_SEGMENT_IS_PASSIVE;
}
bool first = true;
for (InputSegment *inSeg : s->inputSegments) {
uint32_t alignment = first ? s->alignment : 0;
first = false;
#ifndef NDEBUG
uint64_t oldVA = inSeg->getVA();
#endif
combined->addInputSegment(inSeg, alignment);
#ifndef NDEBUG
uint64_t newVA = inSeg->getVA();
assert(oldVA == newVA);
#endif
}
}
}
if (combined) {
new_segments.push_back(combined);
segments = new_segments;
for (size_t i = 0; i < segments.size(); ++i)
segments[i]->index = i;
}
}
static void createFunction(DefinedFunction *func, StringRef bodyContent) {
std::string functionBody;
{
raw_string_ostream os(functionBody);
writeUleb128(os, bodyContent.size(), "function size");
os << bodyContent;
}
ArrayRef<uint8_t> body = arrayRefFromStringRef(saver.save(functionBody));
cast<SyntheticFunction>(func->function)->setBody(body);
}
bool Writer::needsPassiveInitialization(const OutputSegment *segment) {
return segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE &&
segment->name != ".tdata" && !segment->isBss;
}
bool Writer::hasPassiveInitializedSegments() {
return std::find_if(segments.begin(), segments.end(),
[this](const OutputSegment *s) {
return this->needsPassiveInitialization(s);
}) != segments.end();
}
void Writer::createSyntheticInitFunctions() {
if (config->relocatable)
return;
static WasmSignature nullSignature = {{}, {}};
// Passive segments are used to avoid memory being reinitialized on each
// thread's instantiation. These passive segments are initialized and
// dropped in __wasm_init_memory, which is registered as the start function
if (config->sharedMemory && hasPassiveInitializedSegments()) {
WasmSym::initMemory = symtab->addSyntheticFunction(
"__wasm_init_memory", WASM_SYMBOL_VISIBILITY_HIDDEN,
make<SyntheticFunction>(nullSignature, "__wasm_init_memory"));
WasmSym::initMemory->markLive();
}
if (config->isPic) {
// For PIC code we create synthetic functions that apply relocations.
// These get called from __wasm_call_ctors before the user-level
// constructors.
WasmSym::applyDataRelocs = symtab->addSyntheticFunction(
"__wasm_apply_data_relocs", WASM_SYMBOL_VISIBILITY_HIDDEN,
make<SyntheticFunction>(nullSignature, "__wasm_apply_data_relocs"));
WasmSym::applyDataRelocs->markLive();
if (out.globalSec->needsRelocations()) {
WasmSym::applyGlobalRelocs = symtab->addSyntheticFunction(
"__wasm_apply_global_relocs", WASM_SYMBOL_VISIBILITY_HIDDEN,
make<SyntheticFunction>(nullSignature, "__wasm_apply_global_relocs"));
WasmSym::applyGlobalRelocs->markLive();
}
}
if (WasmSym::applyGlobalRelocs && WasmSym::initMemory) {
WasmSym::startFunction = symtab->addSyntheticFunction(
"__wasm_start", WASM_SYMBOL_VISIBILITY_HIDDEN,
make<SyntheticFunction>(nullSignature, "__wasm_start"));
WasmSym::startFunction->markLive();
}
}
void Writer::createInitMemoryFunction() {
LLVM_DEBUG(dbgs() << "createInitMemoryFunction\n");
assert(WasmSym::initMemory);
assert(WasmSym::initMemoryFlag);
assert(hasPassiveInitializedSegments());
uint64_t flagAddress = WasmSym::initMemoryFlag->getVirtualAddress();
bool is64 = config->is64.getValueOr(false);
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
// Initialize memory in a thread-safe manner. The thread that successfully
// increments the flag from 0 to 1 is responsible for performing the
// memory initialization. Other threads sleep on the flag until the
// first thread finishes initializing memory, increments the flag to 2,
// and wakes all the other threads. Once the flag has been set to 2,
// subsequently started threads will skip the sleep. All threads
// unconditionally drop their passive data segments once memory has been
// initialized. The generated code is as follows:
//
// (func $__wasm_init_memory
// (if
// (i32.atomic.rmw.cmpxchg align=2 offset=0
// (i32.const $__init_memory_flag)
// (i32.const 0)
// (i32.const 1)
// )
// (then
// (drop
// (i32.atomic.wait align=2 offset=0
// (i32.const $__init_memory_flag)
// (i32.const 1)
// (i32.const -1)
// )
// )
// )
// (else
// ( ... initialize data segments ... )
// (i32.atomic.store align=2 offset=0
// (i32.const $__init_memory_flag)
// (i32.const 2)
// )
// (drop
// (i32.atomic.notify align=2 offset=0
// (i32.const $__init_memory_flag)
// (i32.const -1u)
// )
// )
// )
// )
// ( ... drop data segments ... )
// )
//
// When we are building with PIC, calculate the flag location using:
//
// (global.get $__memory_base)
// (i32.const $__init_memory_flag)
// (i32.add)
// With PIC code we cache the flag address in local 0
if (config->isPic) {
writeUleb128(os, 1, "num local decls");
writeUleb128(os, 1, "local count");
writeU8(os, is64 ? WASM_TYPE_I64 : WASM_TYPE_I32, "address type");
writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");
writePtrConst(os, flagAddress, is64, "flag address");
writeU8(os, WASM_OPCODE_I32_ADD, "add");
writeU8(os, WASM_OPCODE_LOCAL_SET, "local.set");
writeUleb128(os, 0, "local 0");
} else {
writeUleb128(os, 0, "num locals");
}
auto writeGetFlagAddress = [&]() {
if (config->isPic) {
writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
writeUleb128(os, 0, "local 0");
} else {
writePtrConst(os, flagAddress, is64, "flag address");
}
};
// Atomically check whether this is the main thread.
writeGetFlagAddress();
writeI32Const(os, 0, "expected flag value");
writeI32Const(os, 1, "flag value");
writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
writeUleb128(os, WASM_OPCODE_I32_RMW_CMPXCHG, "i32.atomic.rmw.cmpxchg");
writeMemArg(os, 2, 0);
writeU8(os, WASM_OPCODE_IF, "IF");
writeU8(os, WASM_TYPE_NORESULT, "blocktype");
// Did not increment 0, so wait for main thread to initialize memory
writeGetFlagAddress();
writeI32Const(os, 1, "expected flag value");
writeI64Const(os, -1, "timeout");
writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
writeUleb128(os, WASM_OPCODE_I32_ATOMIC_WAIT, "i32.atomic.wait");
writeMemArg(os, 2, 0);
writeU8(os, WASM_OPCODE_DROP, "drop");
writeU8(os, WASM_OPCODE_ELSE, "ELSE");
// Did increment 0, so conditionally initialize passive data segments
for (const OutputSegment *s : segments) {
if (needsPassiveInitialization(s)) {
// destination address
writePtrConst(os, s->startVA, is64, "destination address");
if (config->isPic) {
writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(),
"memory_base");
writeU8(os, WASM_OPCODE_I32_ADD, "i32.add");
}
// source segment offset
writeI32Const(os, 0, "segment offset");
// memory region size
writeI32Const(os, s->size, "memory region size");
// memory.init instruction
writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
writeUleb128(os, WASM_OPCODE_MEMORY_INIT, "memory.init");
writeUleb128(os, s->index, "segment index immediate");
writeU8(os, 0, "memory index immediate");
}
}
// Set flag to 2 to mark end of initialization
writeGetFlagAddress();
writeI32Const(os, 2, "flag value");
writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
writeUleb128(os, WASM_OPCODE_I32_ATOMIC_STORE, "i32.atomic.store");
writeMemArg(os, 2, 0);
// Notify any waiters that memory initialization is complete
writeGetFlagAddress();
writeI32Const(os, -1, "number of waiters");
writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
writeUleb128(os, WASM_OPCODE_ATOMIC_NOTIFY, "atomic.notify");
writeMemArg(os, 2, 0);
writeU8(os, WASM_OPCODE_DROP, "drop");
writeU8(os, WASM_OPCODE_END, "END");
// Unconditionally drop passive data segments
for (const OutputSegment *s : segments) {
if (needsPassiveInitialization(s)) {
// data.drop instruction
writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
writeUleb128(os, WASM_OPCODE_DATA_DROP, "data.drop");
writeUleb128(os, s->index, "segment index immediate");
}
}
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::initMemory, bodyContent);
}
void Writer::createStartFunction() {
if (WasmSym::startFunction) {
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, WasmSym::initMemory->getFunctionIndex(),
"function index");
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, WasmSym::applyGlobalRelocs->getFunctionIndex(),
"function index");
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::startFunction, bodyContent);
} else if (WasmSym::initMemory) {
WasmSym::startFunction = WasmSym::initMemory;
} else if (WasmSym::applyGlobalRelocs) {
WasmSym::startFunction = WasmSym::applyGlobalRelocs;
}
}
// For -shared (PIC) output, we create a synthetic function which will
// apply any relocations to the data segments on startup. This function is
// called `__wasm_apply_data_relocs` and is added at the beginning of
// `__wasm_call_ctors` before any of the constructors run.
void Writer::createApplyDataRelocationsFunction() {
LLVM_DEBUG(dbgs() << "createApplyDataRelocationsFunction\n");
// First write the body's contents to a string.
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
for (const OutputSegment *seg : segments)
for (const InputSegment *inSeg : seg->inputSegments)
inSeg->generateRelocationCode(os);
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::applyDataRelocs, bodyContent);
}
// Similar to createApplyDataRelocationsFunction but generates relocation code
// for WebAssembly globals. Because these globals are not shared between
// threads, these relocations need to run on every thread.
void Writer::createApplyGlobalRelocationsFunction() {
// First write the body's contents to a string.
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
out.globalSec->generateRelocationCode(os);
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::applyGlobalRelocs, bodyContent);
}
// Create the synthetic "__wasm_call_ctors" function based on the ctor
// functions in the input objects.
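// The generated body is roughly (sketch):
//
//   (func $__wasm_call_ctors
//     (call $__wasm_apply_data_relocs)  ;; only for PIC output
//     (call $ctor_1)
//     ...
//     (call $ctor_n))                   ;; any returned values are dropped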
void Writer::createCallCtorsFunction() {
// If __wasm_call_ctors isn't referenced, there aren't any ctors, and we
// aren't calling `__wasm_apply_data_relocs` for Emscripten-style PIC, don't
// define the `__wasm_call_ctors` function.
if (!WasmSym::callCtors->isLive() && !WasmSym::applyDataRelocs &&
initFunctions.empty())
return;
// First write the body's contents to a string.
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
if (WasmSym::applyDataRelocs) {
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, WasmSym::applyDataRelocs->getFunctionIndex(),
"function index");
}
// Call constructors
for (const WasmInitEntry &f : initFunctions) {
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, f.sym->getFunctionIndex(), "function index");
for (size_t i = 0; i < f.sym->signature->Returns.size(); i++) {
writeU8(os, WASM_OPCODE_DROP, "DROP");
}
}
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::callCtors, bodyContent);
}
// Create a wrapper around a function export which calls the
// static constructors and destructors.
void Writer::createCommandExportWrapper(uint32_t functionIndex,
DefinedFunction *f) {
// First write the body's contents to a string.
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
// Call `__wasm_call_ctors` which calls static constructors (and
// applies any runtime relocations in Emscripten-style PIC mode)
if (WasmSym::callCtors->isLive()) {
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, WasmSym::callCtors->getFunctionIndex(),
"function index");
}
// Call the user's code, leaving any return values on the operand stack.
for (size_t i = 0; i < f->signature->Params.size(); ++i) {
writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
writeUleb128(os, i, "local index");
}
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, functionIndex, "function index");
// Call the function that calls the destructors.
if (DefinedFunction *callDtors = WasmSym::callDtors) {
writeU8(os, WASM_OPCODE_CALL, "CALL");
writeUleb128(os, callDtors->getFunctionIndex(), "function index");
}
// End the function, returning the return values from the user's code.
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(f, bodyContent);
}
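// Create the `__wasm_init_tls` function. Each thread is expected to call it
// at startup with a block of memory to use as its thread-local storage; the
// expected usage, per the original design, is roughly:
//
//   __wasm_init_tls(malloc(__builtin_wasm_tls_size()));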
void Writer::createInitTLSFunction() {
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
OutputSegment *tlsSeg = nullptr;
for (auto *seg : segments) {
if (seg->name == ".tdata") {
tlsSeg = seg;
break;
}
}
writeUleb128(os, 0, "num locals");
if (tlsSeg) {
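// The TLS block address is passed in as the function's first parameter
// (local 0); point __tls_base at it before initializing the block.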
writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
writeUleb128(os, 0, "local index");
writeU8(os, WASM_OPCODE_GLOBAL_SET, "global.set");
writeUleb128(os, WasmSym::tlsBase->getGlobalIndex(), "global index");
// FIXME(wvo): this local needs to be I64 in wasm64, or we need an extend op.
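// Copy the .tdata segment into the block: memory.init takes the destination
// address (local 0), the offset within the segment (0), and the number of
// bytes to copy (the segment size).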
writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
writeUleb128(os, 0, "local index");
writeI32Const(os, 0, "segment offset");
writeI32Const(os, tlsSeg->size, "memory region size");
writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
writeUleb128(os, WASM_OPCODE_MEMORY_INIT, "MEMORY.INIT");
writeUleb128(os, tlsSeg->index, "segment index immediate");
writeU8(os, 0, "memory index immediate");
}
writeU8(os, WASM_OPCODE_END, "end function");
}
createFunction(WasmSym::initTLS, bodyContent);
}
// Populate InitFunctions vector with init functions from all input objects.
// This is then used either when creating the output linking section or to
// synthesize the "__wasm_call_ctors" function.
void Writer::calculateInitFunctions() {
if (!config->relocatable && !WasmSym::callCtors->isLive())
return;
for (ObjFile *file : symtab->objectFiles) {
const WasmLinkingData &l = file->getWasmObj()->linkingData();
for (const WasmInitFunc &f : l.InitFunctions) {
FunctionSymbol *sym = file->getFunctionSymbol(f.Symbol);
// Comdat exclusions can cause init functions to be discarded.
if (sym->isDiscarded() || !sym->isLive())
continue;
if (sym->signature->Params.size() != 0)
error("constructor functions cannot take arguments: " + toString(*sym));
LLVM_DEBUG(dbgs() << "initFunctions: " << toString(*sym) << "\n");
initFunctions.emplace_back(WasmInitEntry{sym, f.Priority});
}
}
// Sort in order of priority (lowest first) so that they are called
// in the correct order.
llvm::stable_sort(initFunctions,
[](const WasmInitEntry &l, const WasmInitEntry &r) {
return l.priority < r.priority;
});
}
void Writer::createSyntheticSections() {
out.dylinkSec = make<DylinkSection>();
out.typeSec = make<TypeSection>();
out.importSec = make<ImportSection>();
out.functionSec = make<FunctionSection>();
out.tableSec = make<TableSection>();
out.memorySec = make<MemorySection>();
out.eventSec = make<EventSection>();
out.globalSec = make<GlobalSection>();
out.exportSec = make<ExportSection>();
out.startSec = make<StartSection>();
out.elemSec = make<ElemSection>();
out.producersSec = make<ProducersSection>();
out.targetFeaturesSec = make<TargetFeaturesSection>();
}
void Writer::createSyntheticSectionsPostLayout() {
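// These sections summarize the final output segments, so they can only be
// created after memory layout (and, for PIC, segment combining) has run.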
out.dataCountSec = make<DataCountSection>(segments);
out.linkingSec = make<LinkingSection>(initFunctions, segments);
out.nameSec = make<NameSection>(segments);
}
void Writer::run() {
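// Neither relocatable output nor PIC knows its final data addresses at link
// time, so data layout starts from a base address of zero.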
if (config->relocatable || config->isPic)
config->globalBase = 0;
// For PIC code the table base is assigned dynamically by the loader.
// For non-PIC, we start at 1 so that accessing table index 0 always traps.
if (!config->isPic) {
config->tableBase = 1;
if (WasmSym::definedTableBase)
WasmSym::definedTableBase->setVirtualAddress(config->tableBase);
}
log("-- createOutputSegments");
createOutputSegments();
log("-- createSyntheticSections");
createSyntheticSections();
log("-- layoutMemory");
layoutMemory();
if (!config->relocatable) {
// Create linker synthesized __start_SECNAME/__stop_SECNAME symbols
// This has to be done after memory layout is performed.
for (const OutputSegment *seg : segments) {
addStartStopSymbols(seg);
}
}
// Delay reporting errors about explicit exports until after addStartStopSymbols,
// which can create optional symbols.
for (auto &entry : config->exportedSymbols) {
StringRef name = entry.first();
Symbol *sym = symtab->find(name);
if (sym && sym->isDefined())
sym->forceExport = true;
else if (config->unresolvedSymbols == UnresolvedPolicy::ReportError)
error(Twine("symbol exported via --export not found: ") + name);
else if (config->unresolvedSymbols == UnresolvedPolicy::Warn)
warn(Twine("symbol exported via --export not found: ") + name);
}
if (config->isPic) {
log("-- combineOutputSegments");
combineOutputSegments();
}
log("-- createSyntheticSectionsPostLayout");
createSyntheticSectionsPostLayout();
log("-- populateProducers");
populateProducers();
log("-- calculateImports");
calculateImports();
log("-- scanRelocations");
scanRelocations();
log("-- finalizeIndirectFunctionTable");
finalizeIndirectFunctionTable();
log("-- createSyntheticInitFunctions");
createSyntheticInitFunctions();
log("-- assignIndexes");
assignIndexes();
log("-- calculateInitFunctions");
calculateInitFunctions();
if (!config->relocatable) {
// Create linker synthesized functions
if (WasmSym::applyDataRelocs)
createApplyDataRelocationsFunction();
if (WasmSym::applyGlobalRelocs)
createApplyGlobalRelocationsFunction();
if (WasmSym::initMemory)
createInitMemoryFunction();
createStartFunction();
createCallCtorsFunction();
// Create export wrappers for commands if needed.
//
// If the input contains a call to `__wasm_call_ctors`, either in one of
// the input objects or an explicit export from the command-line, we
// assume ctors and dtors are taken care of already.
if (!config->relocatable && !config->isPic &&
!WasmSym::callCtors->isUsedInRegularObj &&
!WasmSym::callCtors->isExported()) {
log("-- createCommandExportWrappers");
createCommandExportWrappers();
}
}
if (WasmSym::initTLS && WasmSym::initTLS->isLive())
createInitTLSFunction();
if (errorCount())
return;
log("-- calculateTypes");
calculateTypes();
log("-- calculateExports");
calculateExports();
log("-- calculateCustomSections");
calculateCustomSections();
log("-- populateSymtab");
populateSymtab();
log("-- populateTargetFeatures");
populateTargetFeatures();
log("-- addSections");
addSections();
if (errorHandler().verbose) {
log("Defined Functions: " + Twine(out.functionSec->inputFunctions.size()));
log("Defined Globals : " + Twine(out.globalSec->numGlobals()));
log("Defined Events : " + Twine(out.eventSec->inputEvents.size()));
log("Defined Tables : " + Twine(out.tableSec->inputTables.size()));
log("Function Imports : " +
Twine(out.importSec->getNumImportedFunctions()));
log("Global Imports : " + Twine(out.importSec->getNumImportedGlobals()));
log("Event Imports : " + Twine(out.importSec->getNumImportedEvents()));
log("Table Imports : " + Twine(out.importSec->getNumImportedTables()));
for (ObjFile *file : symtab->objectFiles)
file->dumpInfo();
}
createHeader();
log("-- finalizeSections");
finalizeSections();
log("-- writeMapFile");
writeMapFile(outputSections);
log("-- openFile");
openFile();
if (errorCount())
return;
writeHeader();
log("-- writeSections");
writeSections();
if (errorCount())
return;
if (Error e = buffer->commit())
fatal("failed to write the output file: " + toString(std::move(e)));
}
// Open a result file.
void Writer::openFile() {
log("writing: " + config->outputFile);
Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
FileOutputBuffer::create(config->outputFile, fileSize,
FileOutputBuffer::F_executable);
if (!bufferOrErr)
error("failed to open " + config->outputFile + ": " +
toString(bufferOrErr.takeError()));
else
buffer = std::move(*bufferOrErr);
}
void Writer::createHeader() {
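// The module header is the 4-byte "\0asm" magic followed by the 4-byte
// version field.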
raw_string_ostream os(header);
writeBytes(os, WasmMagic, sizeof(WasmMagic), "wasm magic");
writeU32(os, WasmVersion, "wasm version");
os.flush();
fileSize += header.size();
}
void writeResult() { Writer().run(); }
} // namespace wasm
} // namespace lld