Merge branch 'release-6.3-pre-format' into master-format

This merges the release-6.3 branch from just before it was fully
formatted. There were quite a few conflicts, which are resolved here.
CoroFlow had a check for OOM errors introduced in 6.3, but it did not
seem applicable to the new implementation, which appears to use Boost.
Vishesh Yadav 2021-03-09 21:08:58 -08:00
commit 2bb4f2e59f
23 changed files with 208 additions and 38 deletions

View File

@ -220,11 +220,18 @@ func (o NetworkOptions) SetExternalClientDirectory(param string) error {
return o.setOpt(63, []byte(param))
}
// Prevents connections through the local client, allowing only connections through externally loaded client libraries. Intended primarily for testing.
// Prevents connections through the local client, allowing only connections through externally loaded client libraries.
func (o NetworkOptions) SetDisableLocalClient() error {
return o.setOpt(64, nil)
}
// Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client.
//
// Parameter: Number of client threads to be spawned. Each cluster will be serviced by a single client thread.
func (o NetworkOptions) SetClientThreadsPerVersion(param int64) error {
return o.setOpt(65, int64ToBytes(param))
}
// Disables logging of client statistics, such as sampled transaction activity.
func (o NetworkOptions) SetDisableClientStatisticsLogging() error {
return o.setOpt(70, nil)
@ -521,6 +528,25 @@ func (o TransactionOptions) SetReportConflictingKeys() error {
return o.setOpt(712, nil)
}
// By default, the special key space will only allow users to read from exactly one module (a subspace in the special key space). Use this option to allow reading from zero or more modules. Users who set this option should be prepared for new modules, which may have different behaviors than the modules they're currently reading. For example, a new module might block or return an error.
func (o TransactionOptions) SetSpecialKeySpaceRelaxed() error {
return o.setOpt(713, nil)
}
// Adds a tag to the transaction that can be used to apply manual targeted throttling. At most 5 tags can be set on a transaction.
//
// Parameter: String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters.
func (o TransactionOptions) SetTag(param string) error {
return o.setOpt(800, []byte(param))
}
// Adds a tag to the transaction that can be used to apply manual or automatic targeted throttling. At most 5 tags can be set on a transaction.
//
// Parameter: String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters.
func (o TransactionOptions) SetAutoThrottleTag(param string) error {
return o.setOpt(801, []byte(param))
}
type StreamingMode int
const (
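
The Go setters above are generated from the shared fdb.options file, so the same client_threads_per_version knob is reachable through the C API that the bindings wrap. Below is a minimal sketch of enabling it at network setup time; the FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION enum name is assumed from the usual option-name generation and is not taken from this diff.

#define FDB_API_VERSION 630
#include <foundationdb/fdb_c.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Abort on any setup error; fdb_get_error returns a static description string.
static void check(fdb_error_t err) {
    if (err) {
        std::fprintf(stderr, "fdb error: %s\n", fdb_get_error(err));
        std::abort();
    }
}

int main() {
    check(fdb_select_api_version(630));

    // Int-typed network options are passed as a pointer to an 8-byte value.
    int64_t threads = 4;
    check(fdb_network_set_option(FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, // assumed name
                                 (const uint8_t*)&threads, sizeof(threads)));
    // A value greater than one implies disable_local_client, so an external
    // client library directory must also be configured.

    check(fdb_setup_network());
    // fdb_run_network() would normally be started on a dedicated thread here.
    return 0;
}

The per-transaction tag and auto_throttle_tag options added above are set the same generated way at transaction scope (SetTag / SetAutoThrottleTag in the Go binding).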

View File

@ -2,6 +2,28 @@
Release Notes
#############
6.3.11
======
* Added a hint field in the trace event when all replicas of some data are lost. `(PR #4209) <https://github.com/apple/foundationdb/pull/4209>`_
* Rewrote SQLite injected fault handling. `(PR #4212) <https://github.com/apple/foundationdb/pull/4212>`_
* Add a SevWarnAlways trace line to help debug a rare failure. `(PR #4214) <https://github.com/apple/foundationdb/pull/4214>`_
* Use VFSAsyncFile::checkInjectedError to detect injected faults. `(PR #4253) <https://github.com/apple/foundationdb/pull/4253>`_
* Build on Windows using VS 2019 + LLVM/Clang. `(PR #4258) <https://github.com/apple/foundationdb/pull/4258>`_
* RateControl support in AFCCached to enable write op throttling. The feature is disabled by default. `(PR #4229) <https://github.com/apple/foundationdb/pull/4229>`_
* Add knobs for prefix bloom filters and larger block cache for RocksDB. `(PR #4201) <https://github.com/apple/foundationdb/pull/4201>`_
* Adding debug tools to FDB runtime image. `(PR #4247) <https://github.com/apple/foundationdb/pull/4247>`_
* Fix bug in simulated coordinator selection. `(PR #4285) <https://github.com/apple/foundationdb/pull/4285>`_
* Add option to prevent synchronous file deletes on reads for RocksDB. `(PR #4270) <https://github.com/apple/foundationdb/pull/4270>`_
* Report warning when TLS verification fails. `(PR #4299) <https://github.com/apple/foundationdb/pull/4299>`_
* Support multiple worker threads for each version of client that is loaded so that each cluster will be serviced by a client thread. `(PR #4269) <https://github.com/apple/foundationdb/pull/4269>`_
* Reboot simulated process on io_timeout error. `(PR #4345) <https://github.com/apple/foundationdb/pull/4345>`_
* Fix Snapshot backup test failure. `(PR #4372) <https://github.com/apple/foundationdb/pull/4372>`_
* fdbcli: Output errors and warnings to stderr. `(PR #4332) <https://github.com/apple/foundationdb/pull/4332>`_
* Do not generate machine id in locality field if it is set by the user. `(PR #4022) <https://github.com/apple/foundationdb/pull/4022>`_
* Make the RocksDB init method idempotent. `(PR #4400) <https://github.com/apple/foundationdb/pull/4400>`_
* Fix bugs turned up by _GLIBCXX_DEBUG. `(PR #4301) <https://github.com/apple/foundationdb/pull/4301>`_
* Add New Unit and Integration Tests, and associated infrastructure. `(PR #4366) <https://github.com/apple/foundationdb/pull/4366>`_
6.3.10
======
* Make fault tolerance metric calculation in HA clusters consistent with 6.2 branch. `(PR #4175) <https://github.com/apple/foundationdb/pull/4175>`_
@ -88,7 +110,7 @@ Status
* Removed fields ``worst_version_lag_storage_server`` and ``limiting_version_lag_storage_server`` from the ``cluster.qos`` section. The ``worst_data_lag_storage_server`` and ``limiting_data_lag_storage_server`` objects can be used instead. `(PR #3196) <https://github.com/apple/foundationdb/pull/3196>`_
* If a process is unable to flush trace logs to disk, the problem will now be reported via the output of ``status`` command inside ``fdbcli``. `(PR #2605) <https://github.com/apple/foundationdb/pull/2605>`_ `(PR #2820) <https://github.com/apple/foundationdb/pull/2820>`_
* When a configuration key is changed, it will always be included in ``status json`` output, even the value is reverted back to the default value. [6.3.5] `(PR #3610) <https://github.com/apple/foundationdb/pull/3610>`_
* Added transactions.rejected_for_queued_too_long for bookkeeping the number of transactions rejected by commit proxy because its queuing time exceeds MVCC window. `(PR #4353) <https://github.com/apple/foundationdb/pull/4353>`_
* Added transactions.rejected_for_queued_too_long for bookkeeping the number of transactions rejected by commit proxy because its queuing time exceeds MVCC window.[6.3.11] `(PR #4353) <https://github.com/apple/foundationdb/pull/4353>`_
Bindings
--------
@ -140,6 +162,7 @@ Fixes from previous versions
* The 6.3.5 patch release includes all fixes from the patch releases 6.2.24 and 6.2.25. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
* The 6.3.9 patch release includes all fixes from the patch releases 6.2.26. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
* The 6.3.10 patch release includes all fixes from the patch releases 6.2.27-6.2.29 :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
* The 6.3.11 patch release includes all fixes from the patch releases 6.2.30-6.2.32 :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
Fixes only impacting 6.3.0+
---------------------------

View File

@ -771,7 +771,7 @@ void MultiVersionTransaction::reset() {
// MultiVersionDatabase
MultiVersionDatabase::MultiVersionDatabase(MultiVersionApi* api, int threadIdx, std::string clusterFilePath,
Reference<IDatabase> db, bool openConnectors)
: dbState(new DatabaseState()), threadIdx(threadIdx) {
: dbState(new DatabaseState()) {
dbState->db = db;
dbState->dbVar->set(db);

View File

@ -397,7 +397,6 @@ private:
};
const Reference<DatabaseState> dbState;
const int threadIdx;
friend class MultiVersionTransaction;
};

View File

@ -57,7 +57,7 @@ const Value keyServersValue( Standalone<RangeResultRef> result, const std::vecto
std::vector<Tag> destTag;
bool foundOldLocality = false;
for (const KeyValueRef kv : result) {
for (const KeyValueRef& kv : result) {
UID uid = decodeServerTagKey(kv.key);
if (std::find(src.begin(), src.end(), uid) != src.end()) {
srcTag.push_back( decodeServerTagValue(kv.value) );
@ -109,7 +109,7 @@ void decodeKeyServersValue( Standalone<RangeResultRef> result, const ValueRef& v
src.clear();
dest.clear();
for (const KeyValueRef kv : result) {
for (const KeyValueRef& kv : result) {
Tag tag = decodeServerTagValue(kv.value);
if (std::find(srcTag.begin(), srcTag.end(), tag) != srcTag.end()) {
src.push_back( decodeServerTagKey(kv.key) );
@ -122,7 +122,7 @@ void decodeKeyServersValue( Standalone<RangeResultRef> result, const ValueRef& v
std::sort(dest.begin(), dest.end());
if(missingIsError && (src.size() != srcTag.size() || dest.size() != destTag.size())) {
TraceEvent(SevError, "AttemptedToDecodeMissingTag");
for (const KeyValueRef kv : result) {
for (const KeyValueRef& kv : result) {
Tag tag = decodeServerTagValue(kv.value);
UID serverID = decodeServerTagKey(kv.key);
TraceEvent("TagUIDMap").detail("Tag", tag.toString()).detail("UID", serverID.toString());
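
Several hunks in this merge, here and again in the backup progress and backup worker files below, switch range-for loops from taking elements by value to by const reference. A minimal standalone sketch of the difference, using a hypothetical Probe type: a loop variable declared const Probe copy-constructs every element it visits, while const Probe& binds to the element directly.

#include <cstdio>
#include <vector>

// Counts how many times an element has been copied.
struct Probe {
    int copies = 0;
    Probe() = default;
    Probe(const Probe& other) : copies(other.copies + 1) {}
};

int main() {
    std::vector<Probe> v(3);

    int byValue = 0;
    for (const Probe p : v) byValue += p.copies;   // one copy per element
    int byRef = 0;
    for (const Probe& p : v) byRef += p.copies;    // no copies at all

    std::printf("by value: %d copies, by reference: %d\n", byValue, byRef); // 3 vs 0
    return 0;
}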

View File

@ -107,6 +107,33 @@ public:
return Void();
}
ACTOR static Future<Void> renameFile(std::string from, std::string to) {
state TaskPriority taskID = g_network->getCurrentTask();
state Promise<Void> p;
state eio_req* r = eio_rename(from.c_str(), to.c_str(), 0, eio_callback, &p);
try {
wait(p.getFuture());
} catch (...) {
g_network->setCurrentTask(taskID);
eio_cancel(r);
throw;
}
try {
state int result = r->result;
if(result == -1) {
TraceEvent(SevError, "FileRenameError").detail("Errno", r->errorno);
throw internal_error();
} else {
wait(delay(0, taskID));
return Void();
}
} catch (Error& e) {
state Error _e = e;
wait(delay(0, taskID));
throw _e;
}
}
ACTOR static Future<std::time_t> lastWriteTime( std::string filename ) {
EIO_STRUCT_STAT statdata = wait(stat_impl(filename));
return statdata.st_mtime;

View File

@ -128,6 +128,12 @@ public:
return result.getFuture();
}
static Future<Void> renameFile(std::string const& from, std::string const& to) {
::renameFile(from, to);
return Void();
}
Future<Void> write(void const* data, int length, int64_t offset) override {
/*
FIXME

View File

@ -132,3 +132,55 @@ TEST_CASE("/fileio/incrementalDelete" ) {
wait(IAsyncFileSystem::filesystem()->incrementalDeleteFile(filename, true));
return Void();
}
TEST_CASE("/fileio/rename") {
// create a file
state int64_t fileSize = 100e6;
state std::string filename = "/tmp/__JUNK__." + deterministicRandom()->randomUniqueID().toString();
state std::string renamedFile = "/tmp/__RENAMED_JUNK__." + deterministicRandom()->randomUniqueID().toString();
state std::unique_ptr<char[]> data(new char[4096]);
state std::unique_ptr<char[]> readData(new char[4096]);
state Reference<IAsyncFile> f = wait(IAsyncFileSystem::filesystem()->open(
filename, IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE,
0644));
;
wait(f->sync());
wait(f->truncate(fileSize));
memset(data.get(), 0, 4096);
// write a random string at the beginning of the file which we can verify after rename
for (int i = 0; i < 16; ++i) {
data[i] = deterministicRandom()->randomAlphaNumeric();
}
// write first and block
wait(f->write(data.get(), 4096, 0));
wait(f->write(data.get(), 4096, fileSize - 4096));
wait(f->sync());
// close file
f.clear();
wait(IAsyncFileSystem::filesystem()->renameFile(filename, renamedFile));
Reference<IAsyncFile> _f = wait(IAsyncFileSystem::filesystem()->open(renamedFile, IAsyncFile::OPEN_READONLY, 0));
f = _f;
// verify rename happened
bool renamedExists = false;
auto bName = basename(renamedFile);
auto files = platform::listFiles("/tmp/");
for (const auto& file : files) {
if (file == bName) {
renamedExists = true;
}
ASSERT(file != filename);
}
ASSERT(renamedExists);
// verify magic string at beginning of file
int length = wait(f->read(readData.get(), 4096, 0));
ASSERT(length == 4096);
ASSERT(memcmp(readData.get(), data.get(), 4096) == 0);
// close the file
f.clear();
// clean up
wait(IAsyncFileSystem::filesystem()->deleteFile(renamedFile, true));
return Void();
}

View File

@ -20,6 +20,7 @@
#ifndef FLOW_IASYNCFILE_H
#define FLOW_IASYNCFILE_H
#include <string>
#pragma once
#include <ctime>
@ -98,6 +99,9 @@ public:
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
virtual Future<Void> deleteFile(const std::string& filename, bool mustBeDurable) = 0;
// renames the file, doesn't sync the directory
virtual Future<Void> renameFile(std::string const& from, std::string const& to) = 0;
// Unlinks a file and then deletes it slowly by truncating the file repeatedly.
// If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
virtual Future<Void> incrementalDeleteFile(const std::string& filename, bool mustBeDurable);
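
As the comment above says, renameFile renames the file but does not sync the directory, so the rename by itself is not guaranteed to survive a power failure. A minimal POSIX sketch, outside the IAsyncFile API and shown only to illustrate what a durability-sensitive caller would still have to do, is to fsync the parent directory once the rename completes:

#include <fcntl.h>
#include <unistd.h>

// Open and fsync a directory so that a completed rename() of an entry inside
// it is durable on disk. Returns 0 on success, -1 on error with errno set.
int fsyncDir(const char* dirPath) {
    int fd = open(dirPath, O_RDONLY | O_DIRECTORY);
    if (fd < 0) return -1;
    int rc = fsync(fd);
    if (close(fd) != 0) rc = -1;
    return rc;
}

In flow code this would be issued through the async file layer rather than as a blocking syscall; the snippet only shows which fsync the renameFile contract leaves to the caller.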

View File

@ -112,6 +112,10 @@ Net2FileSystem::Net2FileSystem(double ioTimeout, const std::string& fileSystemPa
#endif
}
Future<Void> Net2FileSystem::renameFile(const std::string &from, const std::string &to) {
return Net2AsyncFile::renameFile(from, to);
}
void Net2FileSystem::stop() {
Net2AsyncFile::stop();
}

View File

@ -20,6 +20,7 @@
#ifndef FLOW_NET2FILESYSTEM_H
#define FLOW_NET2FILESYSTEM_H
#include <string>
#pragma once
#include "fdbrpc/IAsyncFile.h"
@ -29,12 +30,14 @@ public:
// Opens a file for asynchronous I/O
Future<Reference<class IAsyncFile>> open(const std::string& filename, int64_t flags, int64_t mode) override;
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
Future<Void> deleteFile(const std::string& filename, bool mustBeDurable) override;
// Returns the time of the last modification of the file.
Future<std::time_t> lastWriteTime(const std::string& filename) override;
Future<Void> renameFile(std::string const& from, std::string const& to) override;
//void init();
static void stop();

View File

@ -19,9 +19,7 @@
*/
#include <cinttypes>
#include <deque>
#include <memory>
#include <vector>
#include "fdbrpc/simulator.h"
#define BOOST_SYSTEM_NO_LIB
@ -2085,6 +2083,17 @@ Future<Void> Sim2FileSystem::deleteFile(const std::string& filename, bool mustBe
return Sim2::deleteFileImpl(&g_sim2, filename, mustBeDurable);
}
ACTOR Future<Void> renameFileImpl(std::string from, std::string to) {
wait(delay(0.5*deterministicRandom()->random01()));
::renameFile(from, to);
wait(delay(0.5*deterministicRandom()->random01()));
return Void();
}
Future<Void> Sim2FileSystem::renameFile(std::string const& from, std::string const& to) {
return renameFileImpl(from, to);
}
Future<std::time_t> Sim2FileSystem::lastWriteTime(const std::string& filename) {
// TODO: update this map upon file writes.
static std::map<std::string, double> fileWrites;

View File

@ -21,6 +21,7 @@
#ifndef FLOW_SIMULATOR_H
#define FLOW_SIMULATOR_H
#include "flow/ProtocolVersion.h"
#include <string>
#pragma once
#include "flow/flow.h"
@ -386,11 +387,13 @@ public:
// Opens a file for asynchronous I/O
Future<Reference<class IAsyncFile>> open(const std::string& filename, int64_t flags, int64_t mode) override;
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
// Deletes the given file. If mustBeDurable, returns only when the file is guaranteed to be deleted even after a power failure.
Future<Void> deleteFile(const std::string& filename, bool mustBeDurable) override;
Future<std::time_t> lastWriteTime(const std::string& filename) override;
Future<Void> renameFile(std::string const& from, std::string const& to) override;
Sim2FileSystem() {}
~Sim2FileSystem() override {}

View File

@ -121,7 +121,7 @@ std::map<std::tuple<LogEpoch, Version, int>, std::map<Tag, Version>> BackupProgr
}
}
for (const Tag tag : tags) { // tags without progress data
for (const Tag& tag : tags) { // tags without progress data
tagVersions.insert({ tag, adjustedBeginVersion });
TraceEvent("BackupVersionRange", dbgid)
.detail("OldEpoch", epoch)

View File

@ -508,7 +508,7 @@ ACTOR Future<Void> setBackupKeys(BackupData* self, std::map<UID, Version> savedL
state std::vector<Future<Optional<Version>>> prevVersions;
state std::vector<BackupConfig> versionConfigs;
state std::vector<Future<Optional<bool>>> allWorkersReady;
for (const auto [uid, version] : savedLogVersions) {
for (const auto& [uid, version] : savedLogVersions) {
versionConfigs.emplace_back(uid);
prevVersions.push_back(versionConfigs.back().latestBackupWorkerSavedVersion().get(tr));
allWorkersReady.push_back(versionConfigs.back().allWorkerStarted().get(tr));
@ -573,7 +573,7 @@ ACTOR Future<Void> monitorBackupProgress(BackupData* self) {
if (self->recruitedEpoch == self->oldestBackupEpoch) {
// update update progress so far if previous epochs are done
Version v = std::numeric_limits<Version>::max();
for (const auto [tag, version] : tagVersions) {
for (const auto& [tag, version] : tagVersions) {
v = std::min(v, version);
}
savedLogVersions.emplace(uid, v);
@ -783,7 +783,7 @@ ACTOR Future<Void> saveMutationsToFile(BackupData* self, Version popVersion, int
.detail("TagId", self->tag.id)
.detail("File", file->getFileName());
}
for (const UID uid : activeUids) {
for (const UID& uid : activeUids) {
self->backups[uid].lastSavedVersion = popVersion + 1;
}

View File

@ -200,7 +200,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
ACTOR Future<Void> stopOnError( WorkPool* w ) {
try {
wait( w->getError() );
wait(w->getError());
ASSERT(false);
} catch (Error& e) {
w->stop(e);
@ -221,7 +221,7 @@ public:
}
Future<Void> getError() const override { return pool->anyError.getResult(); }
void addThread(IThreadPoolReceiver* userData) override {
void addThread(IThreadPoolReceiver* userData, const char*) override {
checkError();
auto w = new Worker(pool.getPtr(), userData);

View File

@ -2137,7 +2137,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
.detail("Primary", primary)
.detail("AddedTeams", addedTeams)
.detail("TeamsToBuild", teamsToBuild)
.detail("CurrentTeams", teams.size())
.detail("CurrentServerTeams", teams.size())
.detail("DesiredTeams", desiredTeams)
.detail("MaxTeams", maxTeams)
.detail("StorageTeamSize", configuration.storageTeamSize)
@ -2705,7 +2705,9 @@ ACTOR Future<Void> updateServerMetrics( TCServerInfo *server ) {
}
} else if ( server->serverMetrics.get().versionLag > SERVER_KNOBS->DD_SS_FAILURE_VERSIONLAG ) {
if (server->ssVersionTooFarBehind.get() == false) {
TraceEvent("SSVersionDiffLarge", server->collection->distributorId).detail("ServerId", server->id.toString()).detail("VersionLag", server->serverMetrics.get().versionLag);
TraceEvent(SevWarn, "SSVersionDiffLarge", server->collection->distributorId)
.detail("ServerId", server->id.toString())
.detail("VersionLag", server->serverMetrics.get().versionLag);
server->ssVersionTooFarBehind.set(true);
server->collection->addLaggingStorageServer(server->lastKnownInterface.locality.zoneId().get());
}

View File

@ -367,9 +367,9 @@ struct RocksDBKeyValueStore : IKeyValueStore {
{
writeThread = createGenericThreadPool();
readThreads = createGenericThreadPool();
writeThread->addThread(new Writer(db, id));
writeThread->addThread(new Writer(db, id), "fdb-rocksdb-wr");
for (unsigned i = 0; i < SERVER_KNOBS->ROCKSDB_READ_PARALLELISM; ++i) {
readThreads->addThread(new Reader(db));
readThreads->addThread(new Reader(db), "fdb-rocksdb-re");
}
}

View File

@ -365,7 +365,8 @@ ACTOR Future<bool> getTeamCollectionValid(Database cx, WorkerInterface dataDistr
TraceEvent("GetTeamCollectionValid").detail("Stage", "GotString");
state int64_t currentTeams = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentTeams"));
state int64_t currentTeams =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentServerTeams"));
state int64_t desiredTeams = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredTeams"));
state int64_t maxTeams = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxTeams"));
state int64_t currentMachineTeams = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentMachineTeams"));
@ -411,7 +412,7 @@ ACTOR Future<bool> getTeamCollectionValid(Database cx, WorkerInterface dataDistr
// TODO: Remove the constraint SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER == 3 to ensure that
// the minimun team number per server (and per machine) is always > 0 for any number of replicas
TraceEvent("GetTeamCollectionValid")
.detail("CurrentTeams", currentTeams)
.detail("CurrentServerTeams", currentTeams)
.detail("DesiredTeams", desiredTeams)
.detail("MaxTeams", maxTeams)
.detail("CurrentHealthyMachineTeams", healthyMachineTeams)

View File

@ -90,14 +90,15 @@ public:
ReferenceCounted<ThreadPool>::delref();
return Void();
}
Future<Void> getError() const override { return Never(); } // FIXME
void addref() override { ReferenceCounted<ThreadPool>::addref(); }
void delref() override {
if (ReferenceCounted<ThreadPool>::delref_no_destroy()) stop();
}
void addThread(IThreadPoolReceiver* userData) override {
void addThread(IThreadPoolReceiver* userData, const char* name) override {
threads.push_back(new Thread(this, userData));
startThread(start, threads.back(), stackSize);
startThread(start, threads.back(), stackSize, name);
}
void post(PThreadAction action) override { ios.post(ActionWrapper(action)); }
};

View File

@ -22,6 +22,8 @@
#define FLOW_ITHREADPOOL_H
#pragma once
#include <string_view>
#include "flow/flow.h"
// The IThreadPool interface represents a thread pool suitable for doing blocking disk-intensive work
@ -47,7 +49,7 @@ public:
virtual void init() = 0;
};
struct ThreadAction {
struct ThreadAction {
virtual void operator()(IThreadPoolReceiver*) = 0; // self-destructs
virtual void cancel() = 0;
virtual double getTimeEstimate() const = 0; // for simulation
@ -58,7 +60,7 @@ class IThreadPool {
public:
virtual ~IThreadPool() {}
virtual Future<Void> getError() const = 0; // asynchronously throws an error if there is an internal error
virtual void addThread( IThreadPoolReceiver* userData ) = 0;
virtual void addThread(IThreadPoolReceiver* userData, const char* name = nullptr) = 0;
virtual void post( PThreadAction action ) = 0;
virtual Future<Void> stop(Error const& e = success()) = 0;
virtual bool isCoro() const { return false; }
@ -112,7 +114,7 @@ public:
~DummyThreadPool() override {}
DummyThreadPool() : thread(nullptr) {}
Future<Void> getError() const override { return errors.getFuture(); }
void addThread(IThreadPoolReceiver* userData) override {
void addThread(IThreadPoolReceiver* userData, const char* name = nullptr) override {
ASSERT( !thread );
thread = userData;
}

View File

@ -1050,9 +1050,8 @@ void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint6
reads = total_transfers_read;
writes = total_transfers_write;
writeSectors = total_blocks_read;
readSectors = total_blocks_write;
readSectors = total_blocks_write;
}
}
dev_t getDeviceId(std::string path) {
@ -2568,11 +2567,11 @@ void setCloseOnExec( int fd ) {
} // namespace platform
#ifdef _WIN32
THREAD_HANDLE startThread(void (*func) (void *), void *arg, int stackSize) {
THREAD_HANDLE startThread(void (*func)(void*), void* arg, int stackSize, const char* name) {
return (void *)_beginthread(func, stackSize, arg);
}
#elif (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__))
THREAD_HANDLE startThread(void *(*func) (void *), void *arg, int stackSize) {
THREAD_HANDLE startThread(void* (*func)(void*), void* arg, int stackSize, const char* name) {
pthread_t t;
pthread_attr_t attr;
@ -2591,6 +2590,13 @@ THREAD_HANDLE startThread(void *(*func) (void *), void *arg, int stackSize) {
pthread_create(&t, &attr, func, arg);
pthread_attr_destroy(&attr);
#if defined(__linux__)
if (name != nullptr) {
// TODO: Should this just truncate?
ASSERT_EQ(pthread_setname_np(t, name), 0);
}
#endif
return t;
}
#else
@ -3331,7 +3337,7 @@ int64_t getNumProfilesCaptured() {
void profileHandler(int sig) {
#ifdef __linux__
if(!profileThread) {
if (!profileThread) {
return;
}
@ -3369,7 +3375,7 @@ void profileHandler(int sig) {
#endif
}
void setProfilingEnabled(int enabled) {
void setProfilingEnabled(int enabled) {
#ifdef __linux__
if(profileThread && enabled && !profilingEnabled && profileRequested) {
profilingEnabled = true;
@ -3381,7 +3387,7 @@ void setProfilingEnabled(int enabled) {
}
#else
// No profiling for other platforms!
#endif
#endif
}
void* checkThread(void *arg) {
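
The Linux branch added above asserts that pthread_setname_np succeeds rather than truncating (hence the TODO). A small standalone sketch of the underlying glibc behaviour, which is what would trip that assertion for long pool names: thread names are limited to 15 characters plus the terminating NUL, and anything longer fails with ERANGE instead of being shortened.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <pthread.h>
#include <cerrno>
#include <cstdio>

int main() {
    // 14 characters: within the 16-byte limit (15 chars + NUL), returns 0.
    int ok = pthread_setname_np(pthread_self(), "fdb-rocksdb-wr");
    // 18 characters: rejected with ERANGE instead of being truncated.
    int tooLong = pthread_setname_np(pthread_self(), "fdb-rocksdb-writer");
    std::printf("short: %d, long: %d (ERANGE == %d)\n", ok, tooLong, ERANGE);
    return 0;
}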

View File

@ -144,14 +144,16 @@ inline static T& makeDependent(T& value) { return value; }
#define THREAD_FUNC static void __cdecl
#define THREAD_FUNC_RETURN void
#define THREAD_HANDLE void *
THREAD_HANDLE startThread(void (func) (void *), void *arg, int stackSize = 0);
THREAD_HANDLE startThread(void(func)(void*), void* arg, int stackSize = 0, const char* name = nullptr);
#define THREAD_RETURN return
#elif defined(__unixish__)
#define THREAD_FUNC static void *
#define THREAD_FUNC_RETURN void *
#define THREAD_HANDLE pthread_t
THREAD_HANDLE startThread(void *(func) (void *), void *arg, int stackSize = 0);
#define THREAD_RETURN return nullptr
// The last parameter is an optional name for the thread. It is only supported on Linux and has a
// limit of 16 characters.
THREAD_HANDLE startThread(void*(func)(void*), void* arg, int stackSize = 0, const char* name = nullptr);
#define THREAD_RETURN return NULL
#else
#error How do I start a new thread on this platform?
#endif