From df39c5a44ef4d6ee3075f55341871b35a60b4d04 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 30 Jun 2021 17:05:04 -0700 Subject: [PATCH 001/142] Implement Disk Throttling Chaos workload. --- fdbclient/ClientWorkerInterface.h | 23 ++++++++++++++++++- fdbrpc/AsyncFileEIO.actor.h | 26 +++++++++++++++++---- fdbrpc/AsyncFileKAIO.actor.h | 14 +++++++++++- fdbrpc/IAsyncFile.h | 4 ++++ fdbrpc/sim2.actor.cpp | 17 +++++++++++++- fdbrpc/simulator.h | 7 ++++-- fdbserver/CMakeLists.txt | 1 + fdbserver/worker.actor.cpp | 14 ++++++++++++ flow/Knobs.cpp | 4 ++++ flow/Knobs.h | 3 +++ flow/network.h | 38 ++++++++++++++++++++++++++++++- tests/CMakeLists.txt | 1 + 12 files changed, 141 insertions(+), 11 deletions(-) diff --git a/fdbclient/ClientWorkerInterface.h b/fdbclient/ClientWorkerInterface.h index cff4172387..b73c43ebd7 100644 --- a/fdbclient/ClientWorkerInterface.h +++ b/fdbclient/ClientWorkerInterface.h @@ -31,8 +31,10 @@ // A ClientWorkerInterface is embedded as the first element of a WorkerInterface. 
struct ClientWorkerInterface { constexpr static FileIdentifier file_identifier = 12418152; + RequestStream reboot; RequestStream profiler; + RequestStream setFailureInjection; bool operator==(ClientWorkerInterface const& r) const { return id() == r.id(); } bool operator!=(ClientWorkerInterface const& r) const { return id() != r.id(); } @@ -43,7 +45,7 @@ struct ClientWorkerInterface { template void serialize(Ar& ar) { - serializer(ar, reboot, profiler); + serializer(ar, reboot, profiler, setFailureInjection); } }; @@ -88,4 +90,23 @@ struct ProfilerRequest { } }; +struct SetFailureInjection { + constexpr static FileIdentifier file_identifier = 15439864; + ReplyPromise reply; + struct ThrottleDiskCommand { + double time; + Optional address; // TODO: NEELAM: how do we identify the machine + + template + void serialize(Ar& ar) { + serializer(ar, time, address); + } + }; + Optional throttleDisk; + + template + void serialize(Ar& ar) { + serializer(ar, reply, throttleDisk); + } +}; #endif diff --git a/fdbrpc/AsyncFileEIO.actor.h b/fdbrpc/AsyncFileEIO.actor.h index 44fe6448db..c962e60098 100644 --- a/fdbrpc/AsyncFileEIO.actor.h +++ b/fdbrpc/AsyncFileEIO.actor.h @@ -162,14 +162,16 @@ public: Future read(void* data, int length, int64_t offset) override { ++countFileLogicalReads; ++countLogicalReads; - return read_impl(fd, data, length, offset); + double throttleFor = diskFailureInjector->getDiskDelay(); + return read_impl(fd, data, length, offset, throttleFor); } Future write(void const* data, int length, int64_t offset) override // Copies data synchronously { ++countFileLogicalWrites; ++countLogicalWrites; + double throttleFor = diskFailureInjector->getDiskDelay(); // Standalone copy = StringRef((const uint8_t*)data, length); - return write_impl(fd, err, StringRef((const uint8_t*)data, length), offset); + return write_impl(fd, err, StringRef((const uint8_t*)data, length), offset, throttleFor); } Future truncate(int64_t size) override { ++countFileLogicalWrites; @@ -270,6 
+272,7 @@ private: int fd, flags; Reference err; std::string filename; + //DiskFailureInjector* diskFailureInjector; mutable Int64MetricHandle countFileLogicalWrites; mutable Int64MetricHandle countFileLogicalReads; @@ -277,7 +280,8 @@ private: mutable Int64MetricHandle countLogicalReads; AsyncFileEIO(int fd, int flags, std::string const& filename) - : fd(fd), flags(flags), filename(filename), err(new ErrorInfo) { + : fd(fd), flags(flags), filename(filename), err(new ErrorInfo), + diskFailureInjector(DiskFailureInjector::injector()) { if (!g_network->isSimulated()) { countFileLogicalWrites.init(LiteralStringRef("AsyncFile.CountFileLogicalWrites"), filename); countFileLogicalReads.init(LiteralStringRef("AsyncFile.CountFileLogicalReads"), filename); @@ -329,13 +333,18 @@ private: TraceEvent("AsyncFileClosed").suppressFor(1.0).detail("Fd", fd); } - ACTOR static Future read_impl(int fd, void* data, int length, int64_t offset) { + ACTOR static Future read_impl(int fd, void* data, int length, int64_t offset, double throttleFor) { state TaskPriority taskID = g_network->getCurrentTask(); state Promise p; // fprintf(stderr, "eio_read (fd=%d length=%d offset=%lld)\n", fd, length, offset); state eio_req* r = eio_read(fd, data, length, offset, 0, eio_callback, &p); try { wait(p.getFuture()); + // throttleDisk if enabled + //double throttleFor = diskFailureInjector->getDiskDelay(); + if (throttleFor > 0.0) { + wait(delay(throttleFor)); + } } catch (...) 
{ g_network->setCurrentTask(taskID); eio_cancel(r); @@ -358,12 +367,17 @@ private: } } - ACTOR static Future write_impl(int fd, Reference err, StringRef data, int64_t offset) { + ACTOR static Future write_impl(int fd, Reference err, StringRef data, int64_t offset, double throttleFor) { state TaskPriority taskID = g_network->getCurrentTask(); state Promise p; state eio_req* r = eio_write(fd, (void*)data.begin(), data.size(), offset, 0, eio_callback, &p); try { wait(p.getFuture()); + // throttleDisk if enabled + //double throttleFor = diskFailureInjector->getDiskDelay(); + if (throttleFor > 0.0) { + wait(delay(throttleFor)); + } } catch (...) { g_network->setCurrentTask(taskID); eio_cancel(r); @@ -553,6 +567,8 @@ private: static void apple_fsync(eio_req* req) { req->result = fcntl(req->int1, F_FULLFSYNC, 0); } static void free_req(eio_req* req) { free(req); } #endif +public: + DiskFailureInjector* diskFailureInjector; }; #ifdef FILESYSTEM_IMPL diff --git a/fdbrpc/AsyncFileKAIO.actor.h b/fdbrpc/AsyncFileKAIO.actor.h index 5e6592e6ba..15553a85e2 100644 --- a/fdbrpc/AsyncFileKAIO.actor.h +++ b/fdbrpc/AsyncFileKAIO.actor.h @@ -195,7 +195,10 @@ public: void addref() override { ReferenceCounted::addref(); } void delref() override { ReferenceCounted::delref(); } - + ACTOR static void throttleDisk(double throttleFor) { + if (throttleFor > 0.0) + wait(delay(throttleFor)); + } Future read(void* data, int length, int64_t offset) override { ++countFileLogicalReads; ++countLogicalReads; @@ -213,6 +216,9 @@ public: enqueue(io, "read", this); Future result = io->result.getFuture(); + // throttleDisk if enabled + throttleDisk(diskFailureInjector->getDiskDelay()); + #if KAIO_LOGGING // result = map(result, [=](int r) mutable { KAIOLogBlockEvent(io, OpLogEntry::READY, r); return r; }); #endif @@ -238,6 +244,9 @@ public: enqueue(io, "write", this); Future result = io->result.getFuture(); + // throttleDisk if enabled + throttleDisk(diskFailureInjector->getDiskDelay()); + #if 
KAIO_LOGGING // result = map(result, [=](int r) mutable { KAIOLogBlockEvent(io, OpLogEntry::READY, r); return r; }); #endif @@ -749,6 +758,9 @@ private: } } } + +public: + DiskFailureInjector* diskFailureInjector; }; #if KAIO_LOGGING diff --git a/fdbrpc/IAsyncFile.h b/fdbrpc/IAsyncFile.h index ed703514c6..f21760cb00 100644 --- a/fdbrpc/IAsyncFile.h +++ b/fdbrpc/IAsyncFile.h @@ -34,6 +34,7 @@ // must complete or cancel, but you should probably look at the file implementations you'll be using. class IAsyncFile { public: + //explicit IAsyncFile() : diskFailureInjector(DiskFailureInjector::injector()) {} virtual ~IAsyncFile(); // Pass these to g_network->open to get an IAsyncFile enum { @@ -95,6 +96,9 @@ public: // Used for rate control, at present, only AsyncFileCached supports it virtual Reference const& getRateControl() { throw unsupported_operation(); } virtual void setRateControl(Reference const& rc) { throw unsupported_operation(); } + +//public: + //DiskFailureInjector* diskFailureInjector; }; typedef void (*runCycleFuncPtr)(); diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index ee735b963a..093ef389ac 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -1949,6 +1949,13 @@ public: void clogPair(const IPAddress& from, const IPAddress& to, double seconds) override { g_clogging.clogPairFor(from, to, seconds); } + void throttleDisk(ProcessInfo* machine, double seconds) override { + machine->throttleDiskFor = seconds; + TraceEvent("ThrottleDisk").detail("Delay", seconds). + detail("Roles", getRoles(machine->address)). + detail("Address", machine->address). 
+ detail("StartingClass", machine->startingClass.toString()); + } std::vector getAllProcesses() const override { std::vector processes; for (auto& c : machines) { @@ -2390,11 +2397,19 @@ Future waitUntilDiskReady(Reference diskParameters, int64_ diskParameters->nextOperation += (1.0 / diskParameters->iops) + (size / diskParameters->bandwidth); double randomLatency; - if (sync) { + if (g_simulator.getCurrentProcess()->throttleDiskFor) { + randomLatency = g_simulator.getCurrentProcess()->throttleDiskFor; + TraceEvent("WaitUntilDiskReadyThrottling") + .detail("Delay", randomLatency); + } else if (sync) { randomLatency = .005 + deterministicRandom()->random01() * (BUGGIFY ? 1.0 : .010); } else randomLatency = 10 * deterministicRandom()->random01() / diskParameters->iops; + TraceEvent("WaitUntilDiskReady").detail("Delay", randomLatency). + detail("Roles", g_simulator.getRoles(g_simulator.getCurrentProcess()->address)). + detail("Address", g_simulator.getCurrentProcess()->address). + detail("ThrottleDiskFor", g_simulator.getCurrentProcess()->throttleDiskFor); return delayUntil(diskParameters->nextOperation + randomLatency); } diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 6404eafc17..1da850e48c 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -87,6 +87,7 @@ public: uint64_t fault_injection_r; double fault_injection_p1, fault_injection_p2; bool failedDisk; + double throttleDiskFor; UID uid; @@ -102,7 +103,7 @@ public: : name(name), locality(locality), startingClass(startingClass), addresses(addresses), address(addresses.address), dataFolder(dataFolder), network(net), coordinationFolder(coordinationFolder), failed(false), excluded(false), rebooting(false), fault_injection_p1(0), fault_injection_p2(0), - fault_injection_r(0), machine(0), cleared(false), failedDisk(false) { + fault_injection_r(0), machine(0), cleared(false), failedDisk(false), throttleDiskFor(0) { uid = deterministicRandom()->randomUniqueID(); } @@ -374,6 +375,7 @@ public: virtual void 
clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) = 0; virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) = 0; + virtual void throttleDisk(ProcessInfo* machine, double seconds) = 0; virtual std::vector getAllProcesses() const = 0; virtual ProcessInfo* getProcessByAddress(NetworkAddress const& address) = 0; virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) = 0; @@ -462,8 +464,9 @@ struct DiskParameters : ReferenceCounted { double nextOperation; int64_t iops; int64_t bandwidth; + double throttleFor; - DiskParameters(int64_t iops, int64_t bandwidth) : nextOperation(0), iops(iops), bandwidth(bandwidth) {} + DiskParameters(int64_t iops, int64_t bandwidth) : nextOperation(0), iops(iops), bandwidth(bandwidth), throttleFor(0) {} }; // Simulates delays for performing operations on disk diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index 0f7d5dc860..efa2c7fbf1 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -158,6 +158,7 @@ set(FDBSERVER_SRCS workloads/ChangeConfig.actor.cpp workloads/ClientTransactionProfileCorrectness.actor.cpp workloads/TriggerRecovery.actor.cpp + workloads/DiskThrottling.actor.cpp workloads/SuspendProcesses.actor.cpp workloads/CommitBugCheck.actor.cpp workloads/ConfigureDatabase.actor.cpp diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index ad91d4dd34..dd6ee5e39d 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1209,6 +1209,10 @@ ACTOR Future workerServer(Reference connFile, state Reference>> issues(new AsyncVar>()); + if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { + TraceEvent(SevWarnAlways, "ChaosFeaturesEnabled"); + } + folder = abspath(folder); if (metricsPrefix.size() > 0) { @@ -1509,6 +1513,16 @@ ACTOR Future workerServer(Reference connFile, flushAndExit(0); } } + when(SetFailureInjection req = waitNext(interf.clientInterface.setFailureInjection.getFuture())) { + if 
(FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { + if (req.throttleDisk.present()) { + DiskFailureInjector::injector()->throttleFor(req.throttleDisk.get().time); + } + req.reply.send(Void()); + } else { + req.reply.sendError(client_invalid_operation()); + } + } when(ProfilerRequest req = waitNext(interf.clientInterface.profiler.getFuture())) { state ProfilerRequest profilerReq = req; // There really isn't a great "filepath sanitizer" or "filepath escape" function available, diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 7ceeb95801..1d91a7e8da 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -64,6 +64,10 @@ void FlowKnobs::initialize(Randomize _randomize, IsSimulated _isSimulated) { init( HUGE_ARENA_LOGGING_BYTES, 100e6 ); init( HUGE_ARENA_LOGGING_INTERVAL, 5.0 ); + // Chaos testing + init( ENABLE_CHAOS_FEATURES, false ); + + init( WRITE_TRACING_ENABLED, true ); if( randomize && BUGGIFY ) WRITE_TRACING_ENABLED = false; init( TRACING_UDP_LISTENER_PORT, 8889 ); // Only applicable if TracerType is set to a network option. 
diff --git a/flow/Knobs.h b/flow/Knobs.h index ef4fdcf2af..340848b68f 100644 --- a/flow/Knobs.h +++ b/flow/Knobs.h @@ -98,6 +98,9 @@ public: double HUGE_ARENA_LOGGING_BYTES; double HUGE_ARENA_LOGGING_INTERVAL; + // Chaos testing + bool ENABLE_CHAOS_FEATURES; + bool WRITE_TRACING_ENABLED; int TRACING_UDP_LISTENER_PORT; diff --git a/flow/network.h b/flow/network.h index 00f430fb86..d174601fec 100644 --- a/flow/network.h +++ b/flow/network.h @@ -486,7 +486,8 @@ public: enNetworkAddressesFunc = 11, enClientFailureMonitor = 12, enSQLiteInjectedError = 13, - enGlobalConfig = 14 + enGlobalConfig = 14, + enFailureInjector = 15 }; virtual void longTaskCheck(const char* name) {} @@ -646,4 +647,39 @@ public: // Returns the interface that should be used to make and accept socket connections }; +struct DiskFailureInjector : FastAllocated { + static DiskFailureInjector* injector() { + auto res = g_network->global(INetwork::enFailureInjector); + if (!res) { + res = new DiskFailureInjector(); + g_network->setGlobal(INetwork::enFailureInjector, res); + } + return static_cast(res); + } + + //double getSendDelay(NetworkAddress const& peer); + //double getReceiveDelay(NetworkAddress const& peer); + + //virtual void throttleFor(double time) = 0; + //virtual double getDiskDelay() = 0; + + void throttleFor(double time) { + throttleUntil = std::max(throttleUntil, timer_monotonic() + time); + } + + double getDiskDelay() { + if (!FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { + return 0.0; + } + return throttleUntil; + } + +private: // members + double throttleUntil = 0.0; + +private: // construction + DiskFailureInjector() = default; + DiskFailureInjector(DiskFailureInjector const&) = delete; +}; + #endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 913b39413b..5a5bf2c208 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -124,6 +124,7 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES fast/ConstrainedRandomSelector.toml) add_fdb_test(TEST_FILES fast/CycleAndLock.toml) 
add_fdb_test(TEST_FILES fast/CycleTest.toml) + add_fdb_test(TEST_FILES fast/DiskThrottledCycle.toml IGNORE) add_fdb_test(TEST_FILES fast/FuzzApiCorrectness.toml) add_fdb_test(TEST_FILES fast/FuzzApiCorrectnessClean.toml) add_fdb_test(TEST_FILES fast/IncrementalBackup.toml) From 96bde8919f29b0c8720d924b1c1706f5ad5c2af8 Mon Sep 17 00:00:00 2001 From: negoyal Date: Thu, 1 Jul 2021 15:00:19 -0700 Subject: [PATCH 002/142] Adding the Disk throttle workload file that I forgot earlier. --- fdbserver/workloads/DiskThrottling.actor.cpp | 133 +++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 fdbserver/workloads/DiskThrottling.actor.cpp diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskThrottling.actor.cpp new file mode 100644 index 0000000000..1eb4c1639d --- /dev/null +++ b/fdbserver/workloads/DiskThrottling.actor.cpp @@ -0,0 +1,133 @@ +#include "fdbclient/NativeAPI.actor.h" +#include "fdbserver/TesterInterface.actor.h" +#include "fdbserver/workloads/workloads.actor.h" +#include "fdbrpc/simulator.h" +#include "fdbserver/WorkerInterface.actor.h" +#include "fdbserver/ServerDBInfo.h" +#include "fdbserver/QuietDatabase.h" +#include "flow/actorcompiler.h" // This must be the last #include. 
+ +struct DiskThrottlingWorkload : TestWorkload { + bool enabled; + double testDuration; + double throttleFor; + DiskThrottlingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { + enabled = !clientId; // only do this on the "first" client + testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); + throttleFor = getOption(options, LiteralStringRef("throttleDelay"), 2.0); + TraceEvent("DiskThrottlingWorkload").detail("TestDuration", testDuration).detail("For", throttleFor); + } + + std::string description() const override { + if (&g_simulator == g_network) + return "DiskThrottling"; + else + return "NoSimDiskThrolling"; + } + + Future setup(Database const& cx) override { return Void(); } + + Future start(Database const& cx) override { + if (&g_simulator == g_network && enabled) { + TraceEvent("DiskThrottlingStart").detail("For", throttleFor); + return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), + testDuration, + Void()); + } else if (enabled) { + return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), + testDuration, + Void()); + } else + return Void(); + } + + Future check(Database const& cx) override { return true; } + + void getMetrics(vector& m) override {} + + ACTOR void doThrottle(ISimulator::ProcessInfo* machine, double t, double delay = 0.0) { + wait(::delay(delay)); + TraceEvent("ThrottleDisk").detail("For", t); + g_simulator.throttleDisk(machine, t); + TraceEvent("ThrottleDiskSet").detail("For", t); + } + + static void checkDiskThrottleResult(Future res, WorkerInterface worker) { + if (res.isError()) { + auto err = res.getError(); + if (err.code() == error_code_client_invalid_operation) { + TraceEvent(SevError, "ChaosDisabled") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); + } else { + TraceEvent(SevError, "DiskThrottlingFailed") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) + .error(err); + } 
+ } + } + + ACTOR void doThrottle(WorkerInterface worker, double t, double delay = 0.0) { + state Future res; + wait(::delay(delay)); + SetFailureInjection::ThrottleDiskCommand throttleDisk; + throttleDisk.time = t; + SetFailureInjection req; + req.throttleDisk = throttleDisk; + TraceEvent("ThrottleDisk").detail("For", t); + res = worker.clientInterface.setFailureInjection.getReply(req); + wait(ready(res)); + checkDiskThrottleResult(res, worker); + } + + static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { + result->clear(); + *result = g_simulator.getAllProcesses(); + return Void(); + } + + static Future getAllStorageWorkers(Database cx, DiskThrottlingWorkload* self, std::vector* result) { + vector all = g_simulator.getAllProcesses(); + for (int i = 0; i < all.size(); i++) + if (!all[i]->failed && + all[i]->name == std::string("Server") && + ((all[i]->startingClass == ProcessClass::StorageClass) || + (all[i]->startingClass == ProcessClass::UnsetClass))) + result->emplace_back(all[i]); + return Void(); + } + + ACTOR static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { + result->clear(); + std::vector res = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + for (auto& worker : res) { + result->emplace_back(worker.interf); + } + return Void(); + } + + ACTOR static Future getAllStorageWorkers(Database cx, DiskThrottlingWorkload* self, std::vector* result) { + result->clear(); + state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); + for (auto& worker : res) { + result->emplace_back(worker); + } + return Void(); + } + + ACTOR template + Future throttleDiskClient(Database cx, DiskThrottlingWorkload* self) { + state double lastTime = now(); + state double workloadEnd = now() + self->testDuration; + state std::vector machines; + loop { + wait(poisson(&lastTime, 1)); + wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); + 
//wait(DiskThrottlingWorkload::getAllWorkers(self, &machines)); + auto machine = deterministicRandom()->randomChoice(machines); + TraceEvent("DoThrottleDisk").detail("For", self->throttleFor); + self->doThrottle(machine, self->throttleFor); + } + } +}; +WorkloadFactory DiskThrottlingWorkloadFactory("DiskThrottling"); From 957eceb14cf52c7030277c1026fac0bae9d82b85 Mon Sep 17 00:00:00 2001 From: negoyal Date: Thu, 1 Jul 2021 15:01:13 -0700 Subject: [PATCH 003/142] And the test file. --- tests/fast/DiskThrottledCycle.toml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tests/fast/DiskThrottledCycle.toml diff --git a/tests/fast/DiskThrottledCycle.toml b/tests/fast/DiskThrottledCycle.toml new file mode 100644 index 0000000000..c0f35293aa --- /dev/null +++ b/tests/fast/DiskThrottledCycle.toml @@ -0,0 +1,13 @@ +[[test]] +testTitle = 'DiskThrottledCycle' + + [[test.workload]] + testName = 'Cycle' + transactionsPerSecond = 2500.0 + testDuration = 30.0 + expectedRate = 0 + + [[test.workload]] + testName = 'DiskThrottling' + testDuration = 30.0 + From 2b5a96f745e23d557ce725a068ba5ace0145a2f6 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 7 Jul 2021 23:58:14 -0700 Subject: [PATCH 004/142] Single code path for sim and non-sim modes. 
--- fdbrpc/AsyncFileNonDurable.actor.h | 43 ++++++++++++++++---- fdbserver/worker.actor.cpp | 1 + fdbserver/workloads/DiskThrottling.actor.cpp | 20 ++++----- flow/Knobs.cpp | 2 +- flow/network.h | 8 ++-- 5 files changed, 50 insertions(+), 24 deletions(-) diff --git a/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/AsyncFileNonDurable.actor.h index f813c1a354..0c63846169 100644 --- a/fdbrpc/AsyncFileNonDurable.actor.h +++ b/fdbrpc/AsyncFileNonDurable.actor.h @@ -31,6 +31,7 @@ #include "flow/flow.h" #include "fdbrpc/IAsyncFile.h" #include "flow/ActorCollection.h" +#include "flow/network.h" #include "fdbrpc/simulator.h" #include "fdbrpc/TraceFileIO.h" #include "fdbrpc/RangeMap.h" @@ -61,7 +62,7 @@ private: Future shutdown; public: - explicit AsyncFileDetachable(Reference file) : file(file) { shutdown = doShutdown(this); } + explicit AsyncFileDetachable(Reference file) : file(file), diskFailureInjector(DiskFailureInjector::injector()) { shutdown = doShutdown(this); } ACTOR Future doShutdown(AsyncFileDetachable* self) { wait(success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture())); @@ -84,12 +85,20 @@ public: Future read(void* data, int length, int64_t offset) override { if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady()) return io_error().asInjectedFault(); + // throttleDisk if enabled + auto throttleFor = diskFailureInjector->getDiskDelay(); + if (throttleFor > 0.0) { + TraceEvent("AsyncFileDetachable_Read").detail("ThrottleDelay", throttleFor); + //wait(delay(throttleFor)); + } return sendErrorOnShutdown(file->read(data, length, offset)); } Future write(void const* data, int length, int64_t offset) override { if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady()) return io_error().asInjectedFault(); + if (diskFailureInjector->getDiskDelay() > 0.0) + TraceEvent("AsyncFileDetachable_Write").detail("ThrottleDelay", diskFailureInjector->getDiskDelay()); return 
sendErrorOnShutdown(file->write(data, length, offset)); } @@ -121,6 +130,8 @@ public: throw io_error().asInjectedFault(); return file->getFilename(); } +public: + DiskFailureInjector* diskFailureInjector; }; // An async file implementation which wraps another async file and will randomly destroy sectors that it is writing when @@ -190,11 +201,12 @@ private: Reference diskParameters, NetworkAddress openedAddress, bool aio) - : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters), - openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false), - aio(aio) { + : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters), + openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false), + aio(aio), diskFailureInjector(DiskFailureInjector::injector()) + { - // This is only designed to work in simulation + // This is only designed to work in simulation ASSERT(g_network->isSimulated()); this->id = deterministicRandom()->randomUniqueID(); @@ -309,7 +321,7 @@ public: // Passes along reads straight to the underlying file, waiting for any outstanding changes that could affect the // results - Future read(void* data, int length, int64_t offset) override { return read(this, data, length, offset); } + Future read(void* data, int length, int64_t offset) override { return read(this, data, length, offset, diskFailureInjector->getDiskDelay()); } // Writes data to the file. 
Writes are delayed a random amount of time before being // passed to the underlying file @@ -324,7 +336,7 @@ public: Promise writeStarted; Promise> writeEnded; - writeEnded.send(write(this, writeStarted, writeEnded.getFuture(), data, length, offset)); + writeEnded.send(write(this, writeStarted, writeEnded.getFuture(), data, length, offset, diskFailureInjector->getDiskDelay())); return writeStarted.getFuture(); } @@ -432,7 +444,7 @@ private: return readFuture.get(); } - ACTOR Future read(AsyncFileNonDurable* self, void* data, int length, int64_t offset) { + ACTOR Future read(AsyncFileNonDurable* self, void* data, int length, int64_t offset, double throttleFor = 0.0) { state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess(); state TaskPriority currentTaskID = g_network->getCurrentTask(); wait(g_simulator.onMachine(currentProcess)); @@ -441,6 +453,11 @@ private: state int rep = wait(self->onRead(self, data, length, offset)); wait(g_simulator.onProcess(currentProcess, currentTaskID)); + // throttleDisk if enabled + if (throttleFor > 0.0) { + TraceEvent("AsyncFileNonDurable_ReadDone", self->id).detail("ThrottleDelay", throttleFor).detail("Filename", self->filename).detail("ReadLength", length).detail("Offset", offset); + wait(delay(throttleFor)); + } return rep; } catch (Error& e) { state Error err = e; @@ -457,7 +474,8 @@ private: Future> ownFuture, void const* data, int length, - int64_t offset) { + int64_t offset, + double throttleFor = 0.0) { state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess(); state TaskPriority currentTaskID = g_network->getCurrentTask(); wait(g_simulator.onMachine(currentProcess)); @@ -621,6 +639,11 @@ private: } wait(waitForAll(writeFutures)); + // throttleDisk if enabled + if (throttleFor > 0.0) { + TraceEvent("AsyncFileNonDurable_WriteDone", self->id).detail("ThrottleDelay", throttleFor).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset); + 
wait(delay(throttleFor)); + } //TraceEvent("AsyncFileNonDurable_WriteDone", self->id).detail("Delay", delayDuration).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset); return Void(); } @@ -866,6 +889,8 @@ private: throw err; } } +public: + DiskFailureInjector* diskFailureInjector; }; #include "flow/unactorcompiler.h" diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index dd6ee5e39d..4a99e02265 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1516,6 +1516,7 @@ ACTOR Future workerServer(Reference connFile, when(SetFailureInjection req = waitNext(interf.clientInterface.setFailureInjection.getFuture())) { if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { if (req.throttleDisk.present()) { + TraceEvent("DiskThrottleRequest").detail("Delay", req.throttleDisk.get().time); DiskFailureInjector::injector()->throttleFor(req.throttleDisk.get().time); } req.reply.send(Void()); diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskThrottling.actor.cpp index 1eb4c1639d..61c465ae7f 100644 --- a/fdbserver/workloads/DiskThrottling.actor.cpp +++ b/fdbserver/workloads/DiskThrottling.actor.cpp @@ -28,12 +28,13 @@ struct DiskThrottlingWorkload : TestWorkload { Future setup(Database const& cx) override { return Void(); } Future start(Database const& cx) override { - if (&g_simulator == g_network && enabled) { - TraceEvent("DiskThrottlingStart").detail("For", throttleFor); - return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), - testDuration, - Void()); - } else if (enabled) { + //if (&g_simulator == g_network && enabled) { + // TraceEvent("DiskThrottlingStart").detail("For", throttleFor); + // return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), + // testDuration, + // Void()); + //} else + if (enabled) { return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), testDuration, Void()); @@ -45,7 +46,7 
@@ struct DiskThrottlingWorkload : TestWorkload { void getMetrics(vector& m) override {} - ACTOR void doThrottle(ISimulator::ProcessInfo* machine, double t, double delay = 0.0) { + ACTOR void doThrottle_unused(ISimulator::ProcessInfo* machine, double t, double delay = 0.0) { wait(::delay(delay)); TraceEvent("ThrottleDisk").detail("For", t); g_simulator.throttleDisk(machine, t); @@ -79,13 +80,13 @@ struct DiskThrottlingWorkload : TestWorkload { checkDiskThrottleResult(res, worker); } - static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { + static Future getAllWorkers_unused(DiskThrottlingWorkload* self, std::vector* result) { result->clear(); *result = g_simulator.getAllProcesses(); return Void(); } - static Future getAllStorageWorkers(Database cx, DiskThrottlingWorkload* self, std::vector* result) { + static Future getAllStorageWorkers_unused(Database cx, DiskThrottlingWorkload* self, std::vector* result) { vector all = g_simulator.getAllProcesses(); for (int i = 0; i < all.size(); i++) if (!all[i]->failed && @@ -123,7 +124,6 @@ struct DiskThrottlingWorkload : TestWorkload { loop { wait(poisson(&lastTime, 1)); wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); - //wait(DiskThrottlingWorkload::getAllWorkers(self, &machines)); auto machine = deterministicRandom()->randomChoice(machines); TraceEvent("DoThrottleDisk").detail("For", self->throttleFor); self->doThrottle(machine, self->throttleFor); diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 1d91a7e8da..12bc0d70c9 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -65,7 +65,7 @@ void FlowKnobs::initialize(Randomize _randomize, IsSimulated _isSimulated) { init( HUGE_ARENA_LOGGING_INTERVAL, 5.0 ); // Chaos testing - init( ENABLE_CHAOS_FEATURES, false ); + init( ENABLE_CHAOS_FEATURES, true ); init( WRITE_TRACING_ENABLED, true ); if( randomize && BUGGIFY ) WRITE_TRACING_ENABLED = false; diff --git a/flow/network.h b/flow/network.h index d174601fec..651882d23e 100644 
--- a/flow/network.h +++ b/flow/network.h @@ -657,25 +657,25 @@ struct DiskFailureInjector : FastAllocated { return static_cast(res); } - //double getSendDelay(NetworkAddress const& peer); - //double getReceiveDelay(NetworkAddress const& peer); - //virtual void throttleFor(double time) = 0; //virtual double getDiskDelay() = 0; void throttleFor(double time) { + TraceEvent("DiskFailureInjectorBefore").detail("ThrottleUntil", throttleUntil); throttleUntil = std::max(throttleUntil, timer_monotonic() + time); + TraceEvent("DiskFailureInjectorAfter").detail("ThrottleUntil", throttleUntil); } double getDiskDelay() { if (!FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { return 0.0; } - return throttleUntil; + return std::max(0.0, throttleUntil - timer_monotonic()); } private: // members double throttleUntil = 0.0; + std::unordered_map throttleDisk; private: // construction DiskFailureInjector() = default; From 1b8b22deccfb940b669bbd65a8884089b2d3bd91 Mon Sep 17 00:00:00 2001 From: negoyal Date: Mon, 12 Jul 2021 17:51:01 -0700 Subject: [PATCH 005/142] Wrapper class to avoid adding overhead to all async disk calls --- fdbclient/ClientWorkerInterface.h | 10 ++- fdbrpc/AsyncFileDelayed.actor.h | 89 ++++++++++++++++++++ fdbrpc/AsyncFileEIO.actor.h | 26 ++---- fdbrpc/AsyncFileKAIO.actor.h | 13 --- fdbrpc/AsyncFileNonDurable.actor.h | 36 ++------ fdbrpc/Net2FileSystem.cpp | 3 + fdbrpc/sim2.actor.cpp | 20 +---- fdbrpc/simulator.h | 7 +- fdbserver/worker.actor.cpp | 10 ++- fdbserver/workloads/DiskThrottling.actor.cpp | 54 ++++-------- flow/network.h | 52 +++++++++--- tests/fast/DiskThrottledCycle.toml | 1 + 12 files changed, 182 insertions(+), 139 deletions(-) create mode 100644 fdbrpc/AsyncFileDelayed.actor.h diff --git a/fdbclient/ClientWorkerInterface.h b/fdbclient/ClientWorkerInterface.h index b73c43ebd7..181017cfcf 100644 --- a/fdbclient/ClientWorkerInterface.h +++ b/fdbclient/ClientWorkerInterface.h @@ -94,12 +94,16 @@ struct SetFailureInjection { constexpr static FileIdentifier 
file_identifier = 15439864; ReplyPromise reply; struct ThrottleDiskCommand { - double time; - Optional address; // TODO: NEELAM: how do we identify the machine + // how often should the delay be inserted (0 meaning once, 10 meaning every 10 secs) + double delayFrequency; + // min delay to be inserted + double delayMin; + // max delay to be inserted + double delayMax; template void serialize(Ar& ar) { - serializer(ar, time, address); + serializer(ar, delayFrequency, delayMin, delayMax); } }; Optional throttleDisk; diff --git a/fdbrpc/AsyncFileDelayed.actor.h b/fdbrpc/AsyncFileDelayed.actor.h new file mode 100644 index 0000000000..5dfb9c655a --- /dev/null +++ b/fdbrpc/AsyncFileDelayed.actor.h @@ -0,0 +1,89 @@ +/* + * AsyncFileDelayed.actor.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "flow/flow.h" +#include "flow/serialize.h" +#include "flow/genericactors.actor.h" +#include "fdbrpc/IAsyncFile.h" +#include "flow/network.h" +#include "flow/ActorCollection.h" +#include "flow/actorcompiler.h" + + +//template +class AsyncFileDelayed final : public IAsyncFile, public ReferenceCounted { +private: + Reference file; +public: + explicit AsyncFileDelayed(Reference file) : file(file) {} + + void addref() override { ReferenceCounted::addref(); } + void delref() override { ReferenceCounted::delref(); } + + Future read(void* data, int length, int64_t offset) override { + double delay = 0.0; + auto res = g_network->global(INetwork::enFailureInjector); + if (res) + delay = static_cast(res)->getDiskDelay(); + TraceEvent("AsyncFileDelayedRead").detail("ThrottleDelay", delay); + return delayed(file->read(data, length, offset), delay); + } + + Future write(void const* data, int length, int64_t offset) override { + double delay = 0.0; + auto res = g_network->global(INetwork::enFailureInjector); + if (res) + delay = static_cast(res)->getDiskDelay(); + TraceEvent("AsyncFileDelayedWrite").detail("ThrottleDelay", delay); + return delayed(file->write(data, length, offset), delay); + } + + Future truncate(int64_t size) override { + double delay = 0.0; + auto res = g_network->global(INetwork::enFailureInjector); + if (res) + delay = static_cast(res)->getDiskDelay(); + return delayed(file->truncate(size), delay); + } + + Future sync() override { + double delay = 0.0; + auto res = g_network->global(INetwork::enFailureInjector); + if (res) + delay = static_cast(res)->getDiskDelay(); + return delayed(file->sync(), delay); + } + + Future size() const override { + double delay = 0.0; + auto res = g_network->global(INetwork::enFailureInjector); + if (res) + delay = static_cast(res)->getDiskDelay(); + return delayed(file->size(), delay); + } + + int64_t debugFD() const override { + return file->debugFD(); + } + + std::string getFilename() const override { + return 
file->getFilename(); + } +}; diff --git a/fdbrpc/AsyncFileEIO.actor.h b/fdbrpc/AsyncFileEIO.actor.h index c962e60098..1d3ab4791e 100644 --- a/fdbrpc/AsyncFileEIO.actor.h +++ b/fdbrpc/AsyncFileEIO.actor.h @@ -162,16 +162,14 @@ public: Future read(void* data, int length, int64_t offset) override { ++countFileLogicalReads; ++countLogicalReads; - double throttleFor = diskFailureInjector->getDiskDelay(); - return read_impl(fd, data, length, offset, throttleFor); + return read_impl(fd, data, length, offset); } Future write(void const* data, int length, int64_t offset) override // Copies data synchronously { ++countFileLogicalWrites; ++countLogicalWrites; - double throttleFor = diskFailureInjector->getDiskDelay(); // Standalone copy = StringRef((const uint8_t*)data, length); - return write_impl(fd, err, StringRef((const uint8_t*)data, length), offset, throttleFor); + return write_impl(fd, err, StringRef((const uint8_t*)data, length), offset); } Future truncate(int64_t size) override { ++countFileLogicalWrites; @@ -272,7 +270,6 @@ private: int fd, flags; Reference err; std::string filename; - //DiskFailureInjector* diskFailureInjector; mutable Int64MetricHandle countFileLogicalWrites; mutable Int64MetricHandle countFileLogicalReads; @@ -280,8 +277,7 @@ private: mutable Int64MetricHandle countLogicalReads; AsyncFileEIO(int fd, int flags, std::string const& filename) - : fd(fd), flags(flags), filename(filename), err(new ErrorInfo), - diskFailureInjector(DiskFailureInjector::injector()) { + : fd(fd), flags(flags), filename(filename), err(new ErrorInfo) { if (!g_network->isSimulated()) { countFileLogicalWrites.init(LiteralStringRef("AsyncFile.CountFileLogicalWrites"), filename); countFileLogicalReads.init(LiteralStringRef("AsyncFile.CountFileLogicalReads"), filename); @@ -333,18 +329,13 @@ private: TraceEvent("AsyncFileClosed").suppressFor(1.0).detail("Fd", fd); } - ACTOR static Future read_impl(int fd, void* data, int length, int64_t offset, double throttleFor) { + ACTOR 
static Future read_impl(int fd, void* data, int length, int64_t offset) { state TaskPriority taskID = g_network->getCurrentTask(); state Promise p; // fprintf(stderr, "eio_read (fd=%d length=%d offset=%lld)\n", fd, length, offset); state eio_req* r = eio_read(fd, data, length, offset, 0, eio_callback, &p); try { wait(p.getFuture()); - // throttleDisk if enabled - //double throttleFor = diskFailureInjector->getDiskDelay(); - if (throttleFor > 0.0) { - wait(delay(throttleFor)); - } } catch (...) { g_network->setCurrentTask(taskID); eio_cancel(r); @@ -367,17 +358,12 @@ private: } } - ACTOR static Future write_impl(int fd, Reference err, StringRef data, int64_t offset, double throttleFor) { + ACTOR static Future write_impl(int fd, Reference err, StringRef data, int64_t offset) { state TaskPriority taskID = g_network->getCurrentTask(); state Promise p; state eio_req* r = eio_write(fd, (void*)data.begin(), data.size(), offset, 0, eio_callback, &p); try { wait(p.getFuture()); - // throttleDisk if enabled - //double throttleFor = diskFailureInjector->getDiskDelay(); - if (throttleFor > 0.0) { - wait(delay(throttleFor)); - } } catch (...) 
{ g_network->setCurrentTask(taskID); eio_cancel(r); @@ -567,8 +553,6 @@ private: static void apple_fsync(eio_req* req) { req->result = fcntl(req->int1, F_FULLFSYNC, 0); } static void free_req(eio_req* req) { free(req); } #endif -public: - DiskFailureInjector* diskFailureInjector; }; #ifdef FILESYSTEM_IMPL diff --git a/fdbrpc/AsyncFileKAIO.actor.h b/fdbrpc/AsyncFileKAIO.actor.h index 15553a85e2..c82b57161b 100644 --- a/fdbrpc/AsyncFileKAIO.actor.h +++ b/fdbrpc/AsyncFileKAIO.actor.h @@ -195,10 +195,6 @@ public: void addref() override { ReferenceCounted::addref(); } void delref() override { ReferenceCounted::delref(); } - ACTOR static void throttleDisk(double throttleFor) { - if (throttleFor > 0.0) - wait(delay(throttleFor)); - } Future read(void* data, int length, int64_t offset) override { ++countFileLogicalReads; ++countLogicalReads; @@ -216,9 +212,6 @@ public: enqueue(io, "read", this); Future result = io->result.getFuture(); - // throttleDisk if enabled - throttleDisk(diskFailureInjector->getDiskDelay()); - #if KAIO_LOGGING // result = map(result, [=](int r) mutable { KAIOLogBlockEvent(io, OpLogEntry::READY, r); return r; }); #endif @@ -244,9 +237,6 @@ public: enqueue(io, "write", this); Future result = io->result.getFuture(); - // throttleDisk if enabled - throttleDisk(diskFailureInjector->getDiskDelay()); - #if KAIO_LOGGING // result = map(result, [=](int r) mutable { KAIOLogBlockEvent(io, OpLogEntry::READY, r); return r; }); #endif @@ -758,9 +748,6 @@ private: } } } - -public: - DiskFailureInjector* diskFailureInjector; }; #if KAIO_LOGGING diff --git a/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/AsyncFileNonDurable.actor.h index 0c63846169..98bbe0c4e8 100644 --- a/fdbrpc/AsyncFileNonDurable.actor.h +++ b/fdbrpc/AsyncFileNonDurable.actor.h @@ -62,7 +62,7 @@ private: Future shutdown; public: - explicit AsyncFileDetachable(Reference file) : file(file), diskFailureInjector(DiskFailureInjector::injector()) { shutdown = doShutdown(this); } + explicit 
AsyncFileDetachable(Reference file) : file(file) { shutdown = doShutdown(this); } ACTOR Future doShutdown(AsyncFileDetachable* self) { wait(success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture())); @@ -85,20 +85,12 @@ public: Future read(void* data, int length, int64_t offset) override { if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady()) return io_error().asInjectedFault(); - // throttleDisk if enabled - auto throttleFor = diskFailureInjector->getDiskDelay(); - if (throttleFor > 0.0) { - TraceEvent("AsyncFileDetachable_Read").detail("ThrottleDelay", throttleFor); - //wait(delay(throttleFor)); - } return sendErrorOnShutdown(file->read(data, length, offset)); } Future write(void const* data, int length, int64_t offset) override { if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady()) return io_error().asInjectedFault(); - if (diskFailureInjector->getDiskDelay() > 0.0) - TraceEvent("AsyncFileDetachable_Write").detail("ThrottleDelay", diskFailureInjector->getDiskDelay()); return sendErrorOnShutdown(file->write(data, length, offset)); } @@ -130,8 +122,6 @@ public: throw io_error().asInjectedFault(); return file->getFilename(); } -public: - DiskFailureInjector* diskFailureInjector; }; // An async file implementation which wraps another async file and will randomly destroy sectors that it is writing when @@ -203,7 +193,7 @@ private: bool aio) : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters), openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false), - aio(aio), diskFailureInjector(DiskFailureInjector::injector()) + aio(aio) { // This is only designed to work in simulation @@ -321,7 +311,7 @@ public: // Passes along reads straight to the underlying file, waiting for any outstanding changes that could affect the // results - Future read(void* data, int length, int64_t offset) override { 
return read(this, data, length, offset, diskFailureInjector->getDiskDelay()); } + Future read(void* data, int length, int64_t offset) override { return read(this, data, length, offset); } // Writes data to the file. Writes are delayed a random amount of time before being // passed to the underlying file @@ -336,7 +326,7 @@ public: Promise writeStarted; Promise> writeEnded; - writeEnded.send(write(this, writeStarted, writeEnded.getFuture(), data, length, offset, diskFailureInjector->getDiskDelay())); + writeEnded.send(write(this, writeStarted, writeEnded.getFuture(), data, length, offset)); return writeStarted.getFuture(); } @@ -444,7 +434,7 @@ private: return readFuture.get(); } - ACTOR Future read(AsyncFileNonDurable* self, void* data, int length, int64_t offset, double throttleFor = 0.0) { + ACTOR Future read(AsyncFileNonDurable* self, void* data, int length, int64_t offset) { state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess(); state TaskPriority currentTaskID = g_network->getCurrentTask(); wait(g_simulator.onMachine(currentProcess)); @@ -452,12 +442,6 @@ private: try { state int rep = wait(self->onRead(self, data, length, offset)); wait(g_simulator.onProcess(currentProcess, currentTaskID)); - - // throttleDisk if enabled - if (throttleFor > 0.0) { - TraceEvent("AsyncFileNonDurable_ReadDone", self->id).detail("ThrottleDelay", throttleFor).detail("Filename", self->filename).detail("ReadLength", length).detail("Offset", offset); - wait(delay(throttleFor)); - } return rep; } catch (Error& e) { state Error err = e; @@ -474,8 +458,7 @@ private: Future> ownFuture, void const* data, int length, - int64_t offset, - double throttleFor = 0.0) { + int64_t offset) { state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess(); state TaskPriority currentTaskID = g_network->getCurrentTask(); wait(g_simulator.onMachine(currentProcess)); @@ -639,11 +622,6 @@ private: } wait(waitForAll(writeFutures)); - // throttleDisk if enabled - 
if (throttleFor > 0.0) { - TraceEvent("AsyncFileNonDurable_WriteDone", self->id).detail("ThrottleDelay", throttleFor).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset); - wait(delay(throttleFor)); - } //TraceEvent("AsyncFileNonDurable_WriteDone", self->id).detail("Delay", delayDuration).detail("Filename", self->filename).detail("WriteLength", length).detail("Offset", offset); return Void(); } @@ -889,8 +867,6 @@ private: throw err; } } -public: - DiskFailureInjector* diskFailureInjector; }; #include "flow/unactorcompiler.h" diff --git a/fdbrpc/Net2FileSystem.cpp b/fdbrpc/Net2FileSystem.cpp index 71a7d784a1..a71115a859 100644 --- a/fdbrpc/Net2FileSystem.cpp +++ b/fdbrpc/Net2FileSystem.cpp @@ -31,6 +31,7 @@ #define FILESYSTEM_IMPL 1 #include "fdbrpc/AsyncFileCached.actor.h" +#include "fdbrpc/AsyncFileDelayed.actor.h" #include "fdbrpc/AsyncFileEIO.actor.h" #include "fdbrpc/AsyncFileWinASIO.actor.h" #include "fdbrpc/AsyncFileKAIO.actor.h" @@ -76,6 +77,8 @@ Future> Net2FileSystem::open(const std::string& file static_cast((void*)g_network->global(INetwork::enASIOService))); if (FLOW_KNOBS->PAGE_WRITE_CHECKSUM_HISTORY > 0) f = map(f, [=](Reference r) { return Reference(new AsyncFileWriteChecker(r)); }); + if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) + f = map(f, [=](Reference r) { return Reference(new AsyncFileDelayed(r)); }); return f; } diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 093ef389ac..1e30618279 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -34,6 +34,7 @@ #include "fdbrpc/IAsyncFile.h" #include "fdbrpc/AsyncFileCached.actor.h" #include "fdbrpc/AsyncFileNonDurable.actor.h" +#include "fdbrpc/AsyncFileDelayed.actor.h" #include "flow/crc32c.h" #include "fdbrpc/TraceFileIO.h" #include "flow/FaultInjection.h" @@ -1949,13 +1950,6 @@ public: void clogPair(const IPAddress& from, const IPAddress& to, double seconds) override { g_clogging.clogPairFor(from, to, seconds); } - void 
throttleDisk(ProcessInfo* machine, double seconds) override { - machine->throttleDiskFor = seconds; - TraceEvent("ThrottleDisk").detail("Delay", seconds). - detail("Roles", getRoles(machine->address)). - detail("Address", machine->address). - detail("StartingClass", machine->startingClass.toString()); - } std::vector getAllProcesses() const override { std::vector processes; for (auto& c : machines) { @@ -2397,19 +2391,11 @@ Future waitUntilDiskReady(Reference diskParameters, int64_ diskParameters->nextOperation += (1.0 / diskParameters->iops) + (size / diskParameters->bandwidth); double randomLatency; - if (g_simulator.getCurrentProcess()->throttleDiskFor) { - randomLatency = g_simulator.getCurrentProcess()->throttleDiskFor; - TraceEvent("WaitUntilDiskReadyThrottling") - .detail("Delay", randomLatency); - } else if (sync) { + if (sync) { randomLatency = .005 + deterministicRandom()->random01() * (BUGGIFY ? 1.0 : .010); } else randomLatency = 10 * deterministicRandom()->random01() / diskParameters->iops; - TraceEvent("WaitUntilDiskReady").detail("Delay", randomLatency). - detail("Roles", g_simulator.getRoles(g_simulator.getCurrentProcess()->address)). - detail("Address", g_simulator.getCurrentProcess()->address). 
- detail("ThrottleDiskFor", g_simulator.getCurrentProcess()->throttleDiskFor); return delayUntil(diskParameters->nextOperation + randomLatency); } @@ -2488,6 +2474,8 @@ Future> Sim2FileSystem::open(const std::string& file f = AsyncFileDetachable::open(f); if (FLOW_KNOBS->PAGE_WRITE_CHECKSUM_HISTORY > 0) f = map(f, [=](Reference r) { return Reference(new AsyncFileWriteChecker(r)); }); + if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) + f = map(f, [=](Reference r) { return Reference(new AsyncFileDelayed(r)); }); return f; } else return AsyncFileCached::open(filename, flags, mode); diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 1da850e48c..6404eafc17 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -87,7 +87,6 @@ public: uint64_t fault_injection_r; double fault_injection_p1, fault_injection_p2; bool failedDisk; - double throttleDiskFor; UID uid; @@ -103,7 +102,7 @@ public: : name(name), locality(locality), startingClass(startingClass), addresses(addresses), address(addresses.address), dataFolder(dataFolder), network(net), coordinationFolder(coordinationFolder), failed(false), excluded(false), rebooting(false), fault_injection_p1(0), fault_injection_p2(0), - fault_injection_r(0), machine(0), cleared(false), failedDisk(false), throttleDiskFor(0) { + fault_injection_r(0), machine(0), cleared(false), failedDisk(false) { uid = deterministicRandom()->randomUniqueID(); } @@ -375,7 +374,6 @@ public: virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) = 0; virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) = 0; - virtual void throttleDisk(ProcessInfo* machine, double seconds) = 0; virtual std::vector getAllProcesses() const = 0; virtual ProcessInfo* getProcessByAddress(NetworkAddress const& address) = 0; virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) = 0; @@ -464,9 +462,8 @@ struct DiskParameters : ReferenceCounted { double nextOperation; int64_t iops; int64_t 
bandwidth; - double throttleFor; - DiskParameters(int64_t iops, int64_t bandwidth) : nextOperation(0), iops(iops), bandwidth(bandwidth), throttleFor(0) {} + DiskParameters(int64_t iops, int64_t bandwidth) : nextOperation(0), iops(iops), bandwidth(bandwidth) {} }; // Simulates delays for performing operations on disk diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 4a99e02265..2b20f695ae 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1516,8 +1516,14 @@ ACTOR Future workerServer(Reference connFile, when(SetFailureInjection req = waitNext(interf.clientInterface.setFailureInjection.getFuture())) { if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { if (req.throttleDisk.present()) { - TraceEvent("DiskThrottleRequest").detail("Delay", req.throttleDisk.get().time); - DiskFailureInjector::injector()->throttleFor(req.throttleDisk.get().time); + TraceEvent("DiskThrottleRequest").detail("DelayFrequency",req.throttleDisk.get().delayFrequency). + detail("DelayMin", req.throttleDisk.get().delayMin). 
+ detail("DelayMax", req.throttleDisk.get().delayMax); + auto diskFailureInjector = DiskFailureInjector::injector(); + //DiskFailureInjector::injector()->throttleFor(req.throttleDisk.get()); + diskFailureInjector->throttleFor(req.throttleDisk.get().delayFrequency, + req.throttleDisk.get().delayMin, + req.throttleDisk.get().delayMax); } req.reply.send(Void()); } else { diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskThrottling.actor.cpp index 61c465ae7f..8529be5fc3 100644 --- a/fdbserver/workloads/DiskThrottling.actor.cpp +++ b/fdbserver/workloads/DiskThrottling.actor.cpp @@ -10,12 +10,18 @@ struct DiskThrottlingWorkload : TestWorkload { bool enabled; double testDuration; - double throttleFor; + double throttleFrequency; + double throttleMin; + double throttleMax; DiskThrottlingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { enabled = !clientId; // only do this on the "first" client testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); - throttleFor = getOption(options, LiteralStringRef("throttleDelay"), 2.0); - TraceEvent("DiskThrottlingWorkload").detail("TestDuration", testDuration).detail("For", throttleFor); + throttleFrequency = getOption(options, LiteralStringRef("throttleFrequency"), 0.0); + throttleMin = getOption(options, LiteralStringRef("throttleMin"), 2.0); + throttleMax = getOption(options, LiteralStringRef("throttleMax"), 2.0); + TraceEvent("DiskThrottlingWorkload") + .detail("TestDuration", testDuration).detail("Frequency", throttleFrequency) + .detail("Min", throttleMin).detail("Max", throttleMax); } std::string description() const override { @@ -28,12 +34,6 @@ struct DiskThrottlingWorkload : TestWorkload { Future setup(Database const& cx) override { return Void(); } Future start(Database const& cx) override { - //if (&g_simulator == g_network && enabled) { - // TraceEvent("DiskThrottlingStart").detail("For", throttleFor); - // return timeout(reportErrors(throttleDiskClient(cx, 
this), "DiskThrottlingError"), - // testDuration, - // Void()); - //} else if (enabled) { return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), testDuration, @@ -46,13 +46,6 @@ struct DiskThrottlingWorkload : TestWorkload { void getMetrics(vector& m) override {} - ACTOR void doThrottle_unused(ISimulator::ProcessInfo* machine, double t, double delay = 0.0) { - wait(::delay(delay)); - TraceEvent("ThrottleDisk").detail("For", t); - g_simulator.throttleDisk(machine, t); - TraceEvent("ThrottleDiskSet").detail("For", t); - } - static void checkDiskThrottleResult(Future res, WorkerInterface worker) { if (res.isError()) { auto err = res.getError(); @@ -67,36 +60,20 @@ struct DiskThrottlingWorkload : TestWorkload { } } - ACTOR void doThrottle(WorkerInterface worker, double t, double delay = 0.0) { + ACTOR void doThrottle(WorkerInterface worker, double frequency, double minDelay, double maxDelay, double startDelay = 0.0) { state Future res; - wait(::delay(delay)); + wait(::delay(startDelay)); SetFailureInjection::ThrottleDiskCommand throttleDisk; - throttleDisk.time = t; + throttleDisk.delayFrequency = frequency; + throttleDisk.delayMin = minDelay; + throttleDisk.delayMax = maxDelay; SetFailureInjection req; req.throttleDisk = throttleDisk; - TraceEvent("ThrottleDisk").detail("For", t); res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); checkDiskThrottleResult(res, worker); } - static Future getAllWorkers_unused(DiskThrottlingWorkload* self, std::vector* result) { - result->clear(); - *result = g_simulator.getAllProcesses(); - return Void(); - } - - static Future getAllStorageWorkers_unused(Database cx, DiskThrottlingWorkload* self, std::vector* result) { - vector all = g_simulator.getAllProcesses(); - for (int i = 0; i < all.size(); i++) - if (!all[i]->failed && - all[i]->name == std::string("Server") && - ((all[i]->startingClass == ProcessClass::StorageClass) || - (all[i]->startingClass == 
ProcessClass::UnsetClass))) - result->emplace_back(all[i]); - return Void(); - } - ACTOR static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { result->clear(); std::vector res = @@ -125,8 +102,7 @@ struct DiskThrottlingWorkload : TestWorkload { wait(poisson(&lastTime, 1)); wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); auto machine = deterministicRandom()->randomChoice(machines); - TraceEvent("DoThrottleDisk").detail("For", self->throttleFor); - self->doThrottle(machine, self->throttleFor); + self->doThrottle(machine, self->throttleFrequency, self->throttleMin, self->throttleMax); } } }; diff --git a/flow/network.h b/flow/network.h index 651882d23e..4b34a648cc 100644 --- a/flow/network.h +++ b/flow/network.h @@ -647,6 +647,44 @@ public: // Returns the interface that should be used to make and accept socket connections }; +struct DelayGenerator : FastAllocated { + + void setDelay(double frequency, double min, double max) { + delayFrequency = frequency; + delayMin = min; + delayMax = max; + delayFor = (delayMin == delayMax) ? delayMin : deterministicRandom()->randomInt(delayMin, delayMax); + delayUntil = std::max(delayUntil, timer_monotonic() + delayFor); + TraceEvent("DelayGeneratorSetDelay").detail("DelayFrequency", frequency).detail("DelayMin", min). + detail("DelayMax", max).detail("DelayFor", delayFor).detail("DelayUntil", delayUntil); + } + + double getDelay() { + // If a delayFrequency was specified, this logic determines the delay to be inserted at any point in time + if (delayFrequency) { + auto timeElapsed = fmod(timer_monotonic(), delayFrequency); + TraceEvent("DelayGeneratorGetDelay").detail("DelayFrequency", delayFrequency). + detail("TimeElapsed", timeElapsed).detail("DelayFor", delayFor); + return std::max(0.0, delayFor - timeElapsed); + } + TraceEvent("DelayGeneratorGetDelay").detail("DelayFrequency", delayFrequency).
+ detail("CurTime", timer_monotonic()).detail("DelayUntil", delayUntil); + return std::max(0.0, delayUntil - timer_monotonic()); + } + +private: //members + double delayFrequency = 0.0; // how often should the delay be inserted (0 meaning once, 10 meaning every 10 secs) + double delayMin; // min delay to be inserted + double delayMax; // max delay to be inserted + double delayFor = 0.0; // randomly chosen delay between min and max + double delayUntil = 0.0; // used when the delayFrequency is 0 + +public: // construction + DelayGenerator() = default; + DelayGenerator(DelayGenerator const&) = delete; + +}; + struct DiskFailureInjector : FastAllocated { static DiskFailureInjector* injector() { auto res = g_network->global(INetwork::enFailureInjector); @@ -657,25 +695,19 @@ struct DiskFailureInjector : FastAllocated { return static_cast(res); } - //virtual void throttleFor(double time) = 0; - //virtual double getDiskDelay() = 0; - - void throttleFor(double time) { - TraceEvent("DiskFailureInjectorBefore").detail("ThrottleUntil", throttleUntil); - throttleUntil = std::max(throttleUntil, timer_monotonic() + time); - TraceEvent("DiskFailureInjectorAfter").detail("ThrottleUntil", throttleUntil); + void throttleFor(double frequency, double delayMin, double delayMax) { + delayGenerator.setDelay(frequency, delayMin, delayMax); } double getDiskDelay() { if (!FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { return 0.0; } - return std::max(0.0, throttleUntil - timer_monotonic()); + return delayGenerator.getDelay(); } private: // members - double throttleUntil = 0.0; - std::unordered_map throttleDisk; + DelayGenerator delayGenerator; private: // construction DiskFailureInjector() = default; diff --git a/tests/fast/DiskThrottledCycle.toml b/tests/fast/DiskThrottledCycle.toml index c0f35293aa..60429350b6 100644 --- a/tests/fast/DiskThrottledCycle.toml +++ b/tests/fast/DiskThrottledCycle.toml @@ -10,4 +10,5 @@ testTitle = 'DiskThrottledCycle' [[test.workload]] testName = 'DiskThrottling' 
testDuration = 30.0 + throttleFrequency = 10.0 From f950fe9f9d0fcb18c36927198ded3adafaea1712 Mon Sep 17 00:00:00 2001 From: negoyal Date: Sun, 18 Jul 2021 17:35:05 -0700 Subject: [PATCH 006/142] Chaos workload to randomly flip bits during SS writes. --- fdbclient/ClientWorkerInterface.h | 16 ++++++++- fdbrpc/AsyncFileDelayed.actor.h | 36 ++++++++++++++++++-- fdbserver/CMakeLists.txt | 3 +- fdbserver/worker.actor.cpp | 8 ++++- flow/network.h | 55 ++++++++++++++++++++++++++++++- tests/CMakeLists.txt | 1 + 6 files changed, 112 insertions(+), 7 deletions(-) diff --git a/fdbclient/ClientWorkerInterface.h b/fdbclient/ClientWorkerInterface.h index 181017cfcf..9c80312708 100644 --- a/fdbclient/ClientWorkerInterface.h +++ b/fdbclient/ClientWorkerInterface.h @@ -106,11 +106,25 @@ struct SetFailureInjection { serializer(ar, delayFrequency, delayMin, delayMax); } }; + + struct FlipBitsCommand { + // File that the bit flips are requested for + //Reference filename; + // percent of bits to flip in the given file + double percentBitFlips; + + template + void serialize(Ar& ar) { + serializer(ar, percentBitFlips); + } + }; + Optional throttleDisk; + Optional flipBits; template void serialize(Ar& ar) { - serializer(ar, reply, throttleDisk); + serializer(ar, reply, throttleDisk, flipBits); } }; #endif diff --git a/fdbrpc/AsyncFileDelayed.actor.h b/fdbrpc/AsyncFileDelayed.actor.h index 5dfb9c655a..6e990f276c 100644 --- a/fdbrpc/AsyncFileDelayed.actor.h +++ b/fdbrpc/AsyncFileDelayed.actor.h @@ -37,22 +37,52 @@ public: void addref() override { ReferenceCounted::addref(); } void delref() override { ReferenceCounted::delref(); } + uint8_t toggleNthBit(uint8_t b, uint8_t n) { + auto singleBitMask = uint8_t(1) << (n); + return b ^ singleBitMask; + } + + void flipBits(void* data, int length, double percentBitFlips) { + auto toFlip = int(float(length*8) * percentBitFlips / 100); + TraceEvent("AsyncFileFlipBits").detail("ToFlip", toFlip); + for (auto i = 0; i < toFlip; i++) { + auto byteOffset 
= deterministicRandom()->randomInt64(0, length); + auto bitOffset = uint8_t(deterministicRandom()->randomInt(0, 8)); + ((uint8_t *)data)[byteOffset] = toggleNthBit(((uint8_t *)data)[byteOffset], bitOffset); + } + } + Future read(void* data, int length, int64_t offset) override { double delay = 0.0; auto res = g_network->global(INetwork::enFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); TraceEvent("AsyncFileDelayedRead").detail("ThrottleDelay", delay); - return delayed(file->read(data, length, offset), delay); + return delayed(file->read(data, length, offset), delay); } Future write(void const* data, int length, int64_t offset) override { double delay = 0.0; - auto res = g_network->global(INetwork::enFailureInjector); + char* pdata = nullptr; + auto res = g_network->global(INetwork::enBitFlipper); + if (res) { + auto percentBitFlips = static_cast(res)->getPercentBitFlips(); + if (percentBitFlips > 0.0) { + TraceEvent("AsyncFileCorruptWrite").detail("PercentBitFlips", percentBitFlips); + pdata = new char[length]; + memcpy(pdata, data, length); + flipBits(pdata, length, percentBitFlips); + auto diff = memcmp(pdata, data, length); + if (diff) + TraceEvent("AsyncFileCorruptWriteDiff").detail("Diff", diff); + } + } + + res = g_network->global(INetwork::enFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); TraceEvent("AsyncFileDelayedWrite").detail("ThrottleDelay", delay); - return delayed(file->write(data, length, offset), delay); + return delayed(file->write((pdata != nullptr) ? 
pdata : data, length, offset), delay); } Future truncate(int64_t size) override { diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index efa2c7fbf1..d1686bf478 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -151,6 +151,7 @@ set(FDBSERVER_SRCS workloads/BackupToDBAbort.actor.cpp workloads/BackupToDBCorrectness.actor.cpp workloads/BackupToDBUpgrade.actor.cpp + workloads/BitFlipping.actor.cpp workloads/BlobStoreWorkload.h workloads/BulkLoad.actor.cpp workloads/BulkSetup.actor.h @@ -158,7 +159,6 @@ set(FDBSERVER_SRCS workloads/ChangeConfig.actor.cpp workloads/ClientTransactionProfileCorrectness.actor.cpp workloads/TriggerRecovery.actor.cpp - workloads/DiskThrottling.actor.cpp workloads/SuspendProcesses.actor.cpp workloads/CommitBugCheck.actor.cpp workloads/ConfigureDatabase.actor.cpp @@ -172,6 +172,7 @@ set(FDBSERVER_SRCS workloads/DDMetricsExclude.actor.cpp workloads/DiskDurability.actor.cpp workloads/DiskDurabilityTest.actor.cpp + workloads/DiskThrottling.actor.cpp workloads/Downgrade.actor.cpp workloads/DummyWorkload.actor.cpp workloads/ExternalWorkload.actor.cpp diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 2b20f695ae..8d70c1ccda 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1520,10 +1520,16 @@ ACTOR Future workerServer(Reference connFile, detail("DelayMin", req.throttleDisk.get().delayMin). detail("DelayMax", req.throttleDisk.get().delayMax); auto diskFailureInjector = DiskFailureInjector::injector(); - //DiskFailureInjector::injector()->throttleFor(req.throttleDisk.get()); diskFailureInjector->throttleFor(req.throttleDisk.get().delayFrequency, req.throttleDisk.get().delayMin, req.throttleDisk.get().delayMax); + } else if (req.flipBits.present()) { + TraceEvent("FlipBitsRequest"). + detail("Percent", req.flipBits.get().percentBitFlips); + //detail("File",req.flipBits.get().file). 
+ auto bitFlipper = BitFlipper::flipper(); + bitFlipper->setPercentBitFlips(req.flipBits.get().percentBitFlips); + //flipBits(req.flipBits.get().file, req.flipBits.get().percent); } req.reply.send(Void()); } else { diff --git a/flow/network.h b/flow/network.h index 4b34a648cc..6f96e993cf 100644 --- a/flow/network.h +++ b/flow/network.h @@ -487,7 +487,8 @@ public: enClientFailureMonitor = 12, enSQLiteInjectedError = 13, enGlobalConfig = 14, - enFailureInjector = 15 + enFailureInjector = 15, + enBitFlipper = 16 }; virtual void longTaskCheck(const char* name) {} @@ -714,4 +715,56 @@ private: // construction DiskFailureInjector(DiskFailureInjector const&) = delete; }; +struct BitFlipper : FastAllocated { + static BitFlipper* flipper() { + auto res = g_network->global(INetwork::enBitFlipper); + if (!res) { + res = new BitFlipper(); + g_network->setGlobal(INetwork::enBitFlipper, res); + } + return static_cast(res); + } + + //uint8_t toggleNthBit(uint8_t b, uint8_t n) { + // auto singleBitMask = uint8(1) << (n); + // return b ^ singleBitMask; + //} + + //void flipBitAtOffset(int64_t byteOffset, uint8_t bitOffset) { + //auto oneByte = make([]byte, 1); + // uint8_t oneByte[1]; + // int readBytes = wait(file->Read(oneByte, 1, byteOffset)); + + // oneByte[0] = toggleNthBit(oneByte[0], bitOffset); + // file->write(oneByte, 1, byteOffset); + //} + + //void flipBits(Reference fileName, double percent) { + // file = fileName; + // auto toFlip = int(float64(file->size()*8) * percent / 100); + // for (auto i = 0; i < toFlip; i++) { + // auto byteOffset = deterministicRandom()->randomInt64(0, file->size()); + // auto bitOffset = uint8_t(deterministicRandom()->randomInt(0, 8)); + // flipBitAtOffset(byteOffset, bitOffset); + // } + //} + + double getPercentBitFlips() { + TraceEvent("BitFlipperGetPercentBitFlips").detail("PercentBitFlips", percentBitFlips); + return percentBitFlips; + } + + void setPercentBitFlips(double percentFlips) { + percentBitFlips = percentFlips; + 
TraceEvent("BitFlipperSetPercentBitFlips").detail("PercentBitFlips", percentBitFlips); + } + +private: // members + double percentBitFlips = 0.0; + //Reference file; + +private: // construction + BitFlipper() = default; + BitFlipper(BitFlipper const&) = delete; +}; #endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 5a5bf2c208..758b16949b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -118,6 +118,7 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES fast/BackupCorrectnessClean.toml) add_fdb_test(TEST_FILES fast/BackupToDBCorrectness.toml) add_fdb_test(TEST_FILES fast/BackupToDBCorrectnessClean.toml) + add_fdb_test(TEST_FILES fast/BitFlippedCycle.toml IGNORE) add_fdb_test(TEST_FILES fast/CacheTest.toml) add_fdb_test(TEST_FILES fast/CloggedSideband.toml) add_fdb_test(TEST_FILES fast/ConfigureLocked.toml) From 596ca92e2fad977a08738d96ede113734f1062e4 Mon Sep 17 00:00:00 2001 From: negoyal Date: Mon, 19 Jul 2021 11:13:57 -0700 Subject: [PATCH 007/142] Add missing files and rename some. 
--- ...Delayed.actor.h => AsyncFileChaos.actor.h} | 22 ++--- fdbrpc/Net2FileSystem.cpp | 4 +- fdbrpc/sim2.actor.cpp | 4 +- fdbserver/worker.actor.cpp | 2 - fdbserver/workloads/BitFlipping.actor.cpp | 92 +++++++++++++++++++ flow/network.h | 6 +- tests/fast/BitFlippedCycle.toml | 13 +++ 7 files changed, 123 insertions(+), 20 deletions(-) rename fdbrpc/{AsyncFileDelayed.actor.h => AsyncFileChaos.actor.h} (81%) create mode 100644 fdbserver/workloads/BitFlipping.actor.cpp create mode 100644 tests/fast/BitFlippedCycle.toml diff --git a/fdbrpc/AsyncFileDelayed.actor.h b/fdbrpc/AsyncFileChaos.actor.h similarity index 81% rename from fdbrpc/AsyncFileDelayed.actor.h rename to fdbrpc/AsyncFileChaos.actor.h index 6e990f276c..7890ff0b51 100644 --- a/fdbrpc/AsyncFileDelayed.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -28,14 +28,14 @@ //template -class AsyncFileDelayed final : public IAsyncFile, public ReferenceCounted { +class AsyncFileChaos final : public IAsyncFile, public ReferenceCounted { private: Reference file; public: - explicit AsyncFileDelayed(Reference file) : file(file) {} + explicit AsyncFileChaos(Reference file) : file(file) {} - void addref() override { ReferenceCounted::addref(); } - void delref() override { ReferenceCounted::delref(); } + void addref() override { ReferenceCounted::addref(); } + void delref() override { ReferenceCounted::delref(); } uint8_t toggleNthBit(uint8_t b, uint8_t n) { auto singleBitMask = uint8_t(1) << (n); @@ -54,10 +54,10 @@ public: Future read(void* data, int length, int64_t offset) override { double delay = 0.0; - auto res = g_network->global(INetwork::enFailureInjector); + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); - TraceEvent("AsyncFileDelayedRead").detail("ThrottleDelay", delay); + TraceEvent("AsyncFileChaosRead").detail("ThrottleDelay", delay); return delayed(file->read(data, length, offset), delay); } @@ -78,16 +78,16 @@ public: } } - res = 
g_network->global(INetwork::enFailureInjector); + res = g_network->global(INetwork::enDiskFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); - TraceEvent("AsyncFileDelayedWrite").detail("ThrottleDelay", delay); + TraceEvent("AsyncFileChaosWrite").detail("ThrottleDelay", delay); return delayed(file->write((pdata != nullptr) ? pdata : data, length, offset), delay); } Future truncate(int64_t size) override { double delay = 0.0; - auto res = g_network->global(INetwork::enFailureInjector); + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); return delayed(file->truncate(size), delay); @@ -95,7 +95,7 @@ public: Future sync() override { double delay = 0.0; - auto res = g_network->global(INetwork::enFailureInjector); + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); return delayed(file->sync(), delay); @@ -103,7 +103,7 @@ public: Future size() const override { double delay = 0.0; - auto res = g_network->global(INetwork::enFailureInjector); + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) delay = static_cast(res)->getDiskDelay(); return delayed(file->size(), delay); diff --git a/fdbrpc/Net2FileSystem.cpp b/fdbrpc/Net2FileSystem.cpp index a71115a859..76128ffd86 100644 --- a/fdbrpc/Net2FileSystem.cpp +++ b/fdbrpc/Net2FileSystem.cpp @@ -31,7 +31,7 @@ #define FILESYSTEM_IMPL 1 #include "fdbrpc/AsyncFileCached.actor.h" -#include "fdbrpc/AsyncFileDelayed.actor.h" +#include "fdbrpc/AsyncFileChaos.actor.h" #include "fdbrpc/AsyncFileEIO.actor.h" #include "fdbrpc/AsyncFileWinASIO.actor.h" #include "fdbrpc/AsyncFileKAIO.actor.h" @@ -78,7 +78,7 @@ Future> Net2FileSystem::open(const std::string& file if (FLOW_KNOBS->PAGE_WRITE_CHECKSUM_HISTORY > 0) f = map(f, [=](Reference r) { return Reference(new AsyncFileWriteChecker(r)); }); if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) - f = map(f, [=](Reference r) { return 
Reference(new AsyncFileDelayed(r)); }); + f = map(f, [=](Reference r) { return Reference(new AsyncFileChaos(r)); }); return f; } diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 1e30618279..4051f935a0 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -34,7 +34,7 @@ #include "fdbrpc/IAsyncFile.h" #include "fdbrpc/AsyncFileCached.actor.h" #include "fdbrpc/AsyncFileNonDurable.actor.h" -#include "fdbrpc/AsyncFileDelayed.actor.h" +#include "fdbrpc/AsyncFileChaos.actor.h" #include "flow/crc32c.h" #include "fdbrpc/TraceFileIO.h" #include "flow/FaultInjection.h" @@ -2475,7 +2475,7 @@ Future> Sim2FileSystem::open(const std::string& file if (FLOW_KNOBS->PAGE_WRITE_CHECKSUM_HISTORY > 0) f = map(f, [=](Reference r) { return Reference(new AsyncFileWriteChecker(r)); }); if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) - f = map(f, [=](Reference r) { return Reference(new AsyncFileDelayed(r)); }); + f = map(f, [=](Reference r) { return Reference(new AsyncFileChaos(r)); }); return f; } else return AsyncFileCached::open(filename, flags, mode); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 8d70c1ccda..1c5bf22408 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1526,10 +1526,8 @@ ACTOR Future workerServer(Reference connFile, } else if (req.flipBits.present()) { TraceEvent("FlipBitsRequest"). detail("Percent", req.flipBits.get().percentBitFlips); - //detail("File",req.flipBits.get().file). 
auto bitFlipper = BitFlipper::flipper(); bitFlipper->setPercentBitFlips(req.flipBits.get().percentBitFlips); - //flipBits(req.flipBits.get().file, req.flipBits.get().percent); } req.reply.send(Void()); } else { diff --git a/fdbserver/workloads/BitFlipping.actor.cpp b/fdbserver/workloads/BitFlipping.actor.cpp new file mode 100644 index 0000000000..ff1941fbe3 --- /dev/null +++ b/fdbserver/workloads/BitFlipping.actor.cpp @@ -0,0 +1,92 @@ +#include "fdbclient/NativeAPI.actor.h" +#include "fdbserver/TesterInterface.actor.h" +#include "fdbserver/workloads/workloads.actor.h" +#include "fdbrpc/simulator.h" +#include "fdbserver/WorkerInterface.actor.h" +#include "fdbserver/ServerDBInfo.h" +#include "fdbserver/QuietDatabase.h" +#include "flow/actorcompiler.h" // This must be the last #include. + +struct BitFlippingWorkload : TestWorkload { + bool enabled; + double testDuration; + double percentBitFlips; + BitFlippingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { + enabled = !clientId; // only do this on the "first" client + testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); + percentBitFlips = getOption(options, LiteralStringRef("percentBitFlips"), 1.0); + TraceEvent("BitFlippingWorkload") + .detail("TestDuration", testDuration).detail("Percentage", percentBitFlips); + } + + std::string description() const override { + if (&g_simulator == g_network) + return "BitFlipping"; + else + return "NoSimBitFlipping"; + } + + Future setup(Database const& cx) override { return Void(); } + + Future start(Database const& cx) override { + if (enabled) { + return timeout(reportErrors(flipBitsClient(cx, this), "BitFlippingError"), + testDuration, + Void()); + } else + return Void(); + } + + Future check(Database const& cx) override { return true; } + + void getMetrics(vector& m) override {} + + static void checkBitFlipResult(Future res, WorkerInterface worker) { + if (res.isError()) { + auto err = res.getError(); + if (err.code() == 
error_code_client_invalid_operation) { + TraceEvent(SevError, "ChaosDisabled") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); + } else { + TraceEvent(SevError, "BitFlippingFailed") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) + .error(err); + } + } + } + + ACTOR void doBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { + state Future res; + wait(::delay(startDelay)); + SetFailureInjection::FlipBitsCommand flipBits; + flipBits.percentBitFlips = percentage; + SetFailureInjection req; + req.flipBits = flipBits; + res = worker.clientInterface.setFailureInjection.getReply(req); + wait(ready(res)); + checkBitFlipResult(res, worker); + } + + ACTOR static Future getAllStorageWorkers(Database cx, BitFlippingWorkload* self, std::vector* result) { + result->clear(); + state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); + for (auto& worker : res) { + result->emplace_back(worker); + } + return Void(); + } + + ACTOR template + Future flipBitsClient(Database cx, BitFlippingWorkload* self) { + state double lastTime = now(); + state double workloadEnd = now() + self->testDuration; + state std::vector machines; + loop { + wait(poisson(&lastTime, 1)); + wait(BitFlippingWorkload::getAllStorageWorkers(cx, self, &machines)); + auto machine = deterministicRandom()->randomChoice(machines); + self->doBitFlips(machine, self->percentBitFlips); + } + } +}; +WorkloadFactory BitFlippingWorkloadFactory("BitFlipping"); diff --git a/flow/network.h b/flow/network.h index 6f96e993cf..0ce243e9f2 100644 --- a/flow/network.h +++ b/flow/network.h @@ -487,7 +487,7 @@ public: enClientFailureMonitor = 12, enSQLiteInjectedError = 13, enGlobalConfig = 14, - enFailureInjector = 15, + enDiskFailureInjector = 15, enBitFlipper = 16 }; @@ -688,10 +688,10 @@ public: // construction struct DiskFailureInjector : FastAllocated { static DiskFailureInjector* injector() { - auto res = 
g_network->global(INetwork::enFailureInjector); + auto res = g_network->global(INetwork::enDiskFailureInjector); if (!res) { res = new DiskFailureInjector(); - g_network->setGlobal(INetwork::enFailureInjector, res); + g_network->setGlobal(INetwork::enDiskFailureInjector, res); } return static_cast(res); } diff --git a/tests/fast/BitFlippedCycle.toml b/tests/fast/BitFlippedCycle.toml new file mode 100644 index 0000000000..3cab1f74fe --- /dev/null +++ b/tests/fast/BitFlippedCycle.toml @@ -0,0 +1,13 @@ +[[test]] +testTitle = 'BitFlippedCycle' + + [[test.workload]] + testName = 'Cycle' + transactionsPerSecond = 2500.0 + testDuration = 60.0 + expectedRate = 0 + + [[test.workload]] + testName = 'BitFlipping' + testDuration = 60.0 + percentBitFlips = 20.0 From fa3ce6d98712732b9649446a4ab0f0ff5d9e98b0 Mon Sep 17 00:00:00 2001 From: negoyal Date: Tue, 20 Jul 2021 15:28:46 -0700 Subject: [PATCH 008/142] Adding the clear range workload. --- fdbserver/CMakeLists.txt | 1 + fdbserver/workloads/BitFlipping.actor.cpp | 20 +++++++ .../workloads/ClearSingleRange.actor.cpp | 59 +++++++++++++++++++ fdbserver/workloads/DiskThrottling.actor.cpp | 20 +++++++ 4 files changed, 100 insertions(+) create mode 100644 fdbserver/workloads/ClearSingleRange.actor.cpp diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index d1686bf478..9aedb11e9a 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -157,6 +157,7 @@ set(FDBSERVER_SRCS workloads/BulkSetup.actor.h workloads/Cache.actor.cpp workloads/ChangeConfig.actor.cpp + workloads/ClearSingleRange.actor.cpp workloads/ClientTransactionProfileCorrectness.actor.cpp workloads/TriggerRecovery.actor.cpp workloads/SuspendProcesses.actor.cpp diff --git a/fdbserver/workloads/BitFlipping.actor.cpp b/fdbserver/workloads/BitFlipping.actor.cpp index ff1941fbe3..b3e2f3f235 100644 --- a/fdbserver/workloads/BitFlipping.actor.cpp +++ b/fdbserver/workloads/BitFlipping.actor.cpp @@ -1,3 +1,23 @@ +/* + * BitFlipping.actor.cpp + * + * 
This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #include "fdbclient/NativeAPI.actor.h" #include "fdbserver/TesterInterface.actor.h" #include "fdbserver/workloads/workloads.actor.h" diff --git a/fdbserver/workloads/ClearSingleRange.actor.cpp b/fdbserver/workloads/ClearSingleRange.actor.cpp new file mode 100644 index 0000000000..3419da80c6 --- /dev/null +++ b/fdbserver/workloads/ClearSingleRange.actor.cpp @@ -0,0 +1,59 @@ +/* + * ClearSingleRange.actor.cpp + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "fdbclient/NativeAPI.actor.h" +#include "fdbserver/TesterInterface.actor.h" +#include "fdbserver/workloads/workloads.actor.h" +#include "fdbserver/workloads/BulkSetup.actor.h" +#include "flow/actorcompiler.h" // This must be the last #include. + +struct ClearSingleRange : TestWorkload { + Key begin; + Key end; + double startDelay; + + ClearSingleRange(WorkloadContext const& wcx) : TestWorkload(wcx) { + begin = getOption(options, LiteralStringRef("begin"), normalKeys.begin); + end = getOption(options, LiteralStringRef("end"), normalKeys.end); + startDelay = getOption(options, LiteralStringRef("beginClearRange"), 10.0); + } + + std::string description() const override { return "ClearSingleRangeWorkload"; } + + Future setup(Database const& cx) override { return Void(); } + + Future start(Database const& cx) override { + return clientId != 0 ? Void() : fdbClientClearRange(cx, this); + } + + Future check(Database const& cx) override { return true; } + + void getMetrics(vector& m) override {} + + ACTOR static Future fdbClientClearRange(Database db, ClearSingleRange* self) { + state Transaction tr(db); + TraceEvent("ClearSingleRangeWaiting").detail("StartDelay", self->startDelay); + wait(delay(self->startDelay)); + tr.clear(KeyRangeRef(self->begin, self->end)); + return Void(); + } +}; + +WorkloadFactory ClearSingleRangeWorkloadFactory("ClearSingleRange"); diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskThrottling.actor.cpp index 8529be5fc3..5eead212f4 100644 --- a/fdbserver/workloads/DiskThrottling.actor.cpp +++ b/fdbserver/workloads/DiskThrottling.actor.cpp @@ -1,3 +1,23 @@ +/* + * DiskThrottling.actor.cpp + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #include "fdbclient/NativeAPI.actor.h" #include "fdbserver/TesterInterface.actor.h" #include "fdbserver/workloads/workloads.actor.h" From 050c218502c671bb6b32c335517a4781dd702eb8 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 28 Jul 2021 16:03:37 -0700 Subject: [PATCH 009/142] New Disk Delay Logic and ChaosMetrics. --- fdbclient/ClientWorkerInterface.h | 22 +- fdbrpc/AsyncFileChaos.actor.h | 119 +++---- fdbrpc/AsyncFileNonDurable.actor.h | 11 +- fdbrpc/IAsyncFile.h | 4 - fdbrpc/sim2.actor.cpp | 3 + fdbrpc/simulator.h | 1 + fdbserver/worker.actor.cpp | 50 +-- fdbserver/workloads/BitFlipping.actor.cpp | 259 +++++++++++---- .../workloads/ClearSingleRange.actor.cpp | 4 +- fdbserver/workloads/DiskThrottling.actor.cpp | 302 +++++++++++++----- flow/Knobs.cpp | 1 + flow/Knobs.h | 1 + flow/Net2.actor.cpp | 6 +- flow/network.h | 152 +++++---- tests/fast/DiskThrottledCycle.toml | 4 +- 15 files changed, 602 insertions(+), 337 deletions(-) diff --git a/fdbclient/ClientWorkerInterface.h b/fdbclient/ClientWorkerInterface.h index 9c80312708..28caa48739 100644 --- a/fdbclient/ClientWorkerInterface.h +++ b/fdbclient/ClientWorkerInterface.h @@ -93,23 +93,21 @@ struct ProfilerRequest { struct SetFailureInjection { constexpr static FileIdentifier file_identifier = 15439864; ReplyPromise reply; - struct ThrottleDiskCommand { - // how often should the delay be inserted (0 meaning once, 10 meaning every 10 secs) - double delayFrequency; - // min delay to be inserted - double delayMin; - //max delay to be inserted - double delayMax; + struct DiskFailureCommand { 
+ // how often should the disk be stalled (0 meaning once, 10 meaning every 10 secs) + double stallInterval; + // Period of time disk stalls will be injected for + double stallPeriod; + // Period of time the disk will be slowed down for + double throttlePeriod; template void serialize(Ar& ar) { - serializer(ar, delayFrequency, delayMin, delayMax); + serializer(ar, stallInterval, stallPeriod, throttlePeriod); } }; struct FlipBitsCommand { - // File that the bit flips are requested for - //Reference filename; // percent of bits to flip in the given file double percentBitFlips; @@ -119,12 +117,12 @@ struct SetFailureInjection { } }; - Optional throttleDisk; + Optional diskFailure; Optional flipBits; template void serialize(Ar& ar) { - serializer(ar, reply, throttleDisk, flipBits); + serializer(ar, reply, diskFailure, flipBits); } }; #endif diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 7890ff0b51..c7eaaefe15 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -1,5 +1,5 @@ /* - * VersionedBTree.actor.cpp + * AsyncFileChaos.actor.h * * This source file is part of the FoundationDB open source project * @@ -26,94 +26,103 @@ #include "flow/ActorCollection.h" #include "flow/actorcompiler.h" - -//template +// template class AsyncFileChaos final : public IAsyncFile, public ReferenceCounted { private: Reference file; + Arena arena; + public: explicit AsyncFileChaos(Reference file) : file(file) {} void addref() override { ReferenceCounted::addref(); } void delref() override { ReferenceCounted::delref(); } - uint8_t toggleNthBit(uint8_t b, uint8_t n) { - auto singleBitMask = uint8_t(1) << (n); - return b ^ singleBitMask; - } + static double getDelay() { + double delayFor = 0.0; + auto res = g_network->global(INetwork::enDiskFailureInjector); + if (res) { + DiskFailureInjector* delayInjector = static_cast(res); + delayFor = delayInjector->getDiskDelay(); - void flipBits(void* data, int length, double percentBitFlips) 
{ - auto toFlip = int(float(length*8) * percentBitFlips / 100); - TraceEvent("AsyncFileFlipBits").detail("ToFlip", toFlip); - for (auto i = 0; i < toFlip; i++) { - auto byteOffset = deterministicRandom()->randomInt64(0, length); - auto bitOffset = uint8_t(deterministicRandom()->randomInt(0, 8)); - ((uint8_t *)data)[byteOffset] = toggleNthBit(((uint8_t *)data)[byteOffset], bitOffset); + // increment the metric for disk delays + if (delayFor > 0.0) { + auto res = g_network->global(INetwork::enChaosMetrics); + if (res) { + ChaosMetrics* chaosMetrics = static_cast(res); + chaosMetrics->diskDelays++; + } + } } + return delayFor; } Future read(void* data, int length, int64_t offset) override { - double delay = 0.0; - auto res = g_network->global(INetwork::enDiskFailureInjector); - if (res) - delay = static_cast(res)->getDiskDelay(); - TraceEvent("AsyncFileChaosRead").detail("ThrottleDelay", delay); - return delayed(file->read(data, length, offset), delay); + double diskDelay = getDelay(); + + // Wait for diskDelay before submitting the I/O + // Template types are being provided explicitly because they can't be automatically deduced for some reason. 
+ return mapAsync(Void)>, int>( + delay(diskDelay), [=](Void _) -> Future { return file->read(data, length, offset); }); } Future write(void const* data, int length, int64_t offset) override { - double delay = 0.0; char* pdata = nullptr; + + // Check if a bit flip event was injected, if so, copy the buffer contents + // with a random bit flipped in a new buffer and use that for the write auto res = g_network->global(INetwork::enBitFlipper); if (res) { - auto percentBitFlips = static_cast(res)->getPercentBitFlips(); - if (percentBitFlips > 0.0) { - TraceEvent("AsyncFileCorruptWrite").detail("PercentBitFlips", percentBitFlips); - pdata = new char[length]; + auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); + if (bitFlipPercentage > 0.0) { + pdata = (char*)arena.allocate4kAlignedBuffer(length); memcpy(pdata, data, length); - flipBits(pdata, length, percentBitFlips); - auto diff = memcmp(pdata, data, length); - if (diff) - TraceEvent("AsyncFileCorruptWriteDiff").detail("Diff", diff); + if (deterministicRandom()->random01() < bitFlipPercentage) { + // copy buffer with a flipped bit + pdata[deterministicRandom()->randomInt(0, length)] ^= (1 << deterministicRandom()->randomInt(0, 8)); + + // increment the metric for bit flips + auto res = g_network->global(INetwork::enChaosMetrics); + if (res) { + ChaosMetrics* chaosMetrics = static_cast(res); + chaosMetrics->bitFlips++; + } + } } } - res = g_network->global(INetwork::enDiskFailureInjector); - if (res) - delay = static_cast(res)->getDiskDelay(); - TraceEvent("AsyncFileChaosWrite").detail("ThrottleDelay", delay); - return delayed(file->write((pdata != nullptr) ? 
pdata : data, length, offset), delay); + double diskDelay = getDelay(); + // Wait for diskDelay before submitting the I/O + return mapAsync(Void)>, Void>(delay(diskDelay), [=](Void _) -> Future { + if (pdata) + return holdWhile(pdata, file->write(pdata, length, offset)); + + return file->write(data, length, offset); + }); } Future truncate(int64_t size) override { - double delay = 0.0; - auto res = g_network->global(INetwork::enDiskFailureInjector); - if (res) - delay = static_cast(res)->getDiskDelay(); - return delayed(file->truncate(size), delay); + double diskDelay = getDelay(); + // Wait for diskDelay before submitting the I/O + return mapAsync(Void)>, Void>( + delay(diskDelay), [=](Void _) -> Future { return file->truncate(size); }); } Future sync() override { - double delay = 0.0; - auto res = g_network->global(INetwork::enDiskFailureInjector); - if (res) - delay = static_cast(res)->getDiskDelay(); - return delayed(file->sync(), delay); + double diskDelay = getDelay(); + // Wait for diskDelay before submitting the I/O + return mapAsync(Void)>, Void>( + delay(diskDelay), [=](Void _) -> Future { return file->sync(); }); } Future size() const override { - double delay = 0.0; - auto res = g_network->global(INetwork::enDiskFailureInjector); - if (res) - delay = static_cast(res)->getDiskDelay(); - return delayed(file->size(), delay); + double diskDelay = getDelay(); + // Wait for diskDelay before submitting the I/O + return mapAsync(Void)>, int64_t>( + delay(diskDelay), [=](Void _) -> Future { return file->size(); }); } - int64_t debugFD() const override { - return file->debugFD(); - } + int64_t debugFD() const override { return file->debugFD(); } - std::string getFilename() const override { - return file->getFilename(); - } + std::string getFilename() const override { return file->getFilename(); } }; diff --git a/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/AsyncFileNonDurable.actor.h index 98bbe0c4e8..8489a3842d 100644 --- a/fdbrpc/AsyncFileNonDurable.actor.h +++ 
b/fdbrpc/AsyncFileNonDurable.actor.h @@ -191,12 +191,11 @@ private: Reference diskParameters, NetworkAddress openedAddress, bool aio) - : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters), - openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false), - aio(aio) - { + : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters), + openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false), + aio(aio) { - // This is only designed to work in simulation + // This is only designed to work in simulation ASSERT(g_network->isSimulated()); this->id = deterministicRandom()->randomUniqueID(); @@ -458,7 +457,7 @@ private: Future> ownFuture, void const* data, int length, - int64_t offset) { + int64_t offset) { state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess(); state TaskPriority currentTaskID = g_network->getCurrentTask(); wait(g_simulator.onMachine(currentProcess)); diff --git a/fdbrpc/IAsyncFile.h b/fdbrpc/IAsyncFile.h index f21760cb00..ed703514c6 100644 --- a/fdbrpc/IAsyncFile.h +++ b/fdbrpc/IAsyncFile.h @@ -34,7 +34,6 @@ // must complete or cancel, but you should probably look at the file implementations you'll be using. 
class IAsyncFile { public: - //explicit IAsyncFile() : diskFailureInjector(DiskFailureInjector::injector()) {} virtual ~IAsyncFile(); // Pass these to g_network->open to get an IAsyncFile enum { @@ -96,9 +95,6 @@ public: // Used for rate control, at present, only AsyncFileCached supports it virtual Reference const& getRateControl() { throw unsupported_operation(); } virtual void setRateControl(Reference const& rc) { throw unsupported_operation(); } - -//public: - //DiskFailureInjector* diskFailureInjector; }; typedef void (*runCycleFuncPtr)(); diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 4051f935a0..94b78f1ede 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -1186,6 +1186,9 @@ public: m->protocolVersion = protocol; m->setGlobal(enTDMetrics, (flowGlobalType)&m->tdmetrics); + if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { + m->setGlobal(enChaosMetrics, (flowGlobalType)&m->chaosMetrics); + } m->setGlobal(enNetworkConnections, (flowGlobalType)m->network); m->setGlobal(enASIOTimedOut, (flowGlobalType) false); diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 6404eafc17..764b8b125b 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -73,6 +73,7 @@ public: LocalityData locality; ProcessClass startingClass; TDMetricCollection tdmetrics; + ChaosMetrics chaosMetrics; HistogramRegistry histograms; std::map> listenerMap; std::map> boundUDPSockets; diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 1c5bf22408..ba57c9505c 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1165,6 +1165,28 @@ struct SharedLogsValue { : actor(actor), uid(uid), requests(requests) {} }; +ACTOR Future chaosMetricsLogger() { + + auto res = g_network->global(INetwork::enChaosMetrics); + if (!res) + return Void(); + + state ChaosMetrics* chaosMetrics = static_cast(res); + chaosMetrics->clear(); + + loop { + wait(delay(FLOW_KNOBS->CHAOS_LOGGING_INTERVAL)); + + TraceEvent e("ChaosMetrics"); + // double elapsed 
= now() - chaosMetrics->startTime; + double elapsed = timer_monotonic() - chaosMetrics->startTime; + e.detail("Elapsed", elapsed); + chaosMetrics->getFields(&e); + e.trackLatest("ChaosMetrics"); + chaosMetrics->clear(); + } +} + ACTOR Future workerServer(Reference connFile, Reference>> ccInterface, LocalityData locality, @@ -1191,6 +1213,7 @@ ACTOR Future workerServer(Reference connFile, state Promise stopping; state WorkerCache storageCache; state Future metricsLogger; + state Future chaosMetricsActor; state Reference> degraded = FlowTransport::transport().getDegraded(); // tLogFnForOptions() can return a function that doesn't correspond with the FDB version that the // TLogVersion represents. This can be done if the newer TLog doesn't support a requested option. @@ -1211,6 +1234,7 @@ ACTOR Future workerServer(Reference connFile, if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { TraceEvent(SevWarnAlways, "ChaosFeaturesEnabled"); + chaosMetricsActor = chaosMetricsLogger(); } folder = abspath(folder); @@ -1436,15 +1460,8 @@ ACTOR Future workerServer(Reference connFile, wait(waitForAll(recoveries)); recoveredDiskFiles.send(Void()); - errorForwarders.add(registrationClient(ccInterface, - interf, - asyncPriorityInfo, - initialClass, - ddInterf, - rkInterf, - degraded, - connFile, - issues)); + errorForwarders.add(registrationClient( + ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf, rkInterf, degraded, connFile, issues)); if (SERVER_KNOBS->ENABLE_WORKER_HEALTH_MONITOR) { errorForwarders.add(healthMonitor(ccInterface, interf, locality, dbInfo)); @@ -1515,19 +1532,14 @@ ACTOR Future workerServer(Reference connFile, } when(SetFailureInjection req = waitNext(interf.clientInterface.setFailureInjection.getFuture())) { if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { - if (req.throttleDisk.present()) { - TraceEvent("DiskThrottleRequest").detail("DelayFrequency",req.throttleDisk.get().delayFrequency). - detail("DelayMin", req.throttleDisk.get().delayMin). 
- detail("DelayMax", req.throttleDisk.get().delayMax); + if (req.diskFailure.present()) { auto diskFailureInjector = DiskFailureInjector::injector(); - diskFailureInjector->throttleFor(req.throttleDisk.get().delayFrequency, - req.throttleDisk.get().delayMin, - req.throttleDisk.get().delayMax); + diskFailureInjector->setDiskFailure(req.diskFailure.get().stallInterval, + req.diskFailure.get().stallPeriod, + req.diskFailure.get().throttlePeriod); } else if (req.flipBits.present()) { - TraceEvent("FlipBitsRequest"). - detail("Percent", req.flipBits.get().percentBitFlips); auto bitFlipper = BitFlipper::flipper(); - bitFlipper->setPercentBitFlips(req.flipBits.get().percentBitFlips); + bitFlipper->setBitFlipPercentage(req.flipBits.get().percentBitFlips); } req.reply.send(Void()); } else { diff --git a/fdbserver/workloads/BitFlipping.actor.cpp b/fdbserver/workloads/BitFlipping.actor.cpp index b3e2f3f235..8dd8781b6f 100644 --- a/fdbserver/workloads/BitFlipping.actor.cpp +++ b/fdbserver/workloads/BitFlipping.actor.cpp @@ -28,85 +28,202 @@ #include "flow/actorcompiler.h" // This must be the last #include. 
struct BitFlippingWorkload : TestWorkload { - bool enabled; - double testDuration; - double percentBitFlips; - BitFlippingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { - enabled = !clientId; // only do this on the "first" client - testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); - percentBitFlips = getOption(options, LiteralStringRef("percentBitFlips"), 1.0); - TraceEvent("BitFlippingWorkload") - .detail("TestDuration", testDuration).detail("Percentage", percentBitFlips); - } + bool enabled; + double testDuration; + double percentBitFlips; + double periodicCheckInterval; + std::vector chosenWorkers; + std::vector> clients; - std::string description() const override { - if (&g_simulator == g_network) - return "BitFlipping"; - else - return "NoSimBitFlipping"; - } + BitFlippingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { + enabled = !clientId; // only do this on the "first" client + testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); + percentBitFlips = getOption(options, LiteralStringRef("percentBitFlips"), 10.0); + periodicCheckInterval = getOption(options, LiteralStringRef("periodicCheckInterval"), 10.0); + } - Future setup(Database const& cx) override { return Void(); } + std::string description() const override { + if (&g_simulator == g_network) + return "BitFlipping"; + else + return "NoSimBitFlipping"; + } - Future start(Database const& cx) override { - if (enabled) { - return timeout(reportErrors(flipBitsClient(cx, this), "BitFlippingError"), - testDuration, - Void()); - } else - return Void(); - } + Future setup(Database const& cx) override { return Void(); } - Future check(Database const& cx) override { return true; } + // Starts the workload by - + // 1. Starting the actor to periodically check chaosMetrics, and + // 2. 
Starting the actor that injects failures on chosen storage servers + Future start(Database const& cx) override { + if (enabled) { + clients.push_back(periodicMetricCheck(this)); + clients.push_back(flipBitsClient(cx, this)); + return timeout(waitForAll(clients), testDuration, Void()); + } else + return Void(); + } - void getMetrics(vector& m) override {} + Future check(Database const& cx) override { return true; } - static void checkBitFlipResult(Future res, WorkerInterface worker) { - if (res.isError()) { - auto err = res.getError(); - if (err.code() == error_code_client_invalid_operation) { - TraceEvent(SevError, "ChaosDisabled") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); - } else { - TraceEvent(SevError, "BitFlippingFailed") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) - .error(err); - } - } - } + void getMetrics(vector& m) override {} - ACTOR void doBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { - state Future res; - wait(::delay(startDelay)); - SetFailureInjection::FlipBitsCommand flipBits; - flipBits.percentBitFlips = percentage; - SetFailureInjection req; - req.flipBits = flipBits; - res = worker.clientInterface.setFailureInjection.getReply(req); - wait(ready(res)); - checkBitFlipResult(res, worker); - } + static void checkBitFlipResult(Future res, WorkerInterface worker) { + if (res.isError()) { + auto err = res.getError(); + if (err.code() == error_code_client_invalid_operation) { + TraceEvent(SevError, "ChaosDisabled") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); + } else { + TraceEvent(SevError, "BitFlippingFailed") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) + .error(err); + } + } + } - ACTOR static Future getAllStorageWorkers(Database cx, BitFlippingWorkload* self, std::vector* result) { - result->clear(); - state std::vector res = 
wait(getStorageWorkers(cx, self->dbInfo, false)); - for (auto& worker : res) { - result->emplace_back(worker); - } - return Void(); - } + ACTOR void doBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { + state Future res; + wait(::delay(startDelay)); + SetFailureInjection::FlipBitsCommand flipBits; + flipBits.percentBitFlips = percentage; + SetFailureInjection req; + req.flipBits = flipBits; + res = worker.clientInterface.setFailureInjection.getReply(req); + wait(ready(res)); + checkBitFlipResult(res, worker); + } - ACTOR template - Future flipBitsClient(Database cx, BitFlippingWorkload* self) { - state double lastTime = now(); - state double workloadEnd = now() + self->testDuration; - state std::vector machines; - loop { - wait(poisson(&lastTime, 1)); - wait(BitFlippingWorkload::getAllStorageWorkers(cx, self, &machines)); - auto machine = deterministicRandom()->randomChoice(machines); - self->doBitFlips(machine, self->percentBitFlips); - } - } + ACTOR static Future getAllStorageWorkers(Database cx, + BitFlippingWorkload* self, + std::vector* result) { + result->clear(); + state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); + for (auto& worker : res) { + result->emplace_back(worker); + } + return Void(); + } + + ACTOR template + Future flipBitsClient(Database cx, BitFlippingWorkload* self) { + state double lastTime = now(); + state double workloadEnd = now() + self->testDuration; + state std::vector machines; + loop { + wait(poisson(&lastTime, 1)); + wait(BitFlippingWorkload::getAllStorageWorkers(cx, self, &machines)); + auto machine = deterministicRandom()->randomChoice(machines); + + // If we have already chosen this worker, then just continue + if (find(self->chosenWorkers.begin(), self->chosenWorkers.end(), machine.address()) != + self->chosenWorkers.end()) + continue; + + // Keep track of chosen workers for verification purpose + self->chosenWorkers.emplace_back(machine.address()); + self->doBitFlips(machine, 
self->percentBitFlips); + } + } + + // Resend the chaos event to previosuly chosen workers, in case some workers got restarted and lost their chaos + // config + ACTOR static Future reSendChaos(BitFlippingWorkload* self) { + std::vector workers = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + std::map workersMap; + for (auto worker : workers) { + workersMap[worker.interf.address()] = worker.interf; + } + for (auto& workerAddress : self->chosenWorkers) { + auto itr = workersMap.find(workerAddress); + if (itr != workersMap.end()) + self->doBitFlips(itr->second, self->percentBitFlips); + } + return Void(); + } + // For fetching chaosMetrics to ensure chaos events are happening + // This is borrowed code from Status.actor.cpp + struct WorkerEvents : std::map {}; + + ACTOR static Future>>> latestEventOnWorkers( + std::vector workers, + std::string eventName) { + try { + state vector>> eventTraces; + for (int c = 0; c < workers.size(); c++) { + EventLogRequest req = + eventName.size() > 0 ? 
EventLogRequest(Standalone(eventName)) : EventLogRequest(); + eventTraces.push_back(errorOr(timeoutError(workers[c].interf.eventLogRequest.getReply(req), 2.0))); + } + + wait(waitForAll(eventTraces)); + + std::set failed; + WorkerEvents results; + + for (int i = 0; i < eventTraces.size(); i++) { + const ErrorOr& v = eventTraces[i].get(); + if (v.isError()) { + failed.insert(workers[i].interf.address().toString()); + results[workers[i].interf.address()] = TraceEventFields(); + } else { + results[workers[i].interf.address()] = v.get(); + } + } + + std::pair> val; + val.first = results; + val.second = failed; + + return val; + } catch (Error& e) { + ASSERT(e.code() == + error_code_actor_cancelled); // All errors should be filtering through the errorOr actor above + throw; + } + } + + // Fetches chaosMetrics and verifies that chaos events are happening for enabled workers + ACTOR static Future chaosGetStatus(BitFlippingWorkload* self) { + std::vector workers = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + + Future>>> latestEventsFuture; + latestEventsFuture = latestEventOnWorkers(workers, "ChaosMetrics"); + state Optional>> workerEvents = wait(latestEventsFuture); + + state WorkerEvents cMetrics = workerEvents.present() ? 
workerEvents.get().first : WorkerEvents(); + + // Now verify that all chosen workers for chaos events have non-zero chaosMetrics + for (auto& workerAddress : self->chosenWorkers) { + auto chaosMetrics = cMetrics.find(workerAddress); + if (chaosMetrics != cMetrics.end()) { + int bitFlips = chaosMetrics->second.getInt("BitFlips"); + + // we expect bitFlips to be non-zero for chosenWorkers + if (bitFlips == 0) { + TraceEvent(SevError, "ChaosGetStatus") + .detail("OnEndpoint", workerAddress.toString()) + .detail("BitFlips", bitFlips); + } + } + } + + return Void(); + } + + // Periodically fetches chaosMetrics to ensure that chaas events are taking place + ACTOR static Future periodicMetricCheck(BitFlippingWorkload* self) { + state double start = now(); + state double elapsed = 0.0; + + loop { + // re-send the chaos event in case of a process restart + wait(reSendChaos(self)); + elapsed += self->periodicCheckInterval; + wait(delayUntil(start + elapsed)); + wait(chaosGetStatus(self)); + } + } }; WorkloadFactory BitFlippingWorkloadFactory("BitFlipping"); diff --git a/fdbserver/workloads/ClearSingleRange.actor.cpp b/fdbserver/workloads/ClearSingleRange.actor.cpp index 3419da80c6..f8f48be929 100644 --- a/fdbserver/workloads/ClearSingleRange.actor.cpp +++ b/fdbserver/workloads/ClearSingleRange.actor.cpp @@ -39,9 +39,7 @@ struct ClearSingleRange : TestWorkload { Future setup(Database const& cx) override { return Void(); } - Future start(Database const& cx) override { - return clientId != 0 ? Void() : fdbClientClearRange(cx, this); - } + Future start(Database const& cx) override { return clientId != 0 ? 
Void() : fdbClientClearRange(cx, this); } Future check(Database const& cx) override { return true; } diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskThrottling.actor.cpp index 5eead212f4..264ef477a2 100644 --- a/fdbserver/workloads/DiskThrottling.actor.cpp +++ b/fdbserver/workloads/DiskThrottling.actor.cpp @@ -28,102 +28,230 @@ #include "flow/actorcompiler.h" // This must be the last #include. struct DiskThrottlingWorkload : TestWorkload { - bool enabled; - double testDuration; - double throttleFrequency; - double throttleMin; - double throttleMax; - DiskThrottlingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { - enabled = !clientId; // only do this on the "first" client - testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); - throttleFrequency = getOption(options, LiteralStringRef("throttleFrequency"), 0.0); - throttleMin = getOption(options, LiteralStringRef("throttleMin"), 2.0); - throttleMax = getOption(options, LiteralStringRef("throttleMax"), 2.0); - TraceEvent("DiskThrottlingWorkload") - .detail("TestDuration", testDuration).detail("Frequency", throttleFrequency) - .detail("Min", throttleMin).detail("Max", throttleMax); - } + bool enabled; + double testDuration; + double startDelay; + double stallInterval; + double stallPeriod; + double throttlePeriod; + double periodicCheckInterval; + std::vector chosenWorkers; + std::vector> clients; - std::string description() const override { - if (&g_simulator == g_network) - return "DiskThrottling"; - else - return "NoSimDiskThrolling"; - } + DiskThrottlingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { + enabled = !clientId; // only do this on the "first" client + startDelay = getOption(options, LiteralStringRef("startDelay"), 0.0); + testDuration = getOption(options, LiteralStringRef("testDuration"), 60.0); + stallInterval = getOption(options, LiteralStringRef("stallInterval"), 0.0); + stallPeriod = getOption(options, 
LiteralStringRef("stallPeriod"), 60.0); + throttlePeriod = getOption(options, LiteralStringRef("throttlePeriod"), 60.0); + periodicCheckInterval = getOption(options, LiteralStringRef("periodicCheckInterval"), 10.0); + } - Future setup(Database const& cx) override { return Void(); } + std::string description() const override { + if (&g_simulator == g_network) + return "DiskThrottling"; + else + return "NoSimDiskThrolling"; + } - Future start(Database const& cx) override { - if (enabled) { - return timeout(reportErrors(throttleDiskClient(cx, this), "DiskThrottlingError"), - testDuration, - Void()); - } else - return Void(); - } + Future setup(Database const& cx) override { return Void(); } - Future check(Database const& cx) override { return true; } + // Starts the workload by - + // 1. Starting the actor to periodically check chaosMetrics, and + // 2. Starting the actor that injects failures on chosen storage servers + Future start(Database const& cx) override { + if (enabled) { + clients.push_back(periodicMetricCheck(this)); + clients.push_back(throttleDiskClient(cx, this)); + return timeout(waitForAll(clients), testDuration, Void()); + } else + return Void(); + } - void getMetrics(vector& m) override {} + Future check(Database const& cx) override { return true; } - static void checkDiskThrottleResult(Future res, WorkerInterface worker) { - if (res.isError()) { - auto err = res.getError(); - if (err.code() == error_code_client_invalid_operation) { - TraceEvent(SevError, "ChaosDisabled") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); - } else { - TraceEvent(SevError, "DiskThrottlingFailed") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) - .error(err); - } - } - } + void getMetrics(vector& m) override {} - ACTOR void doThrottle(WorkerInterface worker, double frequency, double minDelay, double maxDelay, double startDelay = 0.0) { - state Future res; - wait(::delay(startDelay)); - 
SetFailureInjection::ThrottleDiskCommand throttleDisk; - throttleDisk.delayFrequency = frequency; - throttleDisk.delayMin = minDelay; - throttleDisk.delayMax = maxDelay; - SetFailureInjection req; - req.throttleDisk = throttleDisk; - res = worker.clientInterface.setFailureInjection.getReply(req); - wait(ready(res)); - checkDiskThrottleResult(res, worker); - } + static void checkDiskThrottleResult(Future res, WorkerInterface worker) { + if (res.isError()) { + auto err = res.getError(); + if (err.code() == error_code_client_invalid_operation) { + TraceEvent(SevError, "ChaosDisabled") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); + } else { + TraceEvent(SevError, "DiskThrottlingFailed") + .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) + .error(err); + } + } + } - ACTOR static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { - result->clear(); - std::vector res = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); - for (auto& worker : res) { - result->emplace_back(worker.interf); - } - return Void(); - } + // Sets the disk failure request + ACTOR void doThrottle(WorkerInterface worker, + double stallInterval, + double stallPeriod, + double throttlePeriod, + double startDelay) { + state Future res; + wait(::delay(startDelay)); + SetFailureInjection::DiskFailureCommand diskFailure; + diskFailure.stallInterval = stallInterval; + diskFailure.stallPeriod = stallPeriod; + diskFailure.throttlePeriod = throttlePeriod; + SetFailureInjection req; + req.diskFailure = diskFailure; + res = worker.clientInterface.setFailureInjection.getReply(req); + wait(ready(res)); + checkDiskThrottleResult(res, worker); + } - ACTOR static Future getAllStorageWorkers(Database cx, DiskThrottlingWorkload* self, std::vector* result) { - result->clear(); - state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); - for (auto& worker : res) { - 
result->emplace_back(worker); - } - return Void(); - } + // Currently unused, because we only inject disk failures on storage servers + ACTOR static Future getAllWorkers(DiskThrottlingWorkload* self, std::vector* result) { + result->clear(); + std::vector res = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + for (auto& worker : res) { + result->emplace_back(worker.interf); + } + return Void(); + } - ACTOR template - Future throttleDiskClient(Database cx, DiskThrottlingWorkload* self) { - state double lastTime = now(); - state double workloadEnd = now() + self->testDuration; - state std::vector machines; - loop { - wait(poisson(&lastTime, 1)); - wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); - auto machine = deterministicRandom()->randomChoice(machines); - self->doThrottle(machine, self->throttleFrequency, self->throttleMin, self->throttleMax); - } - } + ACTOR static Future getAllStorageWorkers(Database cx, + DiskThrottlingWorkload* self, + std::vector* result) { + result->clear(); + state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); + for (auto& worker : res) { + result->emplace_back(worker); + } + return Void(); + } + + // Choose random storage servers to inject disk failures + ACTOR template + Future throttleDiskClient(Database cx, DiskThrottlingWorkload* self) { + state double lastTime = now(); + state std::vector machines; + loop { + wait(poisson(&lastTime, 1)); + wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); + auto machine = deterministicRandom()->randomChoice(machines); + + // If we have already chosen this worker, then just continue + if (find(self->chosenWorkers.begin(), self->chosenWorkers.end(), machine.address()) != + self->chosenWorkers.end()) + continue; + + // Keep track of chosen workers for verification purpose + self->chosenWorkers.emplace_back(machine.address()); + self->doThrottle(machine, self->stallInterval, self->stallPeriod, 
self->throttlePeriod, self->startDelay); + } + } + + // Resend the chaos event to previosuly chosen workers, in case some workers got restarted and lost their chaos + // config + ACTOR static Future reSendChaos(DiskThrottlingWorkload* self) { + std::vector workers = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + std::map workersMap; + for (auto worker : workers) { + workersMap[worker.interf.address()] = worker.interf; + } + for (auto& workerAddress : self->chosenWorkers) { + auto itr = workersMap.find(workerAddress); + if (itr != workersMap.end()) + self->doThrottle( + itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + } + return Void(); + } + + // For fetching chaosMetrics to ensure chaos events are happening + // This is borrowed code from Status.actor.cpp + struct WorkerEvents : std::map {}; + + ACTOR static Future>>> latestEventOnWorkers( + std::vector workers, + std::string eventName) { + try { + state vector>> eventTraces; + for (int c = 0; c < workers.size(); c++) { + EventLogRequest req = + eventName.size() > 0 ? 
EventLogRequest(Standalone(eventName)) : EventLogRequest(); + eventTraces.push_back(errorOr(timeoutError(workers[c].interf.eventLogRequest.getReply(req), 2.0))); + } + + wait(waitForAll(eventTraces)); + + std::set failed; + WorkerEvents results; + + for (int i = 0; i < eventTraces.size(); i++) { + const ErrorOr& v = eventTraces[i].get(); + if (v.isError()) { + failed.insert(workers[i].interf.address().toString()); + results[workers[i].interf.address()] = TraceEventFields(); + } else { + results[workers[i].interf.address()] = v.get(); + } + } + + std::pair> val; + val.first = results; + val.second = failed; + + return val; + } catch (Error& e) { + ASSERT(e.code() == + error_code_actor_cancelled); // All errors should be filtering through the errorOr actor above + throw; + } + } + + // Fetches chaosMetrics and verifies that chaos events are happening for enabled workers + ACTOR static Future chaosGetStatus(DiskThrottlingWorkload* self) { + std::vector workers = + wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + + Future>>> latestEventsFuture; + latestEventsFuture = latestEventOnWorkers(workers, "ChaosMetrics"); + state Optional>> workerEvents = wait(latestEventsFuture); + + state WorkerEvents cMetrics = workerEvents.present() ? 
workerEvents.get().first : WorkerEvents(); + + // Now verify that all chosen workers for chaos events have non-zero chaosMetrics + std::vector>>>> futures; + + for (auto& workerAddress : self->chosenWorkers) { + auto chaosMetrics = cMetrics.find(workerAddress); + if (chaosMetrics != cMetrics.end()) { + int diskDelays = chaosMetrics->second.getInt("DiskDelays"); + + // we expect diskDelays to be non-zero for chosenWorkers + if (diskDelays == 0) { + TraceEvent(SevError, "ChaosGetStatus") + .detail("OnEndpoint", workerAddress.toString()) + .detail("DiskDelays", diskDelays); + } + } + } + + return Void(); + } + + // Periodically fetches chaosMetrics to ensure that chaas events are taking place + ACTOR static Future periodicMetricCheck(DiskThrottlingWorkload* self) { + state double start = now(); + state double elapsed = 0.0; + + loop { + // re-send the chaos event in case of a process restart + wait(reSendChaos(self)); + elapsed += self->periodicCheckInterval; + wait(delayUntil(start + elapsed)); + wait(chaosGetStatus(self)); + } + } }; WorkloadFactory DiskThrottlingWorkloadFactory("DiskThrottling"); diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 12bc0d70c9..322eb5d52d 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -66,6 +66,7 @@ void FlowKnobs::initialize(Randomize _randomize, IsSimulated _isSimulated) { // Chaos testing init( ENABLE_CHAOS_FEATURES, true ); + init( CHAOS_LOGGING_INTERVAL, 5.0 ); init( WRITE_TRACING_ENABLED, true ); if( randomize && BUGGIFY ) WRITE_TRACING_ENABLED = false; diff --git a/flow/Knobs.h b/flow/Knobs.h index 340848b68f..a6f9006c12 100644 --- a/flow/Knobs.h +++ b/flow/Knobs.h @@ -100,6 +100,7 @@ public: // Chaos testing bool ENABLE_CHAOS_FEATURES; + double CHAOS_LOGGING_INTERVAL; bool WRITE_TRACING_ENABLED; int TRACING_UDP_LISTENER_PORT; diff --git a/flow/Net2.actor.cpp b/flow/Net2.actor.cpp index c3b35f1203..85aea2c5f9 100644 --- a/flow/Net2.actor.cpp +++ b/flow/Net2.actor.cpp @@ -226,6 +226,7 @@ public: TaskPriority currentTaskID; 
uint64_t tasksIssued; TDMetricCollection tdmetrics; + ChaosMetrics chaosMetrics; double currentTime; // May be accessed off the network thread, e.g. by onMainThread std::atomic stopped; @@ -1188,6 +1189,9 @@ Net2::Net2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics) if (useMetrics) { setGlobal(INetwork::enTDMetrics, (flowGlobalType)&tdmetrics); } + if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { + setGlobal(INetwork::enChaosMetrics, (flowGlobalType)&chaosMetrics); + } setGlobal(INetwork::enNetworkConnections, (flowGlobalType)network); setGlobal(INetwork::enASIOService, (flowGlobalType)&reactor.ios); setGlobal(INetwork::enBlobCredentialFiles, &blobCredentialFiles); @@ -1513,7 +1517,7 @@ void Net2::run() { double newTaskBegin = timer_monotonic(); if (check_yield(TaskPriority::Max, tscNow)) { checkForSlowTask(tscBegin, tscNow, newTaskBegin - taskBegin, currentTaskID); - taskBegin = newTaskBegin; + taskBegin = newTaskBegin; FDB_TRACE_PROBE(run_loop_yield); ++countYields; break; diff --git a/flow/network.h b/flow/network.h index 0ce243e9f2..ec9923052b 100644 --- a/flow/network.h +++ b/flow/network.h @@ -347,7 +347,8 @@ struct NetworkMetrics { std::unordered_map activeTrackers; double lastRunLoopBusyness; // network thread busyness (measured every 5s by default) - std::atomic networkBusyness; // network thread busyness which is returned to the the client (measured every 1s by default) + std::atomic + networkBusyness; // network thread busyness which is returned to the the client (measured every 1s by default) // starvation trackers which keeps track of different task priorities std::vector starvationTrackers; @@ -487,8 +488,9 @@ public: enClientFailureMonitor = 12, enSQLiteInjectedError = 13, enGlobalConfig = 14, - enDiskFailureInjector = 15, - enBitFlipper = 16 + enChaosMetrics = 15, + enDiskFailureInjector = 16, + enBitFlipper = 17 }; virtual void longTaskCheck(const char* name) {} @@ -648,45 +650,40 @@ public: // Returns the interface that should be used to 
make and accept socket connections }; -struct DelayGenerator : FastAllocated { +// Chaos Metrics - We periodically log chaosMetrics to make sure that chaos events are happening +// Only includes DiskDelays which encapsulates all type delays and BitFlips for now +// Expand as per need +struct ChaosMetrics { - void setDelay(double frequency, double min, double max) { - delayFrequency = frequency; - delayMin = min; - delayMax = max; - delayFor = (delayMin == delayMax) ? delayMin : deterministicRandom()->randomInt(delayMin, delayMax); - delayUntil = std::max(delayUntil, timer_monotonic() + delayFor); - TraceEvent("DelayGeneratorSetDelay").detail("DelayFrequency", frequency).detail("DelayMin", min). - detail("DelayMax", max).detail("DelayFor", delayFor).detail("DelayUntil", delayUntil); + ChaosMetrics() { clear(); } + + void clear() { + memset(this, 0, sizeof(ChaosMetrics)); + startTime = timer_monotonic(); } - - double getDelay() { - // If a delayFrequency was specified, this logic determins the delay to be inserted at any point in time - if (delayFrequency) { - auto timeElapsed = fmod(timer_monotonic(), delayFrequency); - TraceEvent("DelayGeneratorGetDelay").detail("DelayFrequency", delayFrequency). - detail("TimeElapsed", timeElapsed).detail("DelayFor", delayFor); - return std::max(0.0, delayFor - timeElapsed); + + unsigned int diskDelays; + unsigned int bitFlips; + double startTime; + + void getFields(TraceEvent* e) { + std::pair metrics[] = { { "DiskDelays", diskDelays }, { "BitFlips", bitFlips } }; + if (e != nullptr) { + for (auto& m : metrics) { + char c = m.first[0]; + if (c != 0) { + e->detail(m.first, m.second); + } + } } - TraceEvent("DelayGeneratorGetDelay").detail("DelayFrequency", delayFrequency). 
- detail("CurTime", timer_monotonic()).detail("DelayUntil", delayUntil); - return std::max(0.0, delayUntil - timer_monotonic()); } - -private: //members - double delayFrequency = 0.0; // how often should the delay be inserted (0 meaning once, 10 meaning every 10 secs) - double delayMin; // min delay to be inserted - double delayMax; // max delay to be inserted - double delayFor = 0.0; // randomly chosen delay between min and max - double delayUntil = 0.0; // used when the delayFrequency is 0 - -public: // construction - DelayGenerator() = default; - DelayGenerator(DelayGenerator const&) = delete; - }; -struct DiskFailureInjector : FastAllocated { +// This class supports injecting two type of disk failures +// 1. Stalls: Every interval seconds, the disk will stall and no IO will complete for x seconds, where x is a randomly +// chosen interval +// 2. Slowdown: Random slowdown is injected to each disk operation for specified period of time +struct DiskFailureInjector { static DiskFailureInjector* injector() { auto res = g_network->global(INetwork::enDiskFailureInjector); if (!res) { @@ -696,26 +693,56 @@ struct DiskFailureInjector : FastAllocated { return static_cast(res); } - void throttleFor(double frequency, double delayMin, double delayMax) { - delayGenerator.setDelay(frequency, delayMin, delayMax); + void setDiskFailure(double interval, double stallFor, double throttleFor) { + stallInterval = interval; + stallPeriod = stallFor; + stallUntil = std::max(stallUntil, timer_monotonic() + stallFor); + // random stall duration in ms (chosen once) + stallDuration = 0.001 * deterministicRandom()->randomInt(1, 5); + throttlePeriod = throttleFor; + throttleUntil = std::max(throttleUntil, timer_monotonic() + throttleFor); + TraceEvent("SetDiskFailure") + .detail("StallInterval", interval) + .detail("StallPeriod", stallFor) + .detail("StallUntil", stallUntil) + .detail("ThrottlePeriod", throttleFor) + .detail("ThrottleUntil", throttleUntil); } - double getDiskDelay() { - if 
(!FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { - return 0.0; + double getStallDelay() { + // If we are in a stall period and a stallInterval was specified, determine the + // delay to be inserted + if (((stallUntil - timer_monotonic()) > 0.0) && stallInterval) { + auto timeElapsed = fmod(timer_monotonic(), stallInterval); + return std::max(0.0, stallDuration - timeElapsed); } - return delayGenerator.getDelay(); + return 0.0; } + double getThrottleDelay() { + // If we are in the throttle period, insert a random delay (in ms) + if ((throttleUntil - timer_monotonic()) > 0.0) + return (0.001 * deterministicRandom()->randomInt(1, 3)); + + return 0.0; + } + + double getDiskDelay() { return getStallDelay() + getThrottleDelay(); } + private: // members - DelayGenerator delayGenerator; + double stallInterval = 0.0; // how often should the disk be stalled (0 meaning once, 10 meaning every 10 secs) + double stallPeriod; // Period of time disk stalls will be injected for + double stallUntil; // End of disk stall period + double stallDuration; // Duration of each stall + double throttlePeriod; // Period of time the disk will be slowed down for + double throttleUntil; // End of disk slowdown period private: // construction DiskFailureInjector() = default; DiskFailureInjector(DiskFailureInjector const&) = delete; }; -struct BitFlipper : FastAllocated { +struct BitFlipper { static BitFlipper* flipper() { auto res = g_network->global(INetwork::enBitFlipper); if (!res) { @@ -725,43 +752,12 @@ struct BitFlipper : FastAllocated { return static_cast(res); } - //uint8_t toggleNthBit(uint8_t b, uint8_t n) { - // auto singleBitMask = uint8(1) << (n); - // return b ^ singleBitMask; - //} + double getBitFlipPercentage() { return bitFlipPercentage; } - //void flipBitAtOffset(int64_t byteOffset, uint8_t bitOffset) { - //auto oneByte = make([]byte, 1); - // uint8_t oneByte[1]; - // int readBytes = wait(file->Read(oneByte, 1, byteOffset)); - - // oneByte[0] = toggleNthBit(oneByte[0], bitOffset); - // 
file->write(oneByte, 1, byteOffset); - //} - - //void flipBits(Reference fileName, double percent) { - // file = fileName; - // auto toFlip = int(float64(file->size()*8) * percent / 100); - // for (auto i = 0; i < toFlip; i++) { - // auto byteOffset = deterministicRandom()->randomInt64(0, file->size()); - // auto bitOffset = uint8_t(deterministicRandom()->randomInt(0, 8)); - // flipBitAtOffset(byteOffset, bitOffset); - // } - //} - - double getPercentBitFlips() { - TraceEvent("BitFlipperGetPercentBitFlips").detail("PercentBitFlips", percentBitFlips); - return percentBitFlips; - } - - void setPercentBitFlips(double percentFlips) { - percentBitFlips = percentFlips; - TraceEvent("BitFlipperSetPercentBitFlips").detail("PercentBitFlips", percentBitFlips); - } + void setBitFlipPercentage(double percentage) { bitFlipPercentage = percentage; } private: // members - double percentBitFlips = 0.0; - //Reference file; + double bitFlipPercentage = 0.0; private: // construction BitFlipper() = default; diff --git a/tests/fast/DiskThrottledCycle.toml b/tests/fast/DiskThrottledCycle.toml index 60429350b6..83df7fdb1d 100644 --- a/tests/fast/DiskThrottledCycle.toml +++ b/tests/fast/DiskThrottledCycle.toml @@ -10,5 +10,7 @@ testTitle = 'DiskThrottledCycle' [[test.workload]] testName = 'DiskThrottling' testDuration = 30.0 - throttleFrequency = 10.0 + stallInterval = 10.0 + stallPeriod = 30.0 + throttlePeriod = 30.0 From 4b8771647555bf3f0715d3d4cd39e70c8f8011e2 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 28 Jul 2021 18:19:55 -0700 Subject: [PATCH 010/142] Turn the chaos knob off by default. 
--- fdbrpc/AsyncFileNonDurable.actor.h | 1 - flow/Knobs.cpp | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/AsyncFileNonDurable.actor.h index 8489a3842d..2a74908517 100644 --- a/fdbrpc/AsyncFileNonDurable.actor.h +++ b/fdbrpc/AsyncFileNonDurable.actor.h @@ -31,7 +31,6 @@ #include "flow/flow.h" #include "fdbrpc/IAsyncFile.h" #include "flow/ActorCollection.h" -#include "flow/network.h" #include "fdbrpc/simulator.h" #include "fdbrpc/TraceFileIO.h" #include "fdbrpc/RangeMap.h" diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index ec1f041c18..5233f65757 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -67,7 +67,7 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) { init( HUGE_ARENA_LOGGING_INTERVAL, 5.0 ); // Chaos testing - init( ENABLE_CHAOS_FEATURES, true ); + init( ENABLE_CHAOS_FEATURES, false ); init( CHAOS_LOGGING_INTERVAL, 5.0 ); From 9e7197faba0378bf9120dbbeac3d1c85a9b2478c Mon Sep 17 00:00:00 2001 From: negoyal Date: Fri, 30 Jul 2021 01:32:43 -0700 Subject: [PATCH 011/142] Bunch of changes based on review comments and discussions. 
--- fdbrpc/AsyncFileChaos.actor.h | 40 ++- fdbrpc/simulator.h | 8 + fdbserver/CMakeLists.txt | 3 +- fdbserver/Status.actor.cpp | 3 +- fdbserver/Status.h | 4 + fdbserver/VFSAsync.h | 3 +- fdbserver/VersionedBTree.actor.cpp | 3 + fdbserver/worker.actor.cpp | 10 +- fdbserver/workloads/BitFlipping.actor.cpp | 229 ------------------ ...tor.cpp => DiskFailureInjection.actor.cpp} | 208 ++++++++-------- flow/Knobs.cpp | 6 +- flow/network.h | 12 +- tests/CMakeLists.txt | 3 +- tests/fast/BitFlippedCycle.toml | 13 - tests/fast/DiskThrottledCycle.toml | 16 -- tests/slow/DiskFailureCycle.toml | 30 +++ 16 files changed, 199 insertions(+), 392 deletions(-) delete mode 100644 fdbserver/workloads/BitFlipping.actor.cpp rename fdbserver/workloads/{DiskThrottling.actor.cpp => DiskFailureInjection.actor.cpp} (50%) delete mode 100644 tests/fast/BitFlippedCycle.toml delete mode 100644 tests/fast/DiskThrottledCycle.toml create mode 100644 tests/slow/DiskFailureCycle.toml diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index c7eaaefe15..11b60f6692 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -30,16 +30,22 @@ class AsyncFileChaos final : public IAsyncFile, public ReferenceCounted { private: Reference file; - Arena arena; + bool enabled; public: - explicit AsyncFileChaos(Reference file) : file(file) {} + explicit AsyncFileChaos(Reference file) : file(file) { + // We only allow chaos events on storage files + enabled = StringRef(file->getFilename()).startsWith(LiteralStringRef("storage-")); + } void addref() override { ReferenceCounted::addref(); } void delref() override { ReferenceCounted::delref(); } - static double getDelay() { + double getDelay() const { double delayFor = 0.0; + if (!enabled) + return delayFor; + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) { DiskFailureInjector* delayInjector = static_cast(res); @@ -60,6 +66,9 @@ public: Future read(void* data, int length, int64_t offset) override 
{ double diskDelay = getDelay(); + if (diskDelay == 0.0) + return file->read(data, length, offset); + // Wait for diskDelay before submitting the I/O // Template types are being provided explicitly because they can't be automatically deduced for some reason. return mapAsync(Void)>, int>( @@ -67,18 +76,19 @@ public: } Future write(void const* data, int length, int64_t offset) override { + Arena arena; char* pdata = nullptr; // Check if a bit flip event was injected, if so, copy the buffer contents // with a random bit flipped in a new buffer and use that for the write auto res = g_network->global(INetwork::enBitFlipper); - if (res) { + if (enabled && res) { auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); if (bitFlipPercentage > 0.0) { - pdata = (char*)arena.allocate4kAlignedBuffer(length); - memcpy(pdata, data, length); if (deterministicRandom()->random01() < bitFlipPercentage) { - // copy buffer with a flipped bit + pdata = (char*)arena.allocate4kAlignedBuffer(length); + memcpy(pdata, data, length); + // flip a random bit in the copied buffer pdata[deterministicRandom()->randomInt(0, length)] ^= (1 << deterministicRandom()->randomInt(0, 8)); // increment the metric for bit flips @@ -92,6 +102,13 @@ public: } double diskDelay = getDelay(); + if (diskDelay == 0.0) { + if (pdata) + return holdWhile(arena, file->write(pdata, length, offset)); + + return file->write(data, length, offset); + } + // Wait for diskDelay before submitting the I/O return mapAsync(Void)>, Void>(delay(diskDelay), [=](Void _) -> Future { if (pdata) @@ -103,6 +120,9 @@ public: Future truncate(int64_t size) override { double diskDelay = getDelay(); + if (diskDelay == 0.0) + return file->truncate(size); + // Wait for diskDelay before submitting the I/O return mapAsync(Void)>, Void>( delay(diskDelay), [=](Void _) -> Future { return file->truncate(size); }); @@ -110,6 +130,9 @@ public: Future sync() override { double diskDelay = getDelay(); + if (diskDelay == 0.0) + return 
file->sync(); + // Wait for diskDelay before submitting the I/O return mapAsync(Void)>, Void>( delay(diskDelay), [=](Void _) -> Future { return file->sync(); }); @@ -117,6 +140,9 @@ public: Future size() const override { double diskDelay = getDelay(); + if (diskDelay == 0.0) + return file->size(); + // Wait for diskDelay before submitting the I/O return mapAsync(Void)>, int64_t>( delay(diskDelay), [=](Void _) -> Future { return file->size(); }); diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 764b8b125b..13c493d434 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -410,6 +410,7 @@ public: std::vector>> primarySatelliteDcIds; std::vector>> remoteSatelliteDcIds; TSSMode tssMode; + std::map corruptWorkerMap; // Used by workloads that perform reconfigurations int testerCount; @@ -440,6 +441,13 @@ public: static thread_local ProcessInfo* currentProcess; + bool checkInjectedCorruption() { + auto iter = corruptWorkerMap.find(currentProcess->address); + if (iter != corruptWorkerMap.end()) + return iter->second; + return false; + } + protected: Mutex mutex; diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index cabf457bdd..f18d247c4e 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -151,7 +151,6 @@ set(FDBSERVER_SRCS workloads/BackupToDBAbort.actor.cpp workloads/BackupToDBCorrectness.actor.cpp workloads/BackupToDBUpgrade.actor.cpp - workloads/BitFlipping.actor.cpp workloads/BlobStoreWorkload.h workloads/BulkLoad.actor.cpp workloads/BulkSetup.actor.h @@ -173,7 +172,7 @@ set(FDBSERVER_SRCS workloads/DDMetricsExclude.actor.cpp workloads/DiskDurability.actor.cpp workloads/DiskDurabilityTest.actor.cpp - workloads/DiskThrottling.actor.cpp + workloads/DiskFailureInjection.actor.cpp workloads/Downgrade.actor.cpp workloads/DummyWorkload.actor.cpp workloads/ExternalWorkload.actor.cpp diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index ac15df2d04..828cb094be 100644 --- a/fdbserver/Status.actor.cpp +++ 
b/fdbserver/Status.actor.cpp @@ -95,7 +95,6 @@ extern int limitReasonEnd; extern const char* limitReasonName[]; extern const char* limitReasonDesc[]; -struct WorkerEvents : std::map {}; typedef std::map EventMap; ACTOR static Future> latestEventOnWorker(WorkerInterface worker, std::string eventName) { @@ -115,7 +114,7 @@ ACTOR static Future> latestEventOnWorker(WorkerInterf } } -ACTOR static Future>>> latestEventOnWorkers( +ACTOR Future>>> latestEventOnWorkers( std::vector workers, std::string eventName) { try { diff --git a/fdbserver/Status.h b/fdbserver/Status.h index 3cfb019a8e..f56780e1d1 100644 --- a/fdbserver/Status.h +++ b/fdbserver/Status.h @@ -46,4 +46,8 @@ Future clusterGetStatus( Version const& datacenterVersionDifference, ConfigBroadcaster const* const& conifgBroadcaster); +struct WorkerEvents : std::map {}; +Future>>> latestEventOnWorkers( + std::vector const& workers, + std::string const& eventName); #endif diff --git a/fdbserver/VFSAsync.h b/fdbserver/VFSAsync.h index e2dbd18d28..77aea71348 100644 --- a/fdbserver/VFSAsync.h +++ b/fdbserver/VFSAsync.h @@ -22,6 +22,7 @@ #include #include #include "fdbrpc/IAsyncFile.h" +#include "fdbrpc/simulator.h" /* ** When using this VFS, the sqlite3_file* handles that SQLite uses are @@ -71,7 +72,7 @@ struct VFSAsyncFile { .detail("Found", e) .detail("ErrorCode", (int64_t)g_network->global(INetwork::enSQLiteInjectedError)) .backtrace(); - return e; + return e || (g_network->isSimulated() && g_simulator.checkInjectedCorruption()); } uint32_t* const pLockCount; // +1 for each SHARED_LOCK, or 1+X_COUNT for lock level X diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 15a1ae2a35..2dd22544db 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -26,6 +26,7 @@ #include #include #include "fdbrpc/ContinuousSample.h" +#include "fdbrpc/simulator.h" #include "fdbserver/IPager.h" #include "fdbclient/Tuple.h" #include "flow/serialize.h" @@ -2727,6 
+2728,8 @@ public: debug_printf( "DWALPager(%s) checksum failed for %s\n", self->filename.c_str(), toString(pageID).c_str()); Error e = checksum_failed(); + if (g_network->isSimulated() && g_simulator.checkInjectedCorruption()) + e = e.asInjectedFault(); TraceEvent(SevError, "RedwoodChecksumFailed") .detail("Filename", self->filename.c_str()) .detail("PageID", pageID) diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index b0339fcf05..bf5d7d5d0c 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -613,7 +613,6 @@ bool addressInDbAndPrimaryDc(const NetworkAddress& address, Reference(Endpoint({ grvProxyAddress }, UID(1, 2))); + grvProxyInterf.getConsistentReadVersion = + RequestStream(Endpoint({ grvProxyAddress }, UID(1, 2))); testDbInfo.client.grvProxies.push_back(grvProxyInterf); ASSERT(addressInDbAndPrimaryDc(grvProxyAddress, makeReference>(testDbInfo))); NetworkAddress commitProxyAddress(IPAddress(0x37373737), 1); CommitProxyInterface commitProxyInterf; - commitProxyInterf.commit = RequestStream(Endpoint({ commitProxyAddress }, UID(1, 2))); + commitProxyInterf.commit = + RequestStream(Endpoint({ commitProxyAddress }, UID(1, 2))); testDbInfo.client.commitProxies.push_back(commitProxyInterf); ASSERT(addressInDbAndPrimaryDc(commitProxyAddress, makeReference>(testDbInfo))); @@ -1204,8 +1205,7 @@ ACTOR Future chaosMetricsLogger() { wait(delay(FLOW_KNOBS->CHAOS_LOGGING_INTERVAL)); TraceEvent e("ChaosMetrics"); - // double elapsed = now() - chaosMetrics->startTime; - double elapsed = timer_monotonic() - chaosMetrics->startTime; + double elapsed = now() - chaosMetrics->startTime; e.detail("Elapsed", elapsed); chaosMetrics->getFields(&e); e.trackLatest("ChaosMetrics"); diff --git a/fdbserver/workloads/BitFlipping.actor.cpp b/fdbserver/workloads/BitFlipping.actor.cpp deleted file mode 100644 index 8dd8781b6f..0000000000 --- a/fdbserver/workloads/BitFlipping.actor.cpp +++ /dev/null @@ -1,229 +0,0 @@ -/* - * BitFlipping.actor.cpp - 
* - * This source file is part of the FoundationDB open source project - * - * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "fdbclient/NativeAPI.actor.h" -#include "fdbserver/TesterInterface.actor.h" -#include "fdbserver/workloads/workloads.actor.h" -#include "fdbrpc/simulator.h" -#include "fdbserver/WorkerInterface.actor.h" -#include "fdbserver/ServerDBInfo.h" -#include "fdbserver/QuietDatabase.h" -#include "flow/actorcompiler.h" // This must be the last #include. - -struct BitFlippingWorkload : TestWorkload { - bool enabled; - double testDuration; - double percentBitFlips; - double periodicCheckInterval; - std::vector chosenWorkers; - std::vector> clients; - - BitFlippingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { - enabled = !clientId; // only do this on the "first" client - testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0); - percentBitFlips = getOption(options, LiteralStringRef("percentBitFlips"), 10.0); - periodicCheckInterval = getOption(options, LiteralStringRef("periodicCheckInterval"), 10.0); - } - - std::string description() const override { - if (&g_simulator == g_network) - return "BitFlipping"; - else - return "NoSimBitFlipping"; - } - - Future setup(Database const& cx) override { return Void(); } - - // Starts the workload by - - // 1. Starting the actor to periodically check chaosMetrics, and - // 2. 
Starting the actor that injects failures on chosen storage servers - Future start(Database const& cx) override { - if (enabled) { - clients.push_back(periodicMetricCheck(this)); - clients.push_back(flipBitsClient(cx, this)); - return timeout(waitForAll(clients), testDuration, Void()); - } else - return Void(); - } - - Future check(Database const& cx) override { return true; } - - void getMetrics(vector& m) override {} - - static void checkBitFlipResult(Future res, WorkerInterface worker) { - if (res.isError()) { - auto err = res.getError(); - if (err.code() == error_code_client_invalid_operation) { - TraceEvent(SevError, "ChaosDisabled") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); - } else { - TraceEvent(SevError, "BitFlippingFailed") - .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) - .error(err); - } - } - } - - ACTOR void doBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { - state Future res; - wait(::delay(startDelay)); - SetFailureInjection::FlipBitsCommand flipBits; - flipBits.percentBitFlips = percentage; - SetFailureInjection req; - req.flipBits = flipBits; - res = worker.clientInterface.setFailureInjection.getReply(req); - wait(ready(res)); - checkBitFlipResult(res, worker); - } - - ACTOR static Future getAllStorageWorkers(Database cx, - BitFlippingWorkload* self, - std::vector* result) { - result->clear(); - state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); - for (auto& worker : res) { - result->emplace_back(worker); - } - return Void(); - } - - ACTOR template - Future flipBitsClient(Database cx, BitFlippingWorkload* self) { - state double lastTime = now(); - state double workloadEnd = now() + self->testDuration; - state std::vector machines; - loop { - wait(poisson(&lastTime, 1)); - wait(BitFlippingWorkload::getAllStorageWorkers(cx, self, &machines)); - auto machine = deterministicRandom()->randomChoice(machines); - - // 
If we have already chosen this worker, then just continue - if (find(self->chosenWorkers.begin(), self->chosenWorkers.end(), machine.address()) != - self->chosenWorkers.end()) - continue; - - // Keep track of chosen workers for verification purpose - self->chosenWorkers.emplace_back(machine.address()); - self->doBitFlips(machine, self->percentBitFlips); - } - } - - // Resend the chaos event to previosuly chosen workers, in case some workers got restarted and lost their chaos - // config - ACTOR static Future reSendChaos(BitFlippingWorkload* self) { - std::vector workers = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); - std::map workersMap; - for (auto worker : workers) { - workersMap[worker.interf.address()] = worker.interf; - } - for (auto& workerAddress : self->chosenWorkers) { - auto itr = workersMap.find(workerAddress); - if (itr != workersMap.end()) - self->doBitFlips(itr->second, self->percentBitFlips); - } - return Void(); - } - // For fetching chaosMetrics to ensure chaos events are happening - // This is borrowed code from Status.actor.cpp - struct WorkerEvents : std::map {}; - - ACTOR static Future>>> latestEventOnWorkers( - std::vector workers, - std::string eventName) { - try { - state vector>> eventTraces; - for (int c = 0; c < workers.size(); c++) { - EventLogRequest req = - eventName.size() > 0 ? 
EventLogRequest(Standalone(eventName)) : EventLogRequest(); - eventTraces.push_back(errorOr(timeoutError(workers[c].interf.eventLogRequest.getReply(req), 2.0))); - } - - wait(waitForAll(eventTraces)); - - std::set failed; - WorkerEvents results; - - for (int i = 0; i < eventTraces.size(); i++) { - const ErrorOr& v = eventTraces[i].get(); - if (v.isError()) { - failed.insert(workers[i].interf.address().toString()); - results[workers[i].interf.address()] = TraceEventFields(); - } else { - results[workers[i].interf.address()] = v.get(); - } - } - - std::pair> val; - val.first = results; - val.second = failed; - - return val; - } catch (Error& e) { - ASSERT(e.code() == - error_code_actor_cancelled); // All errors should be filtering through the errorOr actor above - throw; - } - } - - // Fetches chaosMetrics and verifies that chaos events are happening for enabled workers - ACTOR static Future chaosGetStatus(BitFlippingWorkload* self) { - std::vector workers = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); - - Future>>> latestEventsFuture; - latestEventsFuture = latestEventOnWorkers(workers, "ChaosMetrics"); - state Optional>> workerEvents = wait(latestEventsFuture); - - state WorkerEvents cMetrics = workerEvents.present() ? 
workerEvents.get().first : WorkerEvents(); - - // Now verify that all chosen workers for chaos events have non-zero chaosMetrics - for (auto& workerAddress : self->chosenWorkers) { - auto chaosMetrics = cMetrics.find(workerAddress); - if (chaosMetrics != cMetrics.end()) { - int bitFlips = chaosMetrics->second.getInt("BitFlips"); - - // we expect bitFlips to be non-zero for chosenWorkers - if (bitFlips == 0) { - TraceEvent(SevError, "ChaosGetStatus") - .detail("OnEndpoint", workerAddress.toString()) - .detail("BitFlips", bitFlips); - } - } - } - - return Void(); - } - - // Periodically fetches chaosMetrics to ensure that chaas events are taking place - ACTOR static Future periodicMetricCheck(BitFlippingWorkload* self) { - state double start = now(); - state double elapsed = 0.0; - - loop { - // re-send the chaos event in case of a process restart - wait(reSendChaos(self)); - elapsed += self->periodicCheckInterval; - wait(delayUntil(start + elapsed)); - wait(chaosGetStatus(self)); - } - } -}; -WorkloadFactory BitFlippingWorkloadFactory("BitFlipping"); diff --git a/fdbserver/workloads/DiskThrottling.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp similarity index 50% rename from fdbserver/workloads/DiskThrottling.actor.cpp rename to fdbserver/workloads/DiskFailureInjection.actor.cpp index 264ef477a2..bb1cf9b124 100644 --- a/fdbserver/workloads/DiskThrottling.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -1,5 +1,5 @@ /* - * DiskThrottling.actor.cpp + * DiskFailureInjection.actor.cpp * * This source file is part of the FoundationDB open source project * @@ -25,34 +25,45 @@ #include "fdbserver/WorkerInterface.actor.h" #include "fdbserver/ServerDBInfo.h" #include "fdbserver/QuietDatabase.h" +#include "fdbserver/Status.h" #include "flow/actorcompiler.h" // This must be the last #include. 
-struct DiskThrottlingWorkload : TestWorkload { +struct DiskFailureInjectionWorkload : TestWorkload { bool enabled; double testDuration; double startDelay; + bool throttleDisk; + int workersToThrottle; double stallInterval; double stallPeriod; double throttlePeriod; - double periodicCheckInterval; + bool corruptFile; + int workersToCorrupt; + double percentBitFlips; + double periodicBroadcastInterval; std::vector chosenWorkers; std::vector> clients; - DiskThrottlingWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { + DiskFailureInjectionWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { enabled = !clientId; // only do this on the "first" client startDelay = getOption(options, LiteralStringRef("startDelay"), 0.0); testDuration = getOption(options, LiteralStringRef("testDuration"), 60.0); + throttleDisk = getOption(options, LiteralStringRef("throttleDisk"), false); + workersToThrottle = getOption(options, LiteralStringRef("workersToThrottle"), 3); stallInterval = getOption(options, LiteralStringRef("stallInterval"), 0.0); stallPeriod = getOption(options, LiteralStringRef("stallPeriod"), 60.0); throttlePeriod = getOption(options, LiteralStringRef("throttlePeriod"), 60.0); - periodicCheckInterval = getOption(options, LiteralStringRef("periodicCheckInterval"), 10.0); + corruptFile = getOption(options, LiteralStringRef("corruptFile"), false); + workersToCorrupt = getOption(options, LiteralStringRef("workersToCorrupt"), 1); + percentBitFlips = getOption(options, LiteralStringRef("percentBitFlips"), 10.0); + periodicBroadcastInterval = getOption(options, LiteralStringRef("periodicBroadcastInterval"), 5.0); } std::string description() const override { if (&g_simulator == g_network) - return "DiskThrottling"; + return "DiskFailureInjection"; else - return "NoSimDiskThrolling"; + return "NoSimDiskFailureInjection"; } Future setup(Database const& cx) override { return Void(); } @@ -62,8 +73,8 @@ struct DiskThrottlingWorkload : TestWorkload { // 2. 
Starting the actor that injects failures on chosen storage servers Future start(Database const& cx) override { if (enabled) { - clients.push_back(periodicMetricCheck(this)); - clients.push_back(throttleDiskClient(cx, this)); + clients.push_back(diskFailureInjectionClient(cx, this)); + clients.push_back(periodicEventBroadcast(this)); return timeout(waitForAll(clients), testDuration, Void()); } else return Void(); @@ -73,26 +84,26 @@ struct DiskThrottlingWorkload : TestWorkload { void getMetrics(vector& m) override {} - static void checkDiskThrottleResult(Future res, WorkerInterface worker) { + static void checkDiskFailureInjectionResult(Future res, WorkerInterface worker) { if (res.isError()) { auto err = res.getError(); if (err.code() == error_code_client_invalid_operation) { TraceEvent(SevError, "ChaosDisabled") .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()); } else { - TraceEvent(SevError, "DiskThrottlingFailed") + TraceEvent(SevError, "DiskFailureInjectionFailed") .detail("OnEndpoint", worker.waitFailure.getEndpoint().addresses.address.toString()) .error(err); } } } - // Sets the disk failure request - ACTOR void doThrottle(WorkerInterface worker, - double stallInterval, - double stallPeriod, - double throttlePeriod, - double startDelay) { + // Sets the disk delay request + ACTOR void injectDiskDelays(WorkerInterface worker, + double stallInterval, + double stallPeriod, + double throttlePeriod, + double startDelay) { state Future res; wait(::delay(startDelay)); SetFailureInjection::DiskFailureCommand diskFailure; @@ -103,39 +114,34 @@ struct DiskThrottlingWorkload : TestWorkload { req.diskFailure = diskFailure; res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); - checkDiskThrottleResult(res, worker); + checkDiskFailureInjectionResult(res, worker); } - // Currently unused, because we only inject disk failures on storage servers - ACTOR static Future getAllWorkers(DiskThrottlingWorkload* self, 
std::vector* result) { - result->clear(); - std::vector res = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); - for (auto& worker : res) { - result->emplace_back(worker.interf); - } - return Void(); + // Sets the disk corruption request + ACTOR void injectBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { + state Future res; + wait(::delay(startDelay)); + SetFailureInjection::FlipBitsCommand flipBits; + flipBits.percentBitFlips = percentage; + SetFailureInjection req; + req.flipBits = flipBits; + res = worker.clientInterface.setFailureInjection.getReply(req); + wait(ready(res)); + checkDiskFailureInjectionResult(res, worker); } - ACTOR static Future getAllStorageWorkers(Database cx, - DiskThrottlingWorkload* self, - std::vector* result) { - result->clear(); - state std::vector res = wait(getStorageWorkers(cx, self->dbInfo, false)); - for (auto& worker : res) { - result->emplace_back(worker); - } - return Void(); - } - - // Choose random storage servers to inject disk failures + // Choose random storage servers to inject disk failures. + // We currently only inject disk failure on storage servers. 
Can be expanded to include + // other worker types in future ACTOR template - Future throttleDiskClient(Database cx, DiskThrottlingWorkload* self) { + Future diskFailureInjectionClient(Database cx, DiskFailureInjectionWorkload* self) { state double lastTime = now(); state std::vector machines; + state int throttledWorkers = 0; + state int corruptedWorkers = 0; loop { wait(poisson(&lastTime, 1)); - wait(DiskThrottlingWorkload::getAllStorageWorkers(cx, self, &machines)); + wait(store(machines, getStorageWorkers(cx, self->dbInfo, false))); auto machine = deterministicRandom()->randomChoice(machines); // If we have already chosen this worker, then just continue @@ -145,13 +151,22 @@ struct DiskThrottlingWorkload : TestWorkload { // Keep track of chosen workers for verification purpose self->chosenWorkers.emplace_back(machine.address()); - self->doThrottle(machine, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) + self->injectDiskDelays( + machine, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { + if (&g_simulator == g_network) + g_simulator.corruptWorkerMap[machine.address()] = true; + self->injectBitFlips(machine, self->percentBitFlips); + } } } // Resend the chaos event to previosuly chosen workers, in case some workers got restarted and lost their chaos // config - ACTOR static Future reSendChaos(DiskThrottlingWorkload* self) { + ACTOR static Future reSendChaos(DiskFailureInjectionWorkload* self) { + state int throttledWorkers = 0; + state int corruptedWorkers = 0; std::vector workers = wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); std::map workersMap; @@ -160,57 +175,22 @@ struct DiskThrottlingWorkload : TestWorkload { } for (auto& workerAddress : self->chosenWorkers) { auto itr = workersMap.find(workerAddress); 
- if (itr != workersMap.end()) - self->doThrottle( - itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + if (itr != workersMap.end()) { + if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) + self->injectDiskDelays( + itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { + if (&g_simulator == g_network) + g_simulator.corruptWorkerMap[workerAddress] = true; + self->injectBitFlips(itr->second, self->percentBitFlips); + } + } } return Void(); } - // For fetching chaosMetrics to ensure chaos events are happening - // This is borrowed code from Status.actor.cpp - struct WorkerEvents : std::map {}; - - ACTOR static Future>>> latestEventOnWorkers( - std::vector workers, - std::string eventName) { - try { - state vector>> eventTraces; - for (int c = 0; c < workers.size(); c++) { - EventLogRequest req = - eventName.size() > 0 ? 
EventLogRequest(Standalone(eventName)) : EventLogRequest(); - eventTraces.push_back(errorOr(timeoutError(workers[c].interf.eventLogRequest.getReply(req), 2.0))); - } - - wait(waitForAll(eventTraces)); - - std::set failed; - WorkerEvents results; - - for (int i = 0; i < eventTraces.size(); i++) { - const ErrorOr& v = eventTraces[i].get(); - if (v.isError()) { - failed.insert(workers[i].interf.address().toString()); - results[workers[i].interf.address()] = TraceEventFields(); - } else { - results[workers[i].interf.address()] = v.get(); - } - } - - std::pair> val; - val.first = results; - val.second = failed; - - return val; - } catch (Error& e) { - ASSERT(e.code() == - error_code_actor_cancelled); // All errors should be filtering through the errorOr actor above - throw; - } - } - // Fetches chaosMetrics and verifies that chaos events are happening for enabled workers - ACTOR static Future chaosGetStatus(DiskThrottlingWorkload* self) { + ACTOR static Future chaosGetStatus(DiskFailureInjectionWorkload* self) { std::vector workers = wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); @@ -220,38 +200,54 @@ struct DiskThrottlingWorkload : TestWorkload { state WorkerEvents cMetrics = workerEvents.present() ? 
workerEvents.get().first : WorkerEvents(); - // Now verify that all chosen workers for chaos events have non-zero chaosMetrics - std::vector>>>> futures; + // Check if any of the chosen workers for chaos events have non-zero chaosMetrics + try { + int foundChaosMetrics = 0; + for (auto& workerAddress : self->chosenWorkers) { + auto chaosMetrics = cMetrics.find(workerAddress); + if (chaosMetrics != cMetrics.end()) { + // we expect diskDelays to be non-zero for chosenWorkers for throttleDisk event + if (self->throttleDisk) { + int diskDelays = chaosMetrics->second.getInt("DiskDelays"); + if (diskDelays > 0) { + foundChaosMetrics++; + } + } - for (auto& workerAddress : self->chosenWorkers) { - auto chaosMetrics = cMetrics.find(workerAddress); - if (chaosMetrics != cMetrics.end()) { - int diskDelays = chaosMetrics->second.getInt("DiskDelays"); - - // we expect diskDelays to be non-zero for chosenWorkers - if (diskDelays == 0) { - TraceEvent(SevError, "ChaosGetStatus") - .detail("OnEndpoint", workerAddress.toString()) - .detail("DiskDelays", diskDelays); + // we expect bitFlips to be non-zero for chosenWorkers for corruptFile event + if (self->corruptFile) { + int bitFlips = chaosMetrics->second.getInt("BitFlips"); + if (bitFlips > 0) { + foundChaosMetrics++; + } + } } } + if (foundChaosMetrics == 0) + TraceEvent("DiskFailureInjectionFailed").detail("ChaosMetricCount", foundChaosMetrics); + else + TraceEvent("ChaosGetStatus").detail("ChaosMetricCount", foundChaosMetrics); + } catch (Error& e) { + // it's possible to get an empty event, it's okay to ignore + if (e.code() != error_code_attribute_not_found) { + throw e; + } } return Void(); } - // Periodically fetches chaosMetrics to ensure that chaas events are taking place - ACTOR static Future periodicMetricCheck(DiskThrottlingWorkload* self) { + // Periodically re-send the chaos event in case of a process restart + ACTOR static Future periodicEventBroadcast(DiskFailureInjectionWorkload* self) { state double start = 
now(); state double elapsed = 0.0; loop { - // re-send the chaos event in case of a process restart wait(reSendChaos(self)); - elapsed += self->periodicCheckInterval; + elapsed += self->periodicBroadcastInterval; wait(delayUntil(start + elapsed)); wait(chaosGetStatus(self)); } } }; -WorkloadFactory DiskThrottlingWorkloadFactory("DiskThrottling"); +WorkloadFactory DiskFailureInjectionWorkloadFactory("DiskFailureInjection"); diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 5233f65757..b506dd46e1 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -66,9 +66,9 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) { init( HUGE_ARENA_LOGGING_BYTES, 100e6 ); init( HUGE_ARENA_LOGGING_INTERVAL, 5.0 ); - // Chaos testing - init( ENABLE_CHAOS_FEATURES, false ); - init( CHAOS_LOGGING_INTERVAL, 5.0 ); + // Chaos testing - enabled for simulation by default + init( ENABLE_CHAOS_FEATURES, isSimulated ); + init( CHAOS_LOGGING_INTERVAL, 5.0 ); init( WRITE_TRACING_ENABLED, true ); if( randomize && BUGGIFY ) WRITE_TRACING_ENABLED = false; diff --git a/flow/network.h b/flow/network.h index c4c8f46ffc..59a1ec9a0a 100644 --- a/flow/network.h +++ b/flow/network.h @@ -666,7 +666,7 @@ struct ChaosMetrics { void clear() { memset(this, 0, sizeof(ChaosMetrics)); - startTime = timer_monotonic(); + startTime = g_network ? 
g_network->now() : 0; } unsigned int diskDelays; @@ -703,11 +703,11 @@ struct DiskFailureInjector { void setDiskFailure(double interval, double stallFor, double throttleFor) { stallInterval = interval; stallPeriod = stallFor; - stallUntil = std::max(stallUntil, timer_monotonic() + stallFor); + stallUntil = std::max(stallUntil, g_network->now() + stallFor); // random stall duration in ms (chosen once) stallDuration = 0.001 * deterministicRandom()->randomInt(1, 5); throttlePeriod = throttleFor; - throttleUntil = std::max(throttleUntil, timer_monotonic() + throttleFor); + throttleUntil = std::max(throttleUntil, g_network->now() + throttleFor); TraceEvent("SetDiskFailure") .detail("StallInterval", interval) .detail("StallPeriod", stallFor) @@ -719,8 +719,8 @@ struct DiskFailureInjector { double getStallDelay() { // If we are in a stall period and a stallInterval was specified, determine the // delay to be inserted - if (((stallUntil - timer_monotonic()) > 0.0) && stallInterval) { - auto timeElapsed = fmod(timer_monotonic(), stallInterval); + if (((stallUntil - g_network->now()) > 0.0) && stallInterval) { + auto timeElapsed = fmod(g_network->now(), stallInterval); return std::max(0.0, stallDuration - timeElapsed); } return 0.0; @@ -728,7 +728,7 @@ struct DiskFailureInjector { double getThrottleDelay() { // If we are in the throttle period, insert a random delay (in ms) - if ((throttleUntil - timer_monotonic()) > 0.0) + if ((throttleUntil - g_network->now()) > 0.0) return (0.001 * deterministicRandom()->randomInt(1, 3)); return 0.0; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c906f8f93e..f2c1681fee 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -119,14 +119,12 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES fast/BackupCorrectnessClean.toml) add_fdb_test(TEST_FILES fast/BackupToDBCorrectness.toml) add_fdb_test(TEST_FILES fast/BackupToDBCorrectnessClean.toml) - add_fdb_test(TEST_FILES fast/BitFlippedCycle.toml IGNORE) add_fdb_test(TEST_FILES 
fast/CacheTest.toml) add_fdb_test(TEST_FILES fast/CloggedSideband.toml) add_fdb_test(TEST_FILES fast/ConfigureLocked.toml) add_fdb_test(TEST_FILES fast/ConstrainedRandomSelector.toml) add_fdb_test(TEST_FILES fast/CycleAndLock.toml) add_fdb_test(TEST_FILES fast/CycleTest.toml) - add_fdb_test(TEST_FILES fast/DiskThrottledCycle.toml IGNORE) add_fdb_test(TEST_FILES fast/FuzzApiCorrectness.toml) add_fdb_test(TEST_FILES fast/FuzzApiCorrectnessClean.toml) add_fdb_test(TEST_FILES fast/IncrementalBackup.toml) @@ -231,6 +229,7 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES slow/DDBalanceAndRemove.toml) add_fdb_test(TEST_FILES slow/DDBalanceAndRemoveStatus.toml) add_fdb_test(TEST_FILES slow/DifferentClustersSameRV.toml) + add_fdb_test(TEST_FILES slow/DiskFailureCycle.toml) add_fdb_test(TEST_FILES slow/FastTriggeredWatches.toml) add_fdb_test(TEST_FILES slow/LowLatencyWithFailures.toml) add_fdb_test(TEST_FILES slow/MoveKeysClean.toml) diff --git a/tests/fast/BitFlippedCycle.toml b/tests/fast/BitFlippedCycle.toml deleted file mode 100644 index 3cab1f74fe..0000000000 --- a/tests/fast/BitFlippedCycle.toml +++ /dev/null @@ -1,13 +0,0 @@ -[[test]] -testTitle = 'BitFlippedCycle' - - [[test.workload]] - testName = 'Cycle' - transactionsPerSecond = 2500.0 - testDuration = 60.0 - expectedRate = 0 - - [[test.workload]] - testName = 'BitFlipping' - testDuration = 60.0 - percentBitFlips = 20.0 diff --git a/tests/fast/DiskThrottledCycle.toml b/tests/fast/DiskThrottledCycle.toml deleted file mode 100644 index 83df7fdb1d..0000000000 --- a/tests/fast/DiskThrottledCycle.toml +++ /dev/null @@ -1,16 +0,0 @@ -[[test]] -testTitle = 'DiskThrottledCycle' - - [[test.workload]] - testName = 'Cycle' - transactionsPerSecond = 2500.0 - testDuration = 30.0 - expectedRate = 0 - - [[test.workload]] - testName = 'DiskThrottling' - testDuration = 30.0 - stallInterval = 10.0 - stallPeriod = 30.0 - throttlePeriod = 30.0 - diff --git a/tests/slow/DiskFailureCycle.toml b/tests/slow/DiskFailureCycle.toml new file mode 
100644 index 0000000000..0f4d1365f7 --- /dev/null +++ b/tests/slow/DiskFailureCycle.toml @@ -0,0 +1,30 @@ +[configuration] +buggify = false +minimumReplication = 3 +minimumRegions = 3 +logAntiQuorum = 0 + +[[test]] +testTitle = 'DiskFailureCycle' + + [[test.workload]] + testName = 'Cycle' + transactionsPerSecond = 2500.0 + testDuration = 60.0 + expectedRate = 0 + + [[test.workload]] + testName = 'DiskFailureInjection' + testDuration = 20.0 + startDelay = 20.0 + throttleDisk = true + stallInterval = 10.0 + stallPeriod = 20.0 + throttlePeriod = 20.0 + + [[test.workload]] + testName = 'DiskFailureInjection' + testDuration = 20.0 + startDelay = 40.0 + corruptFile = true + percentBitFlips = 10 From a2d8ab71523755aa260574276a7a052a424c68e2 Mon Sep 17 00:00:00 2001 From: negoyal Date: Fri, 30 Jul 2021 13:21:45 -0700 Subject: [PATCH 012/142] Ignore the errors from getStorageServers. --- fdbserver/workloads/DiskFailureInjection.actor.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index bb1cf9b124..27e3cd74b4 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -141,7 +141,14 @@ struct DiskFailureInjectionWorkload : TestWorkload { state int corruptedWorkers = 0; loop { wait(poisson(&lastTime, 1)); - wait(store(machines, getStorageWorkers(cx, self->dbInfo, false))); + try { + wait(store(machines, getStorageWorkers(cx, self->dbInfo, false))); + } catch (Error& e) { + // If we failed to get a list of storage servers, we can't inject failure events + // But don't throw the error in that case + TraceEvent("DiskFailureInjectionFailed"); + return Void(); + } auto machine = deterministicRandom()->randomChoice(machines); // If we have already chosen this worker, then just continue From a8baeb75d00e113e28f2a0c0d9c2b790cf712496 Mon Sep 17 00:00:00 2001 From: negoyal Date: Fri, 3 Sep 
2021 15:03:12 -0700 Subject: [PATCH 013/142] Misc fixes. --- fdbrpc/AsyncFileChaos.actor.h | 7 +++-- .../workloads/DiskFailureInjection.actor.cpp | 29 +++++++++++++------ fdbserver/workloads/TargetedKill.actor.cpp | 12 +++++++- flow/network.h | 1 + tests/slow/DiskFailureCycle.toml | 4 ++- 5 files changed, 40 insertions(+), 13 deletions(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 11b60f6692..2aa7ceedcf 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -34,8 +34,11 @@ private: public: explicit AsyncFileChaos(Reference file) : file(file) { - // We onlyl allow chaod events on storage files - enabled = StringRef(file->getFilename()).startsWith(LiteralStringRef("storage-")); + // We only allow chaos events on storage files + enabled = (file->getFilename().find("storage-") != std::string::npos); + //enabled = StringRef(file->getFilename()).startsWith(LiteralStringRef("storage-")); + + TraceEvent("AsyncFileChaos").detail("Enabled", enabled).detail("FileName", file->getFilename()); } void addref() override { ReferenceCounted::addref(); } diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 27e3cd74b4..15bcad5c82 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -43,11 +43,16 @@ struct DiskFailureInjectionWorkload : TestWorkload { double periodicBroadcastInterval; std::vector chosenWorkers; std::vector> clients; + // Verification Mode: We run the workload indefinitely in this mode. + // The idea is to keep going until we get a non-zero chaosMetric to ensure + // that we haven't lost the chaos event. 
testDuration is ignored in this mode + bool verificationMode; DiskFailureInjectionWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { enabled = !clientId; // only do this on the "first" client startDelay = getOption(options, LiteralStringRef("startDelay"), 0.0); testDuration = getOption(options, LiteralStringRef("testDuration"), 60.0); + verificationMode = getOption(options, LiteralStringRef("verificationMode"), false); throttleDisk = getOption(options, LiteralStringRef("throttleDisk"), false); workersToThrottle = getOption(options, LiteralStringRef("workersToThrottle"), 3); stallInterval = getOption(options, LiteralStringRef("stallInterval"), 0.0); @@ -69,12 +74,18 @@ struct DiskFailureInjectionWorkload : TestWorkload { Future setup(Database const& cx) override { return Void(); } // Starts the workload by - - // 1. Starting the actor to periodically check chaosMetrics, and + // 1. Starting the actor to periodically check chaosMetrics and re-broadcast chaos events, and // 2. Starting the actor that injects failures on chosen storage servers Future start(Database const& cx) override { if (enabled) { clients.push_back(diskFailureInjectionClient(cx, this)); clients.push_back(periodicEventBroadcast(this)); + // In verification mode, we want to wait until the first actor returns which indicates that + // a non-zero chaosMetric was found + if (verificationMode) { + return waitForAny(clients); + } + // Else we honor testDuration return timeout(waitForAll(clients), testDuration, Void()); } else return Void(); @@ -197,7 +208,8 @@ struct DiskFailureInjectionWorkload : TestWorkload { } // Fetches chaosMetrics and verifies that chaos events are happening for enabled workers - ACTOR static Future chaosGetStatus(DiskFailureInjectionWorkload* self) { + ACTOR static Future chaosGetStatus(DiskFailureInjectionWorkload* self) { + state int foundChaosMetrics = 0; std::vector workers = wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); @@ -209,7 
+221,6 @@ struct DiskFailureInjectionWorkload : TestWorkload { // Check if any of the chosen workers for chaos events have non-zero chaosMetrics try { - int foundChaosMetrics = 0; for (auto& workerAddress : self->chosenWorkers) { auto chaosMetrics = cMetrics.find(workerAddress); if (chaosMetrics != cMetrics.end()) { @@ -230,10 +241,6 @@ struct DiskFailureInjectionWorkload : TestWorkload { } } } - if (foundChaosMetrics == 0) - TraceEvent("DiskFailureInjectionFailed").detail("ChaosMetricCount", foundChaosMetrics); - else - TraceEvent("ChaosGetStatus").detail("ChaosMetricCount", foundChaosMetrics); } catch (Error& e) { // it's possible to get an empty event, it's okay to ignore if (e.code() != error_code_attribute_not_found) { @@ -241,7 +248,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { } } - return Void(); + return foundChaosMetrics; } // Periodically re-send the chaos event in case of a process restart @@ -253,7 +260,11 @@ struct DiskFailureInjectionWorkload : TestWorkload { wait(reSendChaos(self)); elapsed += self->periodicBroadcastInterval; wait(delayUntil(start + elapsed)); - wait(chaosGetStatus(self)); + int foundChaosMetrics = wait(chaosGetStatus(self)); + if (foundChaosMetrics > 0) { + TraceEvent("FoundChaos").detail("ChaosMetricCount", foundChaosMetrics); + return Void(); + } } } }; diff --git a/fdbserver/workloads/TargetedKill.actor.cpp b/fdbserver/workloads/TargetedKill.actor.cpp index 48d5da4629..1c40cd47b6 100644 --- a/fdbserver/workloads/TargetedKill.actor.cpp +++ b/fdbserver/workloads/TargetedKill.actor.cpp @@ -33,10 +33,14 @@ struct TargetedKillWorkload : TestWorkload { std::string machineToKill; bool enabled, killAllMachineProcesses; double killAt; + bool reboot; + double suspendDuration; TargetedKillWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { enabled = !clientId; // only do this on the "first" client killAt = getOption(options, LiteralStringRef("killAt"), 5.0); + reboot = getOption(options, LiteralStringRef("reboot"), 
false); + suspendDuration = getOption(options, LiteralStringRef("suspendDuration"), 1.0); machineToKill = getOption(options, LiteralStringRef("machineToKill"), LiteralStringRef("master")).toString(); killAllMachineProcesses = getOption(options, LiteralStringRef("killWholeMachine"), false); } @@ -61,13 +65,19 @@ struct TargetedKillWorkload : TestWorkload { state vector workers = wait(getWorkers(self->dbInfo)); int killed = 0; + state RebootRequest rbReq; + if (self->reboot) { + rbReq.waitForDuration = self->suspendDuration; + } else { + rbReq.waitForDuration = std::numeric_limits::max(); + } for (int i = 0; i < workers.size(); i++) { if (workers[i].interf.master.getEndpoint().getPrimaryAddress() == address || (self->killAllMachineProcesses && workers[i].interf.master.getEndpoint().getPrimaryAddress().ip == address.ip && workers[i].processClass != ProcessClass::TesterClass)) { TraceEvent("WorkerKill").detail("TargetedMachine", address).detail("Worker", workers[i].interf.id()); - workers[i].interf.clientInterface.reboot.send(RebootRequest()); + workers[i].interf.clientInterface.reboot.send(rbReq); } } diff --git a/flow/network.h b/flow/network.h index b70070a9fa..5a692a58d6 100644 --- a/flow/network.h +++ b/flow/network.h @@ -715,6 +715,7 @@ struct DiskFailureInjector { throttlePeriod = throttleFor; throttleUntil = std::max(throttleUntil, g_network->now() + throttleFor); TraceEvent("SetDiskFailure") + .detail("Now", g_network->now()) .detail("StallInterval", interval) .detail("StallPeriod", stallFor) .detail("StallUntil", stallUntil) diff --git a/tests/slow/DiskFailureCycle.toml b/tests/slow/DiskFailureCycle.toml index 0f4d1365f7..da1eee421c 100644 --- a/tests/slow/DiskFailureCycle.toml +++ b/tests/slow/DiskFailureCycle.toml @@ -10,12 +10,13 @@ testTitle = 'DiskFailureCycle' [[test.workload]] testName = 'Cycle' transactionsPerSecond = 2500.0 - testDuration = 60.0 + testDuration = 600.0 expectedRate = 0 [[test.workload]] testName = 'DiskFailureInjection' testDuration = 
20.0 + verificationMode = true startDelay = 20.0 throttleDisk = true stallInterval = 10.0 @@ -25,6 +26,7 @@ testTitle = 'DiskFailureCycle' [[test.workload]] testName = 'DiskFailureInjection' testDuration = 20.0 + verificationMode = true startDelay = 40.0 corruptFile = true percentBitFlips = 10 From 337d0df13ce536739205f5a8cb4625e4683a311e Mon Sep 17 00:00:00 2001 From: negoyal Date: Tue, 7 Sep 2021 10:07:01 -0700 Subject: [PATCH 014/142] Add verification mode to chaos workload. --- .../workloads/DiskFailureInjection.actor.cpp | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 15bcad5c82..47fdc9344b 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -78,20 +78,23 @@ struct DiskFailureInjectionWorkload : TestWorkload { // 2. Starting the actor that injects failures on chosen storage servers Future start(Database const& cx) override { if (enabled) { - clients.push_back(diskFailureInjectionClient(cx, this)); - clients.push_back(periodicEventBroadcast(this)); - // In verification mode, we want to wait until the first actor returns which indicates that - // a non-zero chaosMetric was found + clients.push_back(timeout(diskFailureInjectionClient(cx, this), testDuration, Void())); + // In verification mode, we want to wait until periodicEventBroadcast actor returns which indicates that + // a non-zero chaosMetric was found. 
if (verificationMode) { - return waitForAny(clients); - } - // Else we honor testDuration - return timeout(waitForAll(clients), testDuration, Void()); + clients.push_back(periodicEventBroadcast(this)); + } else + //Else we honor the testDuration + clients.push_back(timeout(periodicEventBroadcast(this), testDuration, Void())); + return waitForAll(clients); } else return Void(); } - Future check(Database const& cx) override { return true; } + Future check(Database const& cx) override { + clients.clear(); + return true; + } void getMetrics(vector& m) override {} @@ -242,6 +245,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { } } } catch (Error& e) { + TraceEvent(SevDebug, "ChaosGetStatus").error(e); // it's possible to get an empty event, it's okay to ignore if (e.code() != error_code_attribute_not_found) { throw e; From 7729a282ceb73a521408d0002200cc1a29fd8456 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 8 Sep 2021 14:31:09 -0700 Subject: [PATCH 015/142] Misc fixes and updated test toml file. 
--- fdbrpc/AsyncFileChaos.actor.h | 1 + .../workloads/DiskFailureInjection.actor.cpp | 26 +++++++++---------- flow/network.h | 3 +++ tests/slow/DiskFailureCycle.toml | 14 +++++----- 4 files changed, 23 insertions(+), 21 deletions(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 2aa7ceedcf..9b9211cbff 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -87,6 +87,7 @@ public: auto res = g_network->global(INetwork::enBitFlipper); if (enabled && res) { auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); + //TraceEvent("AsyncFileChaosCorrupt").detail("Percentage", bitFlipPercentage); if (bitFlipPercentage > 0.0) { if (deterministicRandom()->random01() < bitFlipPercentage) { pdata = (char*)arena.allocate4kAlignedBuffer(length); diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 47fdc9344b..27b2033a60 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -116,10 +116,8 @@ struct DiskFailureInjectionWorkload : TestWorkload { ACTOR void injectDiskDelays(WorkerInterface worker, double stallInterval, double stallPeriod, - double throttlePeriod, - double startDelay) { + double throttlePeriod) { state Future res; - wait(::delay(startDelay)); SetFailureInjection::DiskFailureCommand diskFailure; diskFailure.stallInterval = stallInterval; diskFailure.stallPeriod = stallPeriod; @@ -132,9 +130,8 @@ struct DiskFailureInjectionWorkload : TestWorkload { } // Sets the disk corruption request - ACTOR void injectBitFlips(WorkerInterface worker, double percentage, double startDelay = 0.0) { + ACTOR void injectBitFlips(WorkerInterface worker, double percentage) { state Future res; - wait(::delay(startDelay)); SetFailureInjection::FlipBitsCommand flipBits; flipBits.percentBitFlips = percentage; SetFailureInjection req; @@ -149,6 +146,7 @@ struct DiskFailureInjectionWorkload 
: TestWorkload { // other worker types in future ACTOR template Future diskFailureInjectionClient(Database cx, DiskFailureInjectionWorkload* self) { + wait(::delay(self->startDelay)); state double lastTime = now(); state std::vector machines; state int throttledWorkers = 0; @@ -174,7 +172,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { self->chosenWorkers.emplace_back(machine.address()); if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) self->injectDiskDelays( - machine, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + machine, self->stallInterval, self->stallPeriod, self->throttlePeriod); if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { if (&g_simulator == g_network) g_simulator.corruptWorkerMap[machine.address()] = true; @@ -188,9 +186,8 @@ struct DiskFailureInjectionWorkload : TestWorkload { ACTOR static Future reSendChaos(DiskFailureInjectionWorkload* self) { state int throttledWorkers = 0; state int corruptedWorkers = 0; - std::vector workers = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); - std::map workersMap; + state std::map workersMap; + state std::vector workers = wait(getWorkers(self->dbInfo)); for (auto worker : workers) { workersMap[worker.interf.address()] = worker.interf; } @@ -199,7 +196,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { if (itr != workersMap.end()) { if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) self->injectDiskDelays( - itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod, self->startDelay); + itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod); if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { if (&g_simulator == g_network) g_simulator.corruptWorkerMap[workerAddress] = true; @@ -213,8 +210,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { // Fetches chaosMetrics and verifies that chaos 
events are happening for enabled workers ACTOR static Future chaosGetStatus(DiskFailureInjectionWorkload* self) { state int foundChaosMetrics = 0; - std::vector workers = - wait(self->dbInfo->get().clusterInterface.getWorkers.getReply(GetWorkersRequest{})); + state std::vector workers = wait(getWorkers(self->dbInfo)); Future>>> latestEventsFuture; latestEventsFuture = latestEventOnWorkers(workers, "ChaosMetrics"); @@ -245,9 +241,9 @@ struct DiskFailureInjectionWorkload : TestWorkload { } } } catch (Error& e) { - TraceEvent(SevDebug, "ChaosGetStatus").error(e); // it's possible to get an empty event, it's okay to ignore if (e.code() != error_code_attribute_not_found) { + TraceEvent(SevError, "ChaosGetStatus").error(e); throw e; } } @@ -257,16 +253,18 @@ struct DiskFailureInjectionWorkload : TestWorkload { // Periodically re-send the chaos event in case of a process restart ACTOR static Future periodicEventBroadcast(DiskFailureInjectionWorkload* self) { + wait(::delay(self->startDelay)); state double start = now(); state double elapsed = 0.0; loop { + wait(delayUntil(start + elapsed)); wait(reSendChaos(self)); elapsed += self->periodicBroadcastInterval; wait(delayUntil(start + elapsed)); int foundChaosMetrics = wait(chaosGetStatus(self)); if (foundChaosMetrics > 0) { - TraceEvent("FoundChaos").detail("ChaosMetricCount", foundChaosMetrics); + TraceEvent("FoundChaos").detail("ChaosMetricCount", foundChaosMetrics).detail("ClientID", self->clientId); return Void(); } } diff --git a/flow/network.h b/flow/network.h index 5a692a58d6..7c89ed689c 100644 --- a/flow/network.h +++ b/flow/network.h @@ -735,6 +735,9 @@ struct DiskFailureInjector { double getThrottleDelay() { // If we are in the throttle period, insert a random delay (in ms) + TraceEvent("GetThrottleDelay") + .detail("Now", g_network->now()) + .detail("ThrottleUntil", throttleUntil); if ((throttleUntil - g_network->now()) > 0.0) return (0.001 * deterministicRandom()->randomInt(1, 3)); diff --git 
a/tests/slow/DiskFailureCycle.toml b/tests/slow/DiskFailureCycle.toml index da1eee421c..29d3f35f36 100644 --- a/tests/slow/DiskFailureCycle.toml +++ b/tests/slow/DiskFailureCycle.toml @@ -10,23 +10,23 @@ testTitle = 'DiskFailureCycle' [[test.workload]] testName = 'Cycle' transactionsPerSecond = 2500.0 - testDuration = 600.0 + testDuration = 300.0 expectedRate = 0 [[test.workload]] testName = 'DiskFailureInjection' - testDuration = 20.0 + testDuration = 120.0 verificationMode = true - startDelay = 20.0 + startDelay = 30.0 throttleDisk = true stallInterval = 10.0 - stallPeriod = 20.0 - throttlePeriod = 20.0 + stallPeriod = 60.0 + throttlePeriod = 60.0 [[test.workload]] testName = 'DiskFailureInjection' - testDuration = 20.0 + testDuration = 240.0 verificationMode = true - startDelay = 40.0 + startDelay = 120.0 corruptFile = true percentBitFlips = 10 From c8e6bb13c4f00f9938cd4cc99d17438d67efffb5 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 8 Sep 2021 15:18:08 -0700 Subject: [PATCH 016/142] Clang format. 
--- fdbserver/workloads/DiskFailureInjection.actor.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 27b2033a60..8a08dc1c2a 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -84,7 +84,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { if (verificationMode) { clients.push_back(periodicEventBroadcast(this)); } else - //Else we honor the testDuration + // Else we honor the testDuration clients.push_back(timeout(periodicEventBroadcast(this), testDuration, Void())); return waitForAll(clients); } else @@ -171,8 +171,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { // Keep track of chosen workers for verification purpose self->chosenWorkers.emplace_back(machine.address()); if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) - self->injectDiskDelays( - machine, self->stallInterval, self->stallPeriod, self->throttlePeriod); + self->injectDiskDelays(machine, self->stallInterval, self->stallPeriod, self->throttlePeriod); if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { if (&g_simulator == g_network) g_simulator.corruptWorkerMap[machine.address()] = true; @@ -195,8 +194,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { auto itr = workersMap.find(workerAddress); if (itr != workersMap.end()) { if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle)) - self->injectDiskDelays( - itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod); + self->injectDiskDelays(itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod); if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) { if (&g_simulator == g_network) g_simulator.corruptWorkerMap[workerAddress] = true; @@ -264,7 +262,9 @@ struct DiskFailureInjectionWorkload : TestWorkload { 
wait(delayUntil(start + elapsed)); int foundChaosMetrics = wait(chaosGetStatus(self)); if (foundChaosMetrics > 0) { - TraceEvent("FoundChaos").detail("ChaosMetricCount", foundChaosMetrics).detail("ClientID", self->clientId); + TraceEvent("FoundChaos") + .detail("ChaosMetricCount", foundChaosMetrics) + .detail("ClientID", self->clientId); return Void(); } } From a48148fdb2f23cb5c07fb110ab994abae6c37b62 Mon Sep 17 00:00:00 2001 From: negoyal Date: Wed, 8 Sep 2021 22:53:52 -0700 Subject: [PATCH 017/142] Tweak the chaos toml file. --- tests/slow/DiskFailureCycle.toml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/slow/DiskFailureCycle.toml b/tests/slow/DiskFailureCycle.toml index 29d3f35f36..ca4d5740ca 100644 --- a/tests/slow/DiskFailureCycle.toml +++ b/tests/slow/DiskFailureCycle.toml @@ -22,11 +22,5 @@ testTitle = 'DiskFailureCycle' stallInterval = 10.0 stallPeriod = 60.0 throttlePeriod = 60.0 - - [[test.workload]] - testName = 'DiskFailureInjection' - testDuration = 240.0 - verificationMode = true - startDelay = 120.0 corruptFile = true percentBitFlips = 10 From a63c19c347889c5be26e32c61330967b97926f86 Mon Sep 17 00:00:00 2001 From: negoyal Date: Thu, 9 Sep 2021 11:29:54 -0700 Subject: [PATCH 018/142] Trying clang-format again. 
--- fdbserver/workloads/DiskFailureInjection.actor.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 8a08dc1c2a..db16901268 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -124,6 +124,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { diskFailure.throttlePeriod = throttlePeriod; SetFailureInjection req; req.diskFailure = diskFailure; + TraceEvent("DiskFailureInjectDiskDelays"); res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); checkDiskFailureInjectionResult(res, worker); @@ -136,6 +137,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { flipBits.percentBitFlips = percentage; SetFailureInjection req; req.flipBits = flipBits; + TraceEvent("DiskFailureInjectBitFlips"); res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); checkDiskFailureInjectionResult(res, worker); @@ -159,14 +161,17 @@ struct DiskFailureInjectionWorkload : TestWorkload { // If we failed to get a list of storage servers, we can't inject failure events // But don't throw the error in that case TraceEvent("DiskFailureInjectionFailed"); - return Void(); + continue; + // return Void(); } auto machine = deterministicRandom()->randomChoice(machines); // If we have already chosen this worker, then just continue if (find(self->chosenWorkers.begin(), self->chosenWorkers.end(), machine.address()) != - self->chosenWorkers.end()) + self->chosenWorkers.end()) { + TraceEvent("DiskFailureInjectionSkipped"); continue; + } // Keep track of chosen workers for verification purpose self->chosenWorkers.emplace_back(machine.address()); From a7721d9786b954415e21f999621268b6a7c801fb Mon Sep 17 00:00:00 2001 From: negoyal Date: Fri, 10 Sep 2021 15:41:22 -0700 Subject: [PATCH 019/142] Remove debug trace events and clang-format. 
--- fdbrpc/AsyncFileChaos.actor.h | 6 +----- fdbserver/workloads/DiskFailureInjection.actor.cpp | 5 ----- flow/network.h | 5 +---- 3 files changed, 2 insertions(+), 14 deletions(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 9b9211cbff..3678af63b6 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -36,9 +36,6 @@ public: explicit AsyncFileChaos(Reference file) : file(file) { // We only allow chaos events on storage files enabled = (file->getFilename().find("storage-") != std::string::npos); - //enabled = StringRef(file->getFilename()).startsWith(LiteralStringRef("storage-")); - - TraceEvent("AsyncFileChaos").detail("Enabled", enabled).detail("FileName", file->getFilename()); } void addref() override { ReferenceCounted::addref(); } @@ -48,7 +45,7 @@ public: double delayFor = 0.0; if (!enabled) return delayFor; - + auto res = g_network->global(INetwork::enDiskFailureInjector); if (res) { DiskFailureInjector* delayInjector = static_cast(res); @@ -87,7 +84,6 @@ public: auto res = g_network->global(INetwork::enBitFlipper); if (enabled && res) { auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); - //TraceEvent("AsyncFileChaosCorrupt").detail("Percentage", bitFlipPercentage); if (bitFlipPercentage > 0.0) { if (deterministicRandom()->random01() < bitFlipPercentage) { pdata = (char*)arena.allocate4kAlignedBuffer(length); diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index db16901268..b9079b39eb 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -124,7 +124,6 @@ struct DiskFailureInjectionWorkload : TestWorkload { diskFailure.throttlePeriod = throttlePeriod; SetFailureInjection req; req.diskFailure = diskFailure; - TraceEvent("DiskFailureInjectDiskDelays"); res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); 
checkDiskFailureInjectionResult(res, worker); @@ -137,7 +136,6 @@ struct DiskFailureInjectionWorkload : TestWorkload { flipBits.percentBitFlips = percentage; SetFailureInjection req; req.flipBits = flipBits; - TraceEvent("DiskFailureInjectBitFlips"); res = worker.clientInterface.setFailureInjection.getReply(req); wait(ready(res)); checkDiskFailureInjectionResult(res, worker); @@ -160,16 +158,13 @@ struct DiskFailureInjectionWorkload : TestWorkload { } catch (Error& e) { // If we failed to get a list of storage servers, we can't inject failure events // But don't throw the error in that case - TraceEvent("DiskFailureInjectionFailed"); continue; - // return Void(); } auto machine = deterministicRandom()->randomChoice(machines); // If we have already chosen this worker, then just continue if (find(self->chosenWorkers.begin(), self->chosenWorkers.end(), machine.address()) != self->chosenWorkers.end()) { - TraceEvent("DiskFailureInjectionSkipped"); continue; } diff --git a/flow/network.h b/flow/network.h index 7c89ed689c..51fe05d96d 100644 --- a/flow/network.h +++ b/flow/network.h @@ -715,7 +715,7 @@ struct DiskFailureInjector { throttlePeriod = throttleFor; throttleUntil = std::max(throttleUntil, g_network->now() + throttleFor); TraceEvent("SetDiskFailure") - .detail("Now", g_network->now()) + .detail("Now", g_network->now()) .detail("StallInterval", interval) .detail("StallPeriod", stallFor) .detail("StallUntil", stallUntil) @@ -735,9 +735,6 @@ struct DiskFailureInjector { double getThrottleDelay() { // If we are in the throttle period, insert a random delay (in ms) - TraceEvent("GetThrottleDelay") - .detail("Now", g_network->now()) - .detail("ThrottleUntil", throttleUntil); if ((throttleUntil - g_network->now()) > 0.0) return (0.001 * deterministicRandom()->randomInt(1, 3)); From 8d1e97b329cda898e38953aa24bf97e9318915b2 Mon Sep 17 00:00:00 2001 From: negoyal Date: Mon, 4 Oct 2021 22:43:48 -0700 Subject: [PATCH 020/142] Minor changes. 
--- fdbrpc/AsyncFileChaos.actor.h | 3 ++- fdbserver/workloads/ClearSingleRange.actor.cpp | 15 ++++++++++++--- .../workloads/DiskFailureInjection.actor.cpp | 4 ++-- fdbserver/workloads/Mako.actor.cpp | 2 +- fdbserver/workloads/TargetedKill.actor.cpp | 1 + 5 files changed, 18 insertions(+), 7 deletions(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 3678af63b6..affd48da06 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -85,7 +85,8 @@ public: if (enabled && res) { auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); if (bitFlipPercentage > 0.0) { - if (deterministicRandom()->random01() < bitFlipPercentage) { + auto bitFlipProb = bitFlipPercentage/100; + if (deterministicRandom()->random01() < bitFlipProb) { pdata = (char*)arena.allocate4kAlignedBuffer(length); memcpy(pdata, data, length); // flip a random bit in the copied buffer diff --git a/fdbserver/workloads/ClearSingleRange.actor.cpp b/fdbserver/workloads/ClearSingleRange.actor.cpp index f8f48be929..5e21e35254 100644 --- a/fdbserver/workloads/ClearSingleRange.actor.cpp +++ b/fdbserver/workloads/ClearSingleRange.actor.cpp @@ -47,9 +47,18 @@ struct ClearSingleRange : TestWorkload { ACTOR static Future fdbClientClearRange(Database db, ClearSingleRange* self) { state Transaction tr(db); - TraceEvent("ClearSingleRangeWaiting").detail("StartDelay", self->startDelay); - wait(delay(self->startDelay)); - tr.clear(KeyRangeRef(self->begin, self->end)); + try { + TraceEvent("ClearSingleRange"). + detail("Begin", printable(self->begin)). 
+ detail("End", printable(self->end)).detail("StartDelay", self->startDelay); + tr.setOption(FDBTransactionOptions::NEXT_WRITE_NO_WRITE_CONFLICT_RANGE); + wait(delay(self->startDelay)); + tr.clear(KeyRangeRef(self->begin, self->end)); + wait(tr.commit()); + } catch (Error& e) { + TraceEvent("ClearRangeError").error(e); + wait(tr.onError(e)); + } return Void(); } }; diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index b9079b39eb..359e0e19c3 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -225,7 +225,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { if (self->throttleDisk) { int diskDelays = chaosMetrics->second.getInt("DiskDelays"); if (diskDelays > 0) { - foundChaosMetrics++; + foundChaosMetrics += diskDelays; } } @@ -233,7 +233,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { if (self->corruptFile) { int bitFlips = chaosMetrics->second.getInt("BitFlips"); if (bitFlips > 0) { - foundChaosMetrics++; + foundChaosMetrics += bitFlips; } } } diff --git a/fdbserver/workloads/Mako.actor.cpp b/fdbserver/workloads/Mako.actor.cpp index 9611f057bb..67720c2249 100644 --- a/fdbserver/workloads/Mako.actor.cpp +++ b/fdbserver/workloads/Mako.actor.cpp @@ -56,7 +56,7 @@ struct MakoWorkload : TestWorkload { commits("Commits"), totalOps("Operations") { // init parameters from test file // Number of rows populated - rowCount = getOption(options, LiteralStringRef("rows"), 10000); + rowCount = getOption(options, LiteralStringRef("rows"), (uint64_t )10000); // Test duration in seconds testDuration = getOption(options, LiteralStringRef("testDuration"), 30.0); warmingDelay = getOption(options, LiteralStringRef("warmingDelay"), 0.0); diff --git a/fdbserver/workloads/TargetedKill.actor.cpp b/fdbserver/workloads/TargetedKill.actor.cpp index 1c40cd47b6..b77b97a18c 100644 --- a/fdbserver/workloads/TargetedKill.actor.cpp +++ 
b/fdbserver/workloads/TargetedKill.actor.cpp @@ -78,6 +78,7 @@ struct TargetedKillWorkload : TestWorkload { workers[i].processClass != ProcessClass::TesterClass)) { TraceEvent("WorkerKill").detail("TargetedMachine", address).detail("Worker", workers[i].interf.id()); workers[i].interf.clientInterface.reboot.send(rbReq); + killed++; } } From 518065c3edc9f7ef2cd1333248e89162e04469cb Mon Sep 17 00:00:00 2001 From: negoyal Date: Tue, 19 Oct 2021 17:22:27 -0700 Subject: [PATCH 021/142] TargetedKill fixes. --- .../workloads/ClearSingleRange.actor.cpp | 9 ++-- .../workloads/DiskFailureInjection.actor.cpp | 2 +- fdbserver/workloads/TargetedKill.actor.cpp | 41 +++++++++++++------ 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/fdbserver/workloads/ClearSingleRange.actor.cpp b/fdbserver/workloads/ClearSingleRange.actor.cpp index 5e21e35254..62e7671a7c 100644 --- a/fdbserver/workloads/ClearSingleRange.actor.cpp +++ b/fdbserver/workloads/ClearSingleRange.actor.cpp @@ -43,14 +43,15 @@ struct ClearSingleRange : TestWorkload { Future check(Database const& cx) override { return true; } - void getMetrics(vector& m) override {} + void getMetrics(std::vector& m) override {} ACTOR static Future fdbClientClearRange(Database db, ClearSingleRange* self) { state Transaction tr(db); try { - TraceEvent("ClearSingleRange"). - detail("Begin", printable(self->begin)). 
- detail("End", printable(self->end)).detail("StartDelay", self->startDelay); + TraceEvent("ClearSingleRange") + .detail("Begin", printable(self->begin)) + .detail("End", printable(self->end)) + .detail("StartDelay", self->startDelay); tr.setOption(FDBTransactionOptions::NEXT_WRITE_NO_WRITE_CONFLICT_RANGE); wait(delay(self->startDelay)); tr.clear(KeyRangeRef(self->begin, self->end)); diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp index 359e0e19c3..0973a2e61c 100644 --- a/fdbserver/workloads/DiskFailureInjection.actor.cpp +++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp @@ -96,7 +96,7 @@ struct DiskFailureInjectionWorkload : TestWorkload { return true; } - void getMetrics(vector& m) override {} + void getMetrics(std::vector& m) override {} static void checkDiskFailureInjectionResult(Future res, WorkerInterface worker) { if (res.isError()) { diff --git a/fdbserver/workloads/TargetedKill.actor.cpp b/fdbserver/workloads/TargetedKill.actor.cpp index 2270cecab3..cb3d0145c5 100644 --- a/fdbserver/workloads/TargetedKill.actor.cpp +++ b/fdbserver/workloads/TargetedKill.actor.cpp @@ -32,6 +32,7 @@ struct TargetedKillWorkload : TestWorkload { std::string machineToKill; bool enabled, killAllMachineProcesses; + int numKillStorages; double killAt; bool reboot; double suspendDuration; @@ -43,6 +44,7 @@ struct TargetedKillWorkload : TestWorkload { suspendDuration = getOption(options, LiteralStringRef("suspendDuration"), 1.0); machineToKill = getOption(options, LiteralStringRef("machineToKill"), LiteralStringRef("master")).toString(); killAllMachineProcesses = getOption(options, LiteralStringRef("killWholeMachine"), false); + numKillStorages = getOption(options, LiteralStringRef("numKillStorages"), 1); } std::string description() const override { return "TargetedKillWorkload"; } @@ -56,16 +58,17 @@ struct TargetedKillWorkload : TestWorkload { Future check(Database const& cx) override { return true; } void 
getMetrics(std::vector& m) override {} - ACTOR Future killEndpoint(NetworkAddress address, Database cx, TargetedKillWorkload* self) { + Future killEndpoint(std::vector workers, + NetworkAddress address, + Database cx, + TargetedKillWorkload* self) { if (&g_simulator == g_network) { g_simulator.killInterface(address, ISimulator::KillInstantly); return Void(); } - state std::vector workers = wait(getWorkers(self->dbInfo)); - int killed = 0; - state RebootRequest rbReq; + RebootRequest rbReq; if (self->reboot) { rbReq.waitForDuration = self->suspendDuration; } else { @@ -93,8 +96,13 @@ struct TargetedKillWorkload : TestWorkload { ACTOR Future assassin(Database cx, TargetedKillWorkload* self) { wait(delay(self->killAt)); state std::vector storageServers = wait(getStorageServers(cx)); + state std::vector workers = wait(getWorkers(self->dbInfo)); - NetworkAddress machine; + state NetworkAddress machine; + state NetworkAddress ccAddr; + state int killed = 0; + state int s = 0; + state int j = 0; if (self->machineToKill == "master") { machine = self->dbInfo->get().master.address(); } else if (self->machineToKill == "commitproxy") { @@ -129,13 +137,22 @@ struct TargetedKillWorkload : TestWorkload { } } else if (self->machineToKill == "storage" || self->machineToKill == "ss" || self->machineToKill == "storageserver") { - int o = deterministicRandom()->randomInt(0, storageServers.size()); - for (int i = 0; i < storageServers.size(); i++) { - StorageServerInterface ssi = storageServers[o]; + s = deterministicRandom()->randomInt(0, storageServers.size()); + ccAddr = self->dbInfo->get().clusterInterface.getWorkers.getEndpoint().getPrimaryAddress(); + for (j = 0; j < storageServers.size(); j++) { + StorageServerInterface ssi = storageServers[s]; machine = ssi.address(); - if (machine != self->dbInfo->get().clusterInterface.getWorkers.getEndpoint().getPrimaryAddress()) - break; - o = ++o % storageServers.size(); + if (machine != 
self->dbInfo->get().clusterInterface.getWorkers.getEndpoint().getPrimaryAddress()) { + TraceEvent("IsolatedMark").detail("TargetedMachine", machine).detail("Role", self->machineToKill); + wait(self->killEndpoint(workers, machine, cx, self)); + killed++; + TraceEvent("SentKillEndpoint") + .detail("Killed", killed) + .detail("NumKillStorages", self->numKillStorages); + if (killed == self->numKillStorages) + return Void(); + } + s = ++s % storageServers.size(); } } else if (self->machineToKill == "clustercontroller" || self->machineToKill == "cc") { machine = self->dbInfo->get().clusterInterface.getWorkers.getEndpoint().getPrimaryAddress(); @@ -143,7 +160,7 @@ struct TargetedKillWorkload : TestWorkload { TraceEvent("IsolatedMark").detail("TargetedMachine", machine).detail("Role", self->machineToKill); - wait(self->killEndpoint(machine, cx, self)); + wait(self->killEndpoint(workers, machine, cx, self)); return Void(); } From 88e66533ad59e90c6befb9b1c8f948ca4de6b874 Mon Sep 17 00:00:00 2001 From: negoyal Date: Thu, 28 Oct 2021 11:13:12 -0700 Subject: [PATCH 022/142] devFormat --- fdbrpc/AsyncFileChaos.actor.h | 2 +- fdbserver/workloads/Mako.actor.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index affd48da06..7f61ab36bb 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -85,7 +85,7 @@ public: if (enabled && res) { auto bitFlipPercentage = static_cast(res)->getBitFlipPercentage(); if (bitFlipPercentage > 0.0) { - auto bitFlipProb = bitFlipPercentage/100; + auto bitFlipProb = bitFlipPercentage / 100; if (deterministicRandom()->random01() < bitFlipProb) { pdata = (char*)arena.allocate4kAlignedBuffer(length); memcpy(pdata, data, length); diff --git a/fdbserver/workloads/Mako.actor.cpp b/fdbserver/workloads/Mako.actor.cpp index 67720c2249..9e32b7aa37 100644 --- a/fdbserver/workloads/Mako.actor.cpp +++ b/fdbserver/workloads/Mako.actor.cpp @@ -56,7 +56,7 @@ 
struct MakoWorkload : TestWorkload { commits("Commits"), totalOps("Operations") { // init parameters from test file // Number of rows populated - rowCount = getOption(options, LiteralStringRef("rows"), (uint64_t )10000); + rowCount = getOption(options, LiteralStringRef("rows"), (uint64_t)10000); // Test duration in seconds testDuration = getOption(options, LiteralStringRef("testDuration"), 30.0); warmingDelay = getOption(options, LiteralStringRef("warmingDelay"), 0.0); From 13bb7838aa32f20c15f435c72b32ef8ad449d221 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Sat, 30 Oct 2021 21:07:38 -0700 Subject: [PATCH 023/142] Enable clang -Wformat warning --- bindings/c/test/mako/mako.c | 70 +++++++++---------- bindings/c/test/txn_size_test.c | 8 +-- cmake/ConfigureCompiler.cmake | 1 - fdbcli/ChangeFeedCommand.actor.cpp | 2 +- fdbcli/SetClassCommand.actor.cpp | 2 +- fdbclient/BlobGranuleReader.actor.cpp | 6 +- fdbclient/NativeAPI.actor.cpp | 10 +-- fdbserver/BlobManager.actor.cpp | 20 +++--- fdbserver/BlobWorker.actor.cpp | 66 ++++++++--------- fdbserver/SimulatedCluster.actor.cpp | 6 +- fdbserver/VersionedBTree.actor.cpp | 24 +++---- fdbserver/fdbserver.actor.cpp | 4 +- fdbserver/networktest.actor.cpp | 2 +- fdbserver/tester.actor.cpp | 2 +- .../workloads/BlobGranuleVerifier.actor.cpp | 24 +++---- fdbserver/workloads/RyowCorrectness.actor.cpp | 4 +- 16 files changed, 126 insertions(+), 125 deletions(-) diff --git a/bindings/c/test/mako/mako.c b/bindings/c/test/mako/mako.c index 3cbbd7d50f..37deaed08c 100644 --- a/bindings/c/test/mako/mako.c +++ b/bindings/c/test/mako/mako.c @@ -943,7 +943,7 @@ int run_workload(FDBTransaction* transaction, if (tracetimer == dotrace) { fdb_error_t err; tracetimer = 0; - snprintf(traceid, 32, "makotrace%019lld", total_xacts); + snprintf(traceid, 32, "makotrace%019ld", total_xacts); fprintf(debugme, "DEBUG: txn tracing %s\n", traceid); err = fdb_transaction_set_option(transaction, FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER, @@ 
-1101,7 +1101,7 @@ void* worker_thread(void* thread_args) { } fprintf(debugme, - "DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%d (tid:%lld)\n", + "DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%lu (tid:%lu)\n", worker_id, args->num_processes, thread_id, @@ -1301,7 +1301,7 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi if (err) { fprintf(stderr, "ERROR: fdb_network_set_option (FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION) (%d): %s\n", - (uint8_t*)&args->client_threads_per_version, + args->client_threads_per_version, fdb_get_error(err)); // let's exit here since we do not want to confuse users // that mako is running with multi-threaded client enabled @@ -2038,9 +2038,9 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s for (op = 0; op < MAX_OP; op++) { if (args->txnspec.ops[op][OP_COUNT] > 0) { uint64_t ops_total_diff = ops_total[op] - ops_total_prev[op]; - printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total_diff); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total_diff); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), ops_total_diff); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total_diff); } errors_diff[op] = errors_total[op] - errors_total_prev[op]; print_err = (errors_diff[op] > 0); @@ -2068,7 +2068,7 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s printf("%" STR(STATS_TITLE_WIDTH) "s ", "Errors"); for (op = 0; op < MAX_OP; op++) { if (args->txnspec.ops[op][OP_COUNT] > 0) { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_diff[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_diff[op]); if (fp) { fprintf(fp, "\"errors\": %.2f", conflicts_diff); } @@ -2213,10 +2213,10 @@ void print_report(mako_args_t* args, break; } } - printf("Total Xacts: %8lld\n", totalxacts); - printf("Total Conflicts: %8lld\n", conflicts); - printf("Total Errors: %8lld\n", totalerrors); - printf("Overall TPS: %8lld\n\n", totalxacts * 
1000000000 / duration_nsec); + printf("Total Xacts: %8lu\n", totalxacts); + printf("Total Conflicts: %8lu\n", conflicts); + printf("Total Errors: %8lu\n", totalerrors); + printf("Overall TPS: %8lu\n\n", totalxacts * 1000000000 / duration_nsec); if (fp) { fprintf(fp, "\"results\": {"); @@ -2224,10 +2224,10 @@ void print_report(mako_args_t* args, fprintf(fp, "\"totalProcesses\": %d,", args->num_processes); fprintf(fp, "\"totalThreads\": %d,", args->num_threads); fprintf(fp, "\"targetTPS\": %d,", args->tpsmax); - fprintf(fp, "\"totalXacts\": %lld,", totalxacts); - fprintf(fp, "\"totalConflicts\": %lld,", conflicts); - fprintf(fp, "\"totalErrors\": %lld,", totalerrors); - fprintf(fp, "\"overallTPS\": %lld,", totalxacts * 1000000000 / duration_nsec); + fprintf(fp, "\"totalXacts\": %lu,", totalxacts); + fprintf(fp, "\"totalConflicts\": %lu,", conflicts); + fprintf(fp, "\"totalErrors\": %lu,", totalerrors); + fprintf(fp, "\"overallTPS\": %lu,", totalxacts * 1000000000 / duration_nsec); } /* per-op stats */ @@ -2240,9 +2240,9 @@ void print_report(mako_args_t* args, } for (op = 0; op < MAX_OP; op++) { if ((args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) || op == OP_COMMIT) { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total[op]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), ops_total[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total[op]); } } } @@ -2263,9 +2263,9 @@ void print_report(mako_args_t* args, printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Errors"); for (op = 0; op < MAX_OP; op++) { if (args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_total[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_total[op]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), errors_total[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), errors_total[op]); } } } @@ -2282,12 +2282,12 @@ void 
print_report(mako_args_t* args, for (op = 0; op < MAX_OP; op++) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (lat_total[op]) { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_samples[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_samples[op]); } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); } if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_samples[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_samples[op]); } } } @@ -2303,9 +2303,9 @@ void print_report(mako_args_t* args, if (lat_min[op] == -1) { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); } else { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_min[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_min[op]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_min[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_min[op]); } } } @@ -2320,9 +2320,9 @@ void print_report(mako_args_t* args, for (op = 0; op < MAX_OP; op++) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (lat_total[op]) { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_total[op] / lat_samples[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_total[op] / lat_samples[op]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_total[op] / lat_samples[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_total[op] / lat_samples[op]); } } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); @@ -2341,9 +2341,9 @@ void print_report(mako_args_t* args, if (lat_max[op] == 0) { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); } else { - printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_max[op]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_max[op]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_max[op]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_max[op]); } } } @@ -2393,9 +2393,9 @@ void print_report(mako_args_t* args, } else { median = (dataPoints[op][num_points[op] / 2] 
+ dataPoints[op][num_points[op] / 2 - 1]) >> 1; } - printf("%" STR(STATS_FIELD_WIDTH) "lld ", median); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", median); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), median); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), median); } } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); @@ -2417,9 +2417,9 @@ void print_report(mako_args_t* args, } if (lat_total[op]) { point_95pct = ((float)(num_points[op]) * 0.95) - 1; - printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_95pct]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_95pct]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_95pct]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_95pct]); } } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); @@ -2441,9 +2441,9 @@ void print_report(mako_args_t* args, } if (lat_total[op]) { point_99pct = ((float)(num_points[op]) * 0.99) - 1; - printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99pct]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99pct]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_99pct]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99pct]); } } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); @@ -2465,9 +2465,9 @@ void print_report(mako_args_t* args, } if (lat_total[op]) { point_99_9pct = ((float)(num_points[op]) * 0.999) - 1; - printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99_9pct]); + printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99_9pct]); if (fp) { - fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_99_9pct]); + fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99_9pct]); } } else { printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); @@ -2529,7 +2529,7 @@ int stats_process_main(mako_args_t* args, fprintf(fp, "\"value_length\": %d,", args->value_length); 
fprintf(fp, "\"commit_get\": %d,", args->commit_get); fprintf(fp, "\"verbose\": %d,", args->verbose); - fprintf(fp, "\"cluster_files\": \"%s\",", args->cluster_files); + fprintf(fp, "\"cluster_files\": \"%s\",", args->cluster_files[0]); fprintf(fp, "\"log_group\": \"%s\",", args->log_group); fprintf(fp, "\"prefixpadding\": %d,", args->prefixpadding); fprintf(fp, "\"trace\": %d,", args->trace); diff --git a/bindings/c/test/txn_size_test.c b/bindings/c/test/txn_size_test.c index f1c90cd720..b8be90ceb1 100644 --- a/bindings/c/test/txn_size_test.c +++ b/bindings/c/test/txn_size_test.c @@ -67,25 +67,25 @@ void runTests(struct ResultSet* rs) { fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE); e = getSize(rs, tr, sizes + i); checkError(e, "transaction get size", rs); - printf("size %d: %u\n", i, sizes[i]); + printf("size %d: %ld\n", i, sizes[i]); i++; fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE); e = getSize(rs, tr, sizes + i); checkError(e, "transaction get size", rs); - printf("size %d: %u\n", i, sizes[i]); + printf("size %d: %ld\n", i, sizes[i]); i++; fdb_transaction_clear(tr, keys[i], KEY_SIZE); e = getSize(rs, tr, sizes + i); checkError(e, "transaction get size", rs); - printf("size %d: %u\n", i, sizes[i]); + printf("size %d: %ld\n", i, sizes[i]); i++; fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i + 1], KEY_SIZE); e = getSize(rs, tr, sizes + i); checkError(e, "transaction get size", rs); - printf("size %d: %u\n", i, sizes[i]); + printf("size %d: %ld\n", i, sizes[i]); i++; for (j = 0; j + 1 < i; j++) { diff --git a/cmake/ConfigureCompiler.cmake b/cmake/ConfigureCompiler.cmake index 6379f7bf14..dccfbcc7ee 100644 --- a/cmake/ConfigureCompiler.cmake +++ b/cmake/ConfigureCompiler.cmake @@ -284,7 +284,6 @@ else() # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11 -Wno-comment -Wno-delete-non-virtual-dtor - -Wno-format -Wno-mismatched-tags -Wno-missing-field-initializers 
-Wno-sign-compare diff --git a/fdbcli/ChangeFeedCommand.actor.cpp b/fdbcli/ChangeFeedCommand.actor.cpp index d28d96c367..b5e7a79ff1 100644 --- a/fdbcli/ChangeFeedCommand.actor.cpp +++ b/fdbcli/ChangeFeedCommand.actor.cpp @@ -127,7 +127,7 @@ ACTOR Future changeFeedCommandActor(Database localDb, std::vector> res = waitNext(feedResults.getFuture())) { for (auto& it : res) { for (auto& it2 : it.mutations) { - printf("%lld %s\n", it.version, it2.toString().c_str()); + printf("%ld %s\n", it.version, it2.toString().c_str()); } } } diff --git a/fdbcli/SetClassCommand.actor.cpp b/fdbcli/SetClassCommand.actor.cpp index ee3ebfe454..bec2103287 100644 --- a/fdbcli/SetClassCommand.actor.cpp +++ b/fdbcli/SetClassCommand.actor.cpp @@ -48,7 +48,7 @@ ACTOR Future printProcessClass(Reference db) { ASSERT(processSourceList.size() == processTypeList.size()); if (!processTypeList.size()) printf("No processes are registered in the database.\n"); - printf("There are currently %zu processes in the database:\n", processTypeList.size()); + printf("There are currently %d processes in the database:\n", processTypeList.size()); for (int index = 0; index < processTypeList.size(); index++) { std::string address = processTypeList[index].key.removePrefix(fdb_cli::processClassTypeSpecialKeyRange.begin).toString(); diff --git a/fdbclient/BlobGranuleReader.actor.cpp b/fdbclient/BlobGranuleReader.actor.cpp index 9211124ad5..0c0ac42edf 100644 --- a/fdbclient/BlobGranuleReader.actor.cpp +++ b/fdbclient/BlobGranuleReader.actor.cpp @@ -97,7 +97,7 @@ ACTOR Future readSnapshotFile(Reference bstore } }*/ if (BG_READ_DEBUG) { - printf("Started with %d rows from snapshot file %s after pruning to [%s - %s)\n", + printf("Started with %lu rows from snapshot file %s after pruning to [%s - %s)\n", dataMap->size(), f.toString().c_str(), keyRange.begin.printable().c_str(), @@ -143,7 +143,7 @@ ACTOR Future> readDeltaFile(Reference result[i + 1].version) { - printf("BG VERSION ORDER VIOLATION IN DELTA FILE: '%lld', 
'%lld'\n", + printf("BG VERSION ORDER VIOLATION IN DELTA FILE: '%ld', '%ld'\n", result[i].version, result[i + 1].version); } @@ -313,7 +313,7 @@ ACTOR Future readBlobGranule(BlobGranuleChunkRef chunk, arena.dependsOn(snapshotArena); if (BG_READ_DEBUG) { - printf("Applying %d delta files\n", readDeltaFutures.size()); + printf("Applying %lu delta files\n", readDeltaFutures.size()); } for (Future> deltaFuture : readDeltaFutures) { Standalone result = wait(deltaFuture); diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 117524a43a..badb03415d 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -7201,7 +7201,7 @@ ACTOR Future readBlobGranulesStreamActor(Reference db, blobGranuleMapping = _bgMapping; if (blobGranuleMapping.more) { if (BG_REQUEST_DEBUG) { - printf("BG Mapping for [%s - %s) too large!\n"); + // printf("BG Mapping for [%s - %s) too large!\n"); } throw unsupported_operation(); } @@ -7215,7 +7215,7 @@ ACTOR Future readBlobGranulesStreamActor(Reference db, } if (BG_REQUEST_DEBUG) { - printf("Doing blob granule request @ %lld\n", endVersion); + printf("Doing blob granule request @ %ld\n", endVersion); printf("blob worker assignments:\n"); } @@ -7290,7 +7290,7 @@ ACTOR Future readBlobGranulesStreamActor(Reference db, nullptr)); if (BG_REQUEST_DEBUG) { - printf("Blob granule request for [%s - %s) @ %lld - %lld got reply from %s:\n", + printf("Blob granule request for [%s - %s) @ %ld - %ld got reply from %s:\n", granuleStartKey.printable().c_str(), granuleEndKey.printable().c_str(), begin, @@ -7311,11 +7311,11 @@ ACTOR Future readBlobGranulesStreamActor(Reference db, } printf(" Deltas: (%d)", chunk.newDeltas.size()); if (chunk.newDeltas.size() > 0) { - printf(" with version [%lld - %lld]", + printf(" with version [%ld - %ld]", chunk.newDeltas[0].version, chunk.newDeltas[chunk.newDeltas.size() - 1].version); } - printf(" IncludedVersion: %lld\n", chunk.includedVersion); + printf(" IncludedVersion: %ld\n", 
chunk.includedVersion); printf("\n\n"); } Arena a; diff --git a/fdbserver/BlobManager.actor.cpp b/fdbserver/BlobManager.actor.cpp index 557352f11c..a312f248c9 100644 --- a/fdbserver/BlobManager.actor.cpp +++ b/fdbserver/BlobManager.actor.cpp @@ -235,7 +235,7 @@ ACTOR Future>> splitRange(ReferencegetTransaction().getStorageMetrics(range, CLIENT_KNOBS->TOO_MANY)); if (BM_DEBUG) { - printf("Estimated bytes for [%s - %s): %lld\n", + printf("Estimated bytes for [%s - %s): %ld\n", range.begin.printable().c_str(), range.end.printable().c_str(), estimated.bytes); @@ -300,7 +300,7 @@ static UID pickWorkerForAssign(BlobManagerData* bmData) { ACTOR Future doRangeAssignment(BlobManagerData* bmData, RangeAssignment assignment, UID workerID, int64_t seqNo) { if (BM_DEBUG) { - printf("BM %s %s range [%s - %s) @ (%lld, %lld)\n", + printf("BM %s %s range [%s - %s) @ (%ld, %ld)\n", bmData->id.toString().c_str(), assignment.isAssign ? "assigning" : "revoking", assignment.keyRange.begin.printable().c_str(), @@ -379,7 +379,7 @@ ACTOR Future doRangeAssignment(BlobManagerData* bmData, RangeAssignment as // FIXME: improvement would be to add history of failed workers to assignment so it can try other ones first } else { if (BM_DEBUG) { - printf("BM got error revoking range [%s - %s) from worker %s", + printf("BM got error revoking range [%s - %s) from worker", assignment.keyRange.begin.printable().c_str(), assignment.keyRange.end.printable().c_str()); } @@ -472,7 +472,7 @@ ACTOR Future checkManagerLock(Reference tr, Blo ASSERT(currentEpoch > bmData->epoch); if (BM_DEBUG) { - printf("BM %s found new epoch %d > %d in lock check\n", + printf("BM %s found new epoch %ld > %ld in lock check\n", bmData->id.toString().c_str(), currentEpoch, bmData->epoch); @@ -625,7 +625,7 @@ ACTOR Future maybeSplitRange(BlobManagerData* bmData, std::tuple prevGranuleLock = decodeBlobGranuleLockValue(lockValue.get()); if (std::get<0>(prevGranuleLock) > bmData->epoch) { if (BM_DEBUG) { - printf("BM %s found a 
higher epoch %d than %d for granule lock of [%s - %s)\n", + printf("BM %s found a higher epoch %ld than %ld for granule lock of [%s - %s)\n", bmData->id.toString().c_str(), std::get<0>(prevGranuleLock), bmData->epoch, @@ -770,7 +770,7 @@ ACTOR Future monitorBlobWorkerStatus(BlobManagerData* bmData, BlobWorkerIn GranuleStatusReply rep = waitNext(statusStream.getFuture()); if (BM_DEBUG) { - printf("BM %lld got status of [%s - %s) @ (%lld, %lld) from BW %s: %s\n", + printf("BM %ld got status of [%s - %s) @ (%ld, %ld) from BW %s: %s\n", bmData->epoch, rep.granuleRange.begin.printable().c_str(), rep.granuleRange.end.printable().c_str(), @@ -806,14 +806,14 @@ ACTOR Future monitorBlobWorkerStatus(BlobManagerData* bmData, BlobWorkerIn rep.granuleRange.end == lastReqForGranule.end() && rep.epoch == lastReqForGranule.value().first && rep.seqno == lastReqForGranule.value().second) { if (BM_DEBUG) { - printf("Manager %lld received repeat status for the same granule [%s - %s) @ %lld, ignoring.", + printf("Manager %ld received repeat status for the same granule [%s - %s), ignoring.", bmData->epoch, rep.granuleRange.begin.printable().c_str(), rep.granuleRange.end.printable().c_str()); } } else { if (BM_DEBUG) { - printf("Manager %lld evaluating [%s - %s) for split\n", + printf("Manager %ld evaluating [%s - %s) for split\n", bmData->epoch, rep.granuleRange.begin.printable().c_str(), rep.granuleRange.end.printable().c_str()); @@ -858,7 +858,7 @@ ACTOR Future monitorBlobWorker(BlobManagerData* bmData, BlobWorkerInterfac choose { when(wait(waitFailure)) { if (BM_DEBUG) { - printf("BM %lld detected BW %s is dead\n", bmData->epoch, bwInterf.id().toString().c_str()); + printf("BM %ld detected BW %s is dead\n", bmData->epoch, bwInterf.id().toString().c_str()); } TraceEvent("BlobWorkerFailed", bmData->id).detail("BlobWorkerID", bwInterf.id()); } @@ -1115,7 +1115,7 @@ ACTOR Future blobManager(BlobManagerInterface bmInterf, } if (BM_DEBUG) { - printf("Blob manager acquired lock at epoch 
%lld\n", epoch); + printf("Blob manager acquired lock at epoch %ld\n", epoch); } // needed to pick up changes to dbinfo in case new CC comes along diff --git a/fdbserver/BlobWorker.actor.cpp b/fdbserver/BlobWorker.actor.cpp index d85d113e99..35cf40241d 100644 --- a/fdbserver/BlobWorker.actor.cpp +++ b/fdbserver/BlobWorker.actor.cpp @@ -193,7 +193,7 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted { bool managerEpochOk(int64_t epoch) { if (epoch < currentManagerEpoch) { if (BW_DEBUG) { - printf("BW %s got request from old epoch %lld, notifying manager it is out of date\n", + printf("BW %s got request from old epoch %ld, notifying manager it is out of date\n", id.toString().c_str(), epoch); } @@ -202,7 +202,7 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted { if (epoch > currentManagerEpoch) { currentManagerEpoch = epoch; if (BW_DEBUG) { - printf("BW %s found new manager epoch %lld\n", id.toString().c_str(), currentManagerEpoch); + printf("BW %s found new manager epoch %ld\n", id.toString().c_str(), currentManagerEpoch); } } @@ -216,7 +216,7 @@ static void acquireGranuleLock(int64_t epoch, int64_t seqno, int64_t prevOwnerEp // returns true if our lock (E, S) >= (Eprev, Sprev) if (epoch < prevOwnerEpoch || (epoch == prevOwnerEpoch && seqno < prevOwnerSeqno)) { if (BW_DEBUG) { - printf("Lock acquire check failed. Proposed (%lld, %lld) < previous (%lld, %lld)\n", + printf("Lock acquire check failed. Proposed (%ld, %ld) < previous (%ld, %ld)\n", epoch, seqno, prevOwnerEpoch, @@ -239,7 +239,7 @@ static void checkGranuleLock(int64_t epoch, int64_t seqno, int64_t ownerEpoch, i // returns true if we still own the lock, false if someone else does if (epoch != ownerEpoch || seqno != ownerSeqno) { if (BW_DEBUG) { - printf("Lock assignment check failed. Expected (%lld, %lld), got (%lld, %lld)\n", + printf("Lock assignment check failed. 
Expected (%ld, %ld), got (%ld, %ld)\n", epoch, seqno, ownerEpoch, @@ -303,7 +303,7 @@ ACTOR Future readGranuleFiles(Transaction* tr, Key* startKey, Key endKey, } } if (BW_DEBUG) { - printf("Loaded %d snapshot and %d delta files for %s\n", + printf("Loaded %lu snapshot and %lu delta files for %s\n", files->snapshotFiles.size(), files->deltaFiles.size(), granuleID.toString().c_str()); @@ -546,7 +546,7 @@ ACTOR Future writeDeltaFile(Reference bwData, wait(tr->commit()); if (BW_DEBUG) { - printf("Granule %s [%s - %s) updated fdb with delta file %s of size %d at version %lld, cv=%lld\n", + printf("Granule %s [%s - %s) updated fdb with delta file %s of size %d at version %ld, cv=%ld\n", granuleID.toString().c_str(), keyRange.begin.printable().c_str(), keyRange.end.printable().c_str(), @@ -812,7 +812,7 @@ ACTOR Future compactFromBlob(Reference bwData, chunk.includedVersion = version; if (BW_DEBUG) { - printf("Re-snapshotting [%s - %s) @ %lld from blob\n", + printf("Re-snapshotting [%s - %s) @ %ld from blob\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), version); @@ -911,7 +911,7 @@ ACTOR Future handleCompletedDeltaFile(Reference bwData, if (completedDeltaFile.version > cfStartVersion) { if (BW_DEBUG) { - printf("Popping change feed %s at %lld\n", cfKey.printable().c_str(), completedDeltaFile.version); + printf("Popping change feed %s at %ld\n", cfKey.printable().c_str(), completedDeltaFile.version); } // FIXME: for a write-hot shard, we could potentially batch these and only pop the largest one after several // have completed @@ -968,7 +968,7 @@ static Version doGranuleRollback(Reference metadata, metadata->bytesInNewDeltaFiles -= df.bytes; toPop++; if (BW_DEBUG) { - printf("[%s - %s) rollback cancelling delta file @ %lld\n", + printf("[%s - %s) rollback cancelling delta file @ %ld\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), df.version); @@ -1013,7 +1013,7 @@ static Version 
doGranuleRollback(Reference metadata, } mIdx++; if (BW_DEBUG) { - printf("[%s - %s) rollback discarding %d in-memory mutations, %d mutations and %lld bytes left\n", + printf("[%s - %s) rollback discarding %d in-memory mutations, %d mutations and %ld bytes left\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), metadata->currentDeltas.size() - mIdx, @@ -1030,7 +1030,7 @@ static Version doGranuleRollback(Reference metadata, } if (BW_DEBUG) { - printf("[%s - %s) finishing rollback to %lld\n", + printf("[%s - %s) finishing rollback to %ld\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), cfRollbackVersion); @@ -1093,8 +1093,8 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str()); printf(" CFID: %s\n", startState.granuleID.toString().c_str()); - printf(" CF Start Version: %lld\n", startState.changeFeedStartVersion); - printf(" Previous Durable Version: %lld\n", startState.previousDurableVersion); + printf(" CF Start Version: %ld\n", startState.changeFeedStartVersion); + printf(" Previous Durable Version: %ld\n", startState.previousDurableVersion); printf(" doSnapshot=%s\n", startState.doSnapshot ? "T" : "F"); printf(" Prev CFID: %s\n", startState.parentGranule.present() ? 
startState.parentGranule.get().second.toString().c_str() : ""); @@ -1259,7 +1259,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (metadata->bufferedDeltaBytes >= SERVER_KNOBS->BG_DELTA_FILE_TARGET_BYTES && deltas.version > lastVersion) { if (BW_DEBUG) { - printf("Granule [%s - %s) flushing delta file after %d bytes @ %lld %lld%s\n", + printf("Granule [%s - %s) flushing delta file after %lu bytes @ %ld %ld%s\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), metadata->bufferedDeltaBytes, @@ -1321,7 +1321,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (snapshotEligible && metadata->bytesInNewDeltaFiles >= SERVER_KNOBS->BG_DELTA_BYTES_BEFORE_COMPACT && !readOldChangeFeed) { if (BW_DEBUG && (inFlightBlobSnapshot.isValid() || !inFlightDeltaFiles.empty())) { - printf("Granule [%s - %s) ready to re-snapshot, waiting for outstanding %d snapshot and %d " + printf("Granule [%s - %s) ready to re-snapshot, waiting for outstanding %d snapshot and %lu " "deltas to " "finish\n", metadata->keyRange.begin.printable().c_str(), @@ -1350,7 +1350,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, inFlightDeltaFiles.clear(); if (BW_DEBUG) { - printf("Granule [%s - %s) checking with BM for re-snapshot after %d bytes\n", + printf("Granule [%s - %s) checking with BM for re-snapshot after %lu bytes\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), metadata->bytesInNewDeltaFiles); @@ -1400,7 +1400,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, } if (BW_DEBUG) { - printf("Granule [%s - %s) re-snapshotting after %d bytes\n", + printf("Granule [%s - %s) re-snapshotting after %lu bytes\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), metadata->bytesInNewDeltaFiles); @@ -1467,7 +1467,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (!rollbacksInProgress.empty()) { ASSERT(rollbacksInProgress.front().first 
== rollbackVersion); ASSERT(rollbacksInProgress.front().second == deltas.version); - printf("Passed rollback %lld -> %lld\n", deltas.version, rollbackVersion); + printf("Passed rollback %ld -> %ld\n", deltas.version, rollbackVersion); rollbacksCompleted.push_back(rollbacksInProgress.front()); rollbacksInProgress.pop_front(); } else { @@ -1479,13 +1479,13 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, metadata->currentDeltas.back().version <= rollbackVersion)) { if (BW_DEBUG) { - printf("BW skipping rollback %lld -> %lld completely\n", + printf("BW skipping rollback %ld -> %ld completely\n", deltas.version, rollbackVersion); } } else { if (BW_DEBUG) { - printf("BW [%s - %s) ROLLBACK @ %lld -> %lld\n", + printf("BW [%s - %s) ROLLBACK @ %ld -> %ld\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), deltas.version, @@ -1527,7 +1527,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, } else if (!rollbacksInProgress.empty() && rollbacksInProgress.front().first < deltas.version && rollbacksInProgress.front().second > deltas.version) { if (BW_DEBUG) { - printf("Skipping mutations @ %lld b/c prior rollback\n", deltas.version); + printf("Skipping mutations @ %ld b/c prior rollback\n", deltas.version); } } else { for (auto& delta : deltas.mutations) { @@ -1555,7 +1555,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, ASSERT(startState.parentGranule.present()); oldChangeFeedDataComplete = startState.parentGranule.get(); if (BW_DEBUG) { - printf("Granule [%s - %s) switching to new change feed %s @ %lld\n", + printf("Granule [%s - %s) switching to new change feed %s @ %ld\n", metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), startState.granuleID.toString().c_str(), @@ -1676,7 +1676,7 @@ ACTOR Future blobGranuleLoadHistory(Reference bwData, } if (BW_DEBUG) { - printf("Loaded %d history entries for granule [%s - %s) (%d skipped)\n", + printf("Loaded %lu history entries for 
granule [%s - %s) (%d skipped)\n", historyEntryStack.size(), metadata->keyRange.begin.printable().c_str(), metadata->keyRange.end.printable().c_str(), @@ -1855,7 +1855,7 @@ ACTOR Future handleBlobGranuleFileRequest(Reference bwData } if (BW_REQUEST_DEBUG) { - printf("[%s - %s) @ %lld time traveled back to %s [%s - %s) @ [%lld - %lld)\n", + printf("[%s - %s) @ %ld time traveled back to %s [%s - %s) @ [%ld - %ld)\n", req.keyRange.begin.printable().c_str(), req.keyRange.end.printable().c_str(), req.readVersion, @@ -1894,7 +1894,7 @@ ACTOR Future handleBlobGranuleFileRequest(Reference bwData if (rollbackCount == metadata->rollbackCount.get()) { break; } else if (BW_REQUEST_DEBUG) { - printf("[%s - %s) @ %lld hit rollback, restarting waitForVersion\n", + printf("[%s - %s) @ %ld hit rollback, restarting waitForVersion\n", req.keyRange.begin.printable().c_str(), req.keyRange.end.printable().c_str(), req.readVersion); @@ -2222,7 +2222,7 @@ ACTOR Future changeBlobRange(Reference bwData, bool disposeOnCleanup, bool selfReassign) { if (BW_DEBUG) { - printf("%s range for [%s - %s): %s @ (%lld, %lld)\n", + printf("%s range for [%s - %s): %s @ (%ld, %ld)\n", selfReassign ? 
"Re-assigning" : "Changing", keyRange.begin.printable().c_str(), keyRange.end.printable().c_str(), @@ -2273,7 +2273,7 @@ ACTOR Future changeBlobRange(Reference bwData, if (r.value().activeMetadata.isValid() && thisAssignmentNewer) { // cancel actors for old range and clear reference if (BW_DEBUG) { - printf(" [%s - %s): @ (%lld, %lld) (cancelling)\n", + printf(" [%s - %s): @ (%ld, %ld) (cancelling)\n", r.begin().printable().c_str(), r.end().printable().c_str(), r.value().lastEpoch, @@ -2298,7 +2298,7 @@ ACTOR Future changeBlobRange(Reference bwData, bwData->granuleMetadata.insert(keyRange, newMetadata); if (BW_DEBUG) { - printf("Inserting new range [%s - %s): %s @ (%lld, %lld)\n", + printf("Inserting new range [%s - %s): %s @ (%ld, %ld)\n", keyRange.begin.printable().c_str(), keyRange.end.printable().c_str(), newMetadata.activeMetadata.isValid() ? "T" : "F", @@ -2308,7 +2308,7 @@ ACTOR Future changeBlobRange(Reference bwData, for (auto& it : newerRanges) { if (BW_DEBUG) { - printf("Re-inserting newer range [%s - %s): %s @ (%lld, %lld)\n", + printf("Re-inserting newer range [%s - %s): %s @ (%ld, %ld)\n", it.first.begin.printable().c_str(), it.first.end.printable().c_str(), it.second.activeMetadata.isValid() ? "T" : "F", @@ -2332,8 +2332,8 @@ static bool resumeBlobRange(Reference bwData, KeyRange keyRange, !existingRange.value().activeMetadata.isValid()) { if (BW_DEBUG) { - printf("BW %s got out of date resume range for [%s - %s) @ (%lld, %lld). Currently [%s - %s) @ (%lld, " - "%lld): %s\n", + printf("BW %s got out of date resume range for [%s - %s) @ (%ld, %ld). 
Currently [%s - %s) @ (%ld, " + "%ld): %s\n", bwData->id.toString().c_str(), existingRange.begin().printable().c_str(), existingRange.end().printable().c_str(), @@ -2555,7 +2555,7 @@ ACTOR Future blobWorker(BlobWorkerInterface bwInterf, --self->stats.numRangesAssigned; state AssignBlobRangeRequest assignReq = _req; if (BW_DEBUG) { - printf("Worker %s assigned range [%s - %s) @ (%lld, %lld):\n continue=%s\n", + printf("Worker %s assigned range [%s - %s) @ (%ld, %ld):\n continue=%s\n", self->id.toString().c_str(), assignReq.keyRange.begin.printable().c_str(), assignReq.keyRange.end.printable().c_str(), @@ -2574,7 +2574,7 @@ ACTOR Future blobWorker(BlobWorkerInterface bwInterf, state RevokeBlobRangeRequest revokeReq = _req; --self->stats.numRangesAssigned; if (BW_DEBUG) { - printf("Worker %s revoked range [%s - %s) @ (%lld, %lld):\n dispose=%s\n", + printf("Worker %s revoked range [%s - %s) @ (%ld, %ld):\n dispose=%s\n", self->id.toString().c_str(), revokeReq.keyRange.begin.printable().c_str(), revokeReq.keyRange.end.printable().c_str(), diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 9062c20b58..2c812eca2a 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -221,7 +221,9 @@ class TestConfig { } if (attrib == "configureLocked") { - sscanf(value.c_str(), "%d", &configureLocked); + int configureLockedInt; + sscanf(value.c_str(), "%d", &configureLockedInt); + configureLocked = (configureLockedInt != 0); } if (attrib == "startIncompatibleProcess") { @@ -2301,4 +2303,4 @@ ACTOR void setupAndRun(std::string dataFolder, destructed = true; wait(Never()); ASSERT(false); -} \ No newline at end of file +} diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index bec413db23..4462e1a1d5 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -7929,7 +7929,7 @@ TEST_CASE("/redwood/correctness/unit/RedwoodRecordRef") { 
ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[2] == 6); ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[3] == 8); - printf("sizeof(RedwoodRecordRef) = %d\n", sizeof(RedwoodRecordRef)); + printf("sizeof(RedwoodRecordRef) = %lu\n", sizeof(RedwoodRecordRef)); // Test pageID stuff. { @@ -8862,7 +8862,7 @@ TEST_CASE("/redwood/correctness/unit/deltaTree/IntIntPair") { pos = newPos; } double elapsed = timer() - start; - printf("Seek/skip test, count=%d jumpMax=%d, items=%d, oldSeek=%d useHint=%d: Elapsed %f seconds %.2f M/s\n", + printf("Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds %.2f M/s\n", count, jumpMax, items.size(), @@ -8905,7 +8905,7 @@ TEST_CASE("/redwood/correctness/unit/deltaTree/IntIntPair") { pos = newPos; } double elapsed = timer() - start; - printf("DeltaTree2 Seek/skip test, count=%d jumpMax=%d, items=%d, oldSeek=%d useHint=%d: Elapsed %f seconds " + printf("DeltaTree2 Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds " "%.2f M/s\n", count, jumpMax, @@ -8983,7 +8983,7 @@ TEST_CASE(":/redwood/performance/mutationBuffer") { strings.push_back(randomString(arena, 5)); } - printf("Inserting and then finding each string...\n", count); + printf("Inserting %d elements and then finding each string...\n", count); double start = timer(); VersionedBTree::MutationBuffer m; for (int i = 0; i < count; ++i) { @@ -9254,7 +9254,7 @@ TEST_CASE("/redwood/correctness/btree") { commit = map(btree->commit(version), [=, &ops = totalPageOps, v = version](Void) { // Update pager ops before clearing metrics ops += g_redwoodMetrics.pageOps(); - printf("Committed %s PageOps %" PRId64 "/%" PRId64 " (%.2f%%) VerificationMapEntries %d/%d (%.2f%%)\n", + printf("Committed %s PageOps %" PRId64 "/%" PRId64 " (%.2f%%) VerificationMapEntries %lu/%d (%.2f%%)\n", toString(v).c_str(), ops, targetPageOps, @@ -9508,7 +9508,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { for (v = 1; v <= numEntries; 
++v) { // Sometimes do a commit if (currentCommitSize >= targetCommitSize) { - printf("currentCommitSize: %d, cumulativeCommitSize: %d, pageCacheCount: %d\n", + printf("currentCommitSize: %d, cumulativeCommitSize: %ld, pageCacheCount: %ld\n", currentCommitSize, cumulativeCommitSize, pager->getPageCacheCount()); @@ -9531,7 +9531,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { } cumulativeCommitSize += currentCommitSize; printf( - "Final cumulativeCommitSize: %d, pageCacheCount: %d\n", cumulativeCommitSize, pager->getPageCacheCount()); + "Final cumulativeCommitSize: %ld, pageCacheCount: %ld\n", cumulativeCommitSize, pager->getPageCacheCount()); wait(m_extentQueue.flush()); extentQueueState = m_extentQueue.getState(); printf("Commit ExtentQueue getState(): %s\n", extentQueueState.toString().c_str()); @@ -9592,7 +9592,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { entriesRead, cumulativeCommitSize / elapsed / 1e6); - printf("pageCacheCount: %d extentCacheCount: %d\n", pager->getPageCacheCount(), pager->getExtentCacheCount()); + printf("pageCacheCount: %ld extentCacheCount: %ld\n", pager->getPageCacheCount(), pager->getExtentCacheCount()); pager->extentCacheClear(); m_extentQueue.resetHeadReader(); @@ -9985,7 +9985,7 @@ ACTOR Future prefixClusteredInsert(IKeyValueStore* kvs, state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize; state int recordsPerPrefix = recordCountTarget / source.numPrefixes(); - printf("\nstoreType: %d\n", kvs->getType()); + printf("\nstoreType: %d\n", static_cast(kvs->getType())); printf("commitTarget: %d\n", commitTarget); printf("prefixSource: %s\n", source.toString().c_str()); printf("usePrefixesInOrder: %d\n", usePrefixesInOrder); @@ -10074,7 +10074,7 @@ ACTOR Future sequentialInsert(IKeyValueStore* kvs, int prefixLen, int valu state int recordSize = source.prefixLen + sizeof(uint64_t) + valueSize; state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize; - printf("\nstoreType: %d\n", kvs->getType()); 
+ printf("\nstoreType: %d\n", static_cast(kvs->getType())); printf("commitTarget: %d\n", commitTarget); printf("valueSize: %d\n", valueSize); printf("recordSize: %d\n", recordSize); @@ -10208,7 +10208,7 @@ ACTOR Future randomRangeScans(IKeyValueStore* kvs, int recordCountTarget, bool singlePrefix, int rowLimit) { - printf("\nstoreType: %d\n", kvs->getType()); + printf("\nstoreType: %d\n", static_cast(kvs->getType())); printf("prefixSource: %s\n", source.toString().c_str()); printf("suffixSize: %d\n", suffixSize); printf("recordCountTarget: %d\n", recordCountTarget); @@ -10224,7 +10224,7 @@ ACTOR Future randomRangeScans(IKeyValueStore* kvs, state double start = timer(); state std::function stats = [&]() { double elapsed = timer() - start; - printf("Cumulative stats: %.2f seconds %d queries %.2f MB %d records %.2f qps %.2f MB/s %.2f rec/s\r\n", + printf("Cumulative stats: %.2f seconds %d queries %.2f MB %ld records %.2f qps %.2f MB/s %.2f rec/s\r\n", elapsed, queries, bytesRead / 1e6, diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 1889b9525b..41f01b91c9 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -529,7 +529,7 @@ static void printOptionUsage(std::string option, std::string description) { std::stringstream sstream(description); if (sstream.eof()) { - printf(result.c_str()); + printf("%s", result.c_str()); return; } @@ -552,7 +552,7 @@ static void printOptionUsage(std::string option, std::string description) { } result += currLine + '\n'; - printf(result.c_str()); + printf("%s", result.c_str()); } static void printUsage(const char* name, bool devhelp) { diff --git a/fdbserver/networktest.actor.cpp b/fdbserver/networktest.actor.cpp index 9149d6ec8a..654cf617f4 100644 --- a/fdbserver/networktest.actor.cpp +++ b/fdbserver/networktest.actor.cpp @@ -584,7 +584,7 @@ struct P2PNetworkTest { self->startTime = now(); - printf("%d listeners, %d remotes, %d outgoing connections\n", + printf("%lu listeners, 
%lu remotes, %d outgoing connections\n", self->listeners.size(), self->remotes.size(), self->connectionsOut); diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index 8c98b3cf54..f60dac24c4 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -422,7 +422,7 @@ void printSimulatedTopology() { printf("%smachineId: %s\n", indent.c_str(), p->locality.describeMachineId().c_str()); } indent += " "; - printf("%sAddress: %s\n", indent.c_str(), p->address.toString().c_str(), p->name); + printf("%sAddress: %s\n", indent.c_str(), p->address.toString().c_str()); indent += " "; printf("%sClass: %s\n", indent.c_str(), p->startingClass.toString().c_str()); printf("%sName: %s\n", indent.c_str(), p->name); diff --git a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp index 5b451f3e3e..cb3812074f 100644 --- a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp +++ b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp @@ -237,7 +237,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload { .detail("BlobSize", blob.first.size()); if (BGV_DEBUG) { - printf("\nMismatch for [%s - %s) @ %lld (%s). F(%d) B(%d):\n", + printf("\nMismatch for [%s - %s) @ %ld (%s). 
F(%d) B(%d):\n", range.begin.printable().c_str(), range.end.printable().c_str(), v, @@ -291,11 +291,11 @@ struct BlobGranuleVerifierWorkload : TestWorkload { } printf(" Deltas: (%d)", chunk.newDeltas.size()); if (chunk.newDeltas.size() > 0) { - printf(" with version [%lld - %lld]", + printf(" with version [%ld - %ld]", chunk.newDeltas[0].version, chunk.newDeltas[chunk.newDeltas.size() - 1].version); } - printf(" IncludedVersion: %lld\n", chunk.includedVersion); + printf(" IncludedVersion: %ld\n", chunk.includedVersion); } printf("\n"); } @@ -416,7 +416,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload { state KeyRange r = range; state PromiseStream> chunkStream; if (BGV_DEBUG) { - printf("Final availability check [%s - %s) @ %lld\n", + printf("Final availability check [%s - %s) @ %ld\n", r.begin.printable().c_str(), r.end.printable().c_str(), readVersion); @@ -435,7 +435,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload { break; } if (BGV_DEBUG) { - printf("BG Verifier failed final availability check for [%s - %s) @ %lld with error %s. Last " + printf("BG Verifier failed final availability check for [%s - %s) @ %ld with error %s. Last " "Success=[%s - %s)\n", r.begin.printable().c_str(), r.end.printable().c_str(), @@ -452,13 +452,13 @@ struct BlobGranuleVerifierWorkload : TestWorkload { printf("Blob Granule Verifier finished with:\n"); printf(" %d successful final granule checks\n", checks); printf(" %d failed final granule checks\n", availabilityPassed ? 
0 : 1); - printf(" %lld mismatches\n", self->mismatches); - printf(" %lld time travel too old\n", self->timeTravelTooOld); - printf(" %lld errors\n", self->errors); - printf(" %lld initial reads\n", self->initialReads); - printf(" %lld time travel reads\n", self->timeTravelReads); - printf(" %lld rows\n", self->rowsRead); - printf(" %lld bytes\n", self->bytesRead); + printf(" %ld mismatches\n", self->mismatches); + printf(" %ld time travel too old\n", self->timeTravelTooOld); + printf(" %ld errors\n", self->errors); + printf(" %ld initial reads\n", self->initialReads); + printf(" %ld time travel reads\n", self->timeTravelReads); + printf(" %ld rows\n", self->rowsRead); + printf(" %ld bytes\n", self->bytesRead); // FIXME: add above as details TraceEvent("BlobGranuleVerifierChecked"); return availabilityPassed && self->mismatches == 0 && checks > 0 && self->timeTravelTooOld == 0; diff --git a/fdbserver/workloads/RyowCorrectness.actor.cpp b/fdbserver/workloads/RyowCorrectness.actor.cpp index 2d905b230c..eb68fea0c1 100644 --- a/fdbserver/workloads/RyowCorrectness.actor.cpp +++ b/fdbserver/workloads/RyowCorrectness.actor.cpp @@ -299,14 +299,14 @@ struct RyowCorrectnessWorkload : ApiWorkload { printable(op.beginKey).c_str(), printable(op.endKey).c_str(), op.limit, - op.reverse); + static_cast(op.reverse)); break; case Operation::GET_RANGE_SELECTOR: printf("Operation GET_RANGE_SELECTOR failed: begin = %s, end = %s, limit = %d, reverse = %d\n", op.beginSelector.toString().c_str(), op.endSelector.toString().c_str(), op.limit, - op.reverse); + static_cast(op.reverse)); break; case Operation::GET_KEY: printf("Operation GET_KEY failed: selector = %s\n", op.beginSelector.toString().c_str()); From 67bd4ddea0203bb271bbad44ed2bb36a3ad1146b Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Tue, 2 Nov 2021 16:24:40 -0400 Subject: [PATCH 024/142] Add a wait(delay(0)) to storage server termination to avoid a rare segfault --- fdbserver/storageserver.actor.cpp | 23 +++++++++++++++++++---- 1 
file changed, 19 insertions(+), 4 deletions(-) diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index c7bb89afc6..467793bf2f 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -6110,9 +6110,16 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, // (ClusterController, and from there to the DataDistributionTeamCollection) if (!recruitReply.isSet()) recruitReply.sendError(recruitment_failed()); - if (storageServerTerminated(self, persistentData, e)) + if (e.code() == error_code_actor_cancelled) + throw e; + + // If the storage server dies while something that uses self is still on the stack, + // we want that actor to complete before we terminate and that memory goes out of scope + state Error err = e; + wait(delay(0)); + if (storageServerTerminated(self, persistentData, err)) return Void(); - throw e; + throw err; } } @@ -6320,9 +6327,17 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, } catch (Error& e) { if (recovered.canBeSet()) recovered.send(Void()); - if (storageServerTerminated(self, persistentData, e)) + + if (e.code() == error_code_actor_cancelled) + throw e; + + // If the storage server dies while something that uses self is still on the stack, + // we want that actor to complete before we terminate and that memory goes out of scope + state Error err = e; + wait(delay(0)); + if (storageServerTerminated(self, persistentData, err)) return Void(); - throw e; + throw err; } } From 5767701e1d2a945247b3afa93d77f394f7687a08 Mon Sep 17 00:00:00 2001 From: John Brownlee Date: Tue, 2 Nov 2021 18:53:25 -0700 Subject: [PATCH 025/142] Fix bugs with missing logger. 
--- fdbkubernetesmonitor/kubernetes.go | 8 +++++--- fdbkubernetesmonitor/main.go | 5 +++-- fdbkubernetesmonitor/monitor.go | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/fdbkubernetesmonitor/kubernetes.go b/fdbkubernetesmonitor/kubernetes.go index dab1f23911..3378f75819 100644 --- a/fdbkubernetesmonitor/kubernetes.go +++ b/fdbkubernetesmonitor/kubernetes.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "os" + "path" "strconv" "github.com/go-logr/logr" @@ -67,7 +68,7 @@ type PodClient struct { } // CreatePodClient creates a new client for working with the pod object. -func CreatePodClient() (*PodClient, error) { +func CreatePodClient(logger logr.Logger) (*PodClient, error) { config, err := rest.InClusterConfig() if err != nil { return nil, err @@ -83,7 +84,7 @@ func CreatePodClient() (*PodClient, error) { return nil, err } - podClient := &PodClient{podApi: podApi, pod: pod, TimestampFeed: make(chan int64, 10)} + podClient := &PodClient{podApi: podApi, pod: pod, TimestampFeed: make(chan int64, 10), Logger: logger} err = podClient.watchPod() if err != nil { return nil, err @@ -112,6 +113,7 @@ func (client *PodClient) UpdateAnnotations(monitor *Monitor) error { for _, argument := range monitor.ActiveConfiguration.Arguments { retrieveEnvironmentVariables(argument, environment) } + environment["BINARY_DIR"] = path.Dir(monitor.ActiveConfiguration.BinaryPath) jsonEnvironment, err := json.Marshal(environment) if err != nil { return err @@ -180,7 +182,7 @@ func (client *PodClient) processPodUpdate(pod *corev1.Pod) { } timestamp, err := strconv.ParseInt(annotation, 10, 64) if err != nil { - client.Logger.Error(err, "Error parsing annotation", "key", OutdatedConfigMapAnnotation, "rawAnnotation", annotation, err) + client.Logger.Error(err, "Error parsing annotation", "key", OutdatedConfigMapAnnotation, "rawAnnotation", annotation) return } diff --git a/fdbkubernetesmonitor/main.go b/fdbkubernetesmonitor/main.go index 821ee13b50..8bc1b57b09 100644 --- 
a/fdbkubernetesmonitor/main.go +++ b/fdbkubernetesmonitor/main.go @@ -124,9 +124,10 @@ func main() { logger.Error(err, "Error copying files") os.Exit(1) } - done := make(chan bool) - <-done } + logger.Info("Waiting for process to be terminated") + done := make(chan bool) + <-done default: logger.Error(nil, "Unknown execution mode", "mode", mode) os.Exit(1) diff --git a/fdbkubernetesmonitor/monitor.go b/fdbkubernetesmonitor/monitor.go index 2db0a469c9..9ba3542971 100644 --- a/fdbkubernetesmonitor/monitor.go +++ b/fdbkubernetesmonitor/monitor.go @@ -83,7 +83,7 @@ type Monitor struct { // StartMonitor starts the monitor loop. func StartMonitor(logger logr.Logger, configFile string, customEnvironment map[string]string) { - podClient, err := CreatePodClient() + podClient, err := CreatePodClient(logger) if err != nil { panic(err) } From 59f0a2c3e5df81f85f14d2f1e48fefd74c7dfac7 Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Wed, 3 Nov 2021 15:51:21 -0400 Subject: [PATCH 026/142] Change dbinfo broadcast to be explicitly requested by the worker registration message --- fdbserver/ClusterController.actor.cpp | 11 ++++++++--- fdbserver/WorkerInterface.actor.h | 7 +++++-- fdbserver/worker.actor.cpp | 5 +++++ 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 8691e11d73..da208e7a73 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -3876,8 +3876,6 @@ ACTOR Future workerAvailabilityWatch(WorkerInterface worker, : waitFailureClient(worker.waitFailure, SERVER_KNOBS->WORKER_FAILURE_TIME); cluster->updateWorkerList.set(worker.locality.processId(), ProcessData(worker.locality, startingClass, worker.stableAddress())); - cluster->updateDBInfoEndpoints.insert(worker.updateServerDBInfo.getEndpoint()); - cluster->updateDBInfo.trigger(); // This switching avoids a race where the worker can be added to id_worker map after the workerAvailabilityWatch // 
fails for the worker. wait(delay(0)); @@ -4295,6 +4293,10 @@ void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, Conf self->id_worker[w.locality.processId()].watcher, self->id_worker[w.locality.processId()].details.interf.configBroadcastInterface)); } + if (req.requestDbInfo) { + self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint()); + self->updateDBInfo.trigger(); + } checkOutstandingRequests(self); } else if (info->second.details.interf.id() != w.id() || req.generation >= info->second.gen) { if (!info->second.reply.isSet()) { @@ -4309,10 +4311,13 @@ void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, Conf info->second.issues = req.issues; if (info->second.details.interf.id() != w.id()) { - self->removedDBInfoEndpoints.insert(info->second.details.interf.updateServerDBInfo.getEndpoint()); info->second.details.interf = w; info->second.watcher = workerAvailabilityWatch(w, newProcessClass, self); } + if (req.requestDbInfo) { + self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint()); + self->updateDBInfo.trigger(); + } if (configBroadcaster != nullptr) { self->addActor.send( configBroadcaster->registerWorker(req.lastSeenKnobVersion, diff --git a/fdbserver/WorkerInterface.actor.h b/fdbserver/WorkerInterface.actor.h index 0deedd73c6..84670f0a6f 100644 --- a/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/WorkerInterface.actor.h @@ -413,6 +413,7 @@ struct RegisterWorkerRequest { bool degraded; Version lastSeenKnobVersion; ConfigClassSet knobConfigClassSet; + bool requestDbInfo; RegisterWorkerRequest() : priorityInfo(ProcessClass::UnsetFit, false, ClusterControllerPriorityInfo::FitnessUnknown), degraded(false) {} @@ -429,7 +430,8 @@ struct RegisterWorkerRequest { ConfigClassSet knobConfigClassSet) : wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation), distributorInterf(ddInterf), ratekeeperInterf(rkInterf), 
blobManagerInterf(bmInterf), - degraded(degraded), lastSeenKnobVersion(lastSeenKnobVersion), knobConfigClassSet(knobConfigClassSet) {} + degraded(degraded), lastSeenKnobVersion(lastSeenKnobVersion), knobConfigClassSet(knobConfigClassSet), + requestDbInfo(false) {} template void serialize(Ar& ar) { @@ -447,7 +449,8 @@ struct RegisterWorkerRequest { reply, degraded, lastSeenKnobVersion, - knobConfigClassSet); + knobConfigClassSet, + requestDbInfo); } }; diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index c42fcff082..e14e73d197 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -533,6 +533,7 @@ ACTOR Future registrationClient(Reference cacheProcessFuture; state Future cacheErrorsFuture; state Optional incorrectTime; + state UID lastCCInterfaceId = UID(); loop { state ClusterConnectionString storedConnectionString; state bool upToDate = true; @@ -585,10 +586,14 @@ ACTOR Future registrationClient(Referenceget().present(); + if (ccInterfacePresent) { + request.requestDbInfo = (ccInterface->get().get().id() != lastCCInterfaceId); + } state Future registrationReply = ccInterfacePresent ? brokenPromiseToNever(ccInterface->get().get().registerWorker.getReply(request)) : Never(); state double startTime = now(); + lastCCInterfaceId = ccInterface->get().present() ? 
ccInterface->get().get().id() : UID(); loop choose { when(RegisterWorkerReply reply = wait(registrationReply)) { processClass = reply.processClass; From 4e8625ccc04b6006ff4f1510ab0ca70ba41a1983 Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Wed, 3 Nov 2021 17:23:07 -0400 Subject: [PATCH 027/142] retain old behaviour along with explicit request --- fdbserver/ClusterController.actor.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index da208e7a73..e513ee4ef4 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -3876,6 +3876,8 @@ ACTOR Future workerAvailabilityWatch(WorkerInterface worker, : waitFailureClient(worker.waitFailure, SERVER_KNOBS->WORKER_FAILURE_TIME); cluster->updateWorkerList.set(worker.locality.processId(), ProcessData(worker.locality, startingClass, worker.stableAddress())); + cluster->updateDBInfoEndpoints.insert(worker.updateServerDBInfo.getEndpoint()); + cluster->updateDBInfo.trigger(); // This switching avoids a race where the worker can be added to id_worker map after the workerAvailabilityWatch // fails for the worker. 
wait(delay(0)); @@ -4311,6 +4313,7 @@ void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, Conf info->second.issues = req.issues; if (info->second.details.interf.id() != w.id()) { + self->removedDBInfoEndpoints.insert(info->second.details.interf.updateServerDBInfo.getEndpoint()); info->second.details.interf = w; info->second.watcher = workerAvailabilityWatch(w, newProcessClass, self); } From 396cd58b2107310ab67570cad5cf8f3e58d83184 Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Thu, 4 Nov 2021 16:05:23 -0400 Subject: [PATCH 028/142] cancel ss core and ss actor collection after termination and before context switch --- fdbserver/storageserver.actor.cpp | 33 ++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 467793bf2f..0b535fc528 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -6063,6 +6063,7 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, Reference const> db, std::string folder) { state StorageServer self(persistentData, db, ssi); + state Future ssCore; if (ssi.isTss()) { self.setTssPair(ssi.tssPairID.get()); ASSERT(self.isTss()); @@ -6102,7 +6103,8 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, recruitReply.send(rep); self.byteSampleRecovery = Void(); - wait(storageServerCore(&self, ssi)); + ssCore = storageServerCore(&self, ssi); + wait(ssCore); throw internal_error(); } catch (Error& e) { @@ -6110,15 +6112,19 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, // (ClusterController, and from there to the DataDistributionTeamCollection) if (!recruitReply.isSet()) recruitReply.sendError(recruitment_failed()); - if (e.code() == error_code_actor_cancelled) - throw e; // If the storage server dies while something that uses self is still on the stack, // we want that actor to complete before we terminate and that memory goes out of scope state Error 
err = e; - wait(delay(0)); - if (storageServerTerminated(self, persistentData, err)) + if (storageServerTerminated(self, persistentData, err)) { + ssCore.cancel(); + self.actors.clear(true); + wait(delay(0)); return Void(); + } + ssCore.cancel(); + self.actors.clear(true); + wait(delay(0)); throw err; } } @@ -6267,6 +6273,7 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, Promise recovered, Reference connRecord) { state StorageServer self(persistentData, db, ssi); + state Future ssCore; self.folder = folder; try { @@ -6321,22 +6328,26 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, TraceEvent("StorageServerStartingCore", self.thisServerID).detail("TimeTaken", now() - start); // wait( delay(0) ); // To make sure self->zkMasterInfo.onChanged is available to wait on - wait(storageServerCore(&self, ssi)); + ssCore = storageServerCore(&self, ssi); + wait(ssCore); throw internal_error(); } catch (Error& e) { if (recovered.canBeSet()) recovered.send(Void()); - if (e.code() == error_code_actor_cancelled) - throw e; - // If the storage server dies while something that uses self is still on the stack, // we want that actor to complete before we terminate and that memory goes out of scope state Error err = e; - wait(delay(0)); - if (storageServerTerminated(self, persistentData, err)) + if (storageServerTerminated(self, persistentData, err)) { + ssCore.cancel(); + self.actors.clear(true); + wait(delay(0)); return Void(); + } + ssCore.cancel(); + self.actors.clear(true); + wait(delay(0)); throw err; } } From 00f4bd8536c8a0f1bd203d741c3688750a9ed9b3 Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Mon, 8 Nov 2021 12:43:02 -0500 Subject: [PATCH 029/142] Check ccInterface against serverDbInfo's cc and make broadcast unconditional for first registration --- fdbserver/ClusterController.actor.cpp | 8 ++------ fdbserver/worker.actor.cpp | 10 +++++----- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/fdbserver/ClusterController.actor.cpp 
b/fdbserver/ClusterController.actor.cpp index e513ee4ef4..26dcc5c26d 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -3876,8 +3876,6 @@ ACTOR Future workerAvailabilityWatch(WorkerInterface worker, : waitFailureClient(worker.waitFailure, SERVER_KNOBS->WORKER_FAILURE_TIME); cluster->updateWorkerList.set(worker.locality.processId(), ProcessData(worker.locality, startingClass, worker.stableAddress())); - cluster->updateDBInfoEndpoints.insert(worker.updateServerDBInfo.getEndpoint()); - cluster->updateDBInfo.trigger(); // This switching avoids a race where the worker can be added to id_worker map after the workerAvailabilityWatch // fails for the worker. wait(delay(0)); @@ -4295,10 +4293,8 @@ void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, Conf self->id_worker[w.locality.processId()].watcher, self->id_worker[w.locality.processId()].details.interf.configBroadcastInterface)); } - if (req.requestDbInfo) { - self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint()); - self->updateDBInfo.trigger(); - } + self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint()); + self->updateDBInfo.trigger(); checkOutstandingRequests(self); } else if (info->second.details.interf.id() != w.id() || req.generation >= info->second.gen) { if (!info->second.reply.isSet()) { diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index e14e73d197..a0909a3ef6 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -522,7 +522,8 @@ ACTOR Future registrationClient(Reference const> degraded, Reference connRecord, Reference> const> issues, - Reference localConfig) { + Reference localConfig, + Reference> dbInfo) { // Keeps the cluster controller (as it may be re-elected) informed that this worker exists // The cluster controller uses waitFailureClient to find out if we die, and returns from registrationReply // (requiring us to re-register) The registration request 
piggybacks optional distributor interface if it exists. @@ -533,7 +534,6 @@ ACTOR Future registrationClient(Reference cacheProcessFuture; state Future cacheErrorsFuture; state Optional incorrectTime; - state UID lastCCInterfaceId = UID(); loop { state ClusterConnectionString storedConnectionString; state bool upToDate = true; @@ -587,13 +587,12 @@ ACTOR Future registrationClient(Referenceget().present(); if (ccInterfacePresent) { - request.requestDbInfo = (ccInterface->get().get().id() != lastCCInterfaceId); + request.requestDbInfo = (ccInterface->get().get().id() != dbInfo->get().clusterInterface.id()); } state Future registrationReply = ccInterfacePresent ? brokenPromiseToNever(ccInterface->get().get().registerWorker.getReply(request)) : Never(); state double startTime = now(); - lastCCInterfaceId = ccInterface->get().present() ? ccInterface->get().get().id() : UID(); loop choose { when(RegisterWorkerReply reply = wait(registrationReply)) { processClass = reply.processClass; @@ -1614,7 +1613,8 @@ ACTOR Future workerServer(Reference connRecord, degraded, connRecord, issues, - localConfig)); + localConfig, + dbInfo)); if (configDBType != ConfigDBType::DISABLED) { errorForwarders.add(localConfig->consume(interf.configBroadcastInterface)); From 5e4f25f96d820d5898063c03e0fd92d59a3ecd50 Mon Sep 17 00:00:00 2001 From: Ata E Husain Bohra Date: Mon, 8 Nov 2021 16:18:17 -0800 Subject: [PATCH 030/142] Improve tester actor logging to track workload run & check status Patch improves logging in "tester.actor" to assist better tracking of workload run & check status --- fdbserver/tester.actor.cpp | 42 ++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index 8c98b3cf54..f9c9f51814 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -284,15 +284,49 @@ struct CompoundWorkload : TestWorkload { Future start(Database const& cx) override { std::vector> all; 
all.reserve(workloads.size()); - for (int w = 0; w < workloads.size(); w++) - all.push_back(workloads[w]->start(cx)); + auto wCount = std::make_shared(0); + for (int i = 0; i < workloads.size(); i++) { + std::string workloadName = workloads[i]->description(); + ++(*wCount); + TraceEvent("WorkloadRunStatus") + .detail("Name", workloadName) + .detail("Count", *wCount) + .detail("Phase", "Start"); + all.push_back(fmap( + [workloadName, wCount](Void value) { + --(*wCount); + TraceEvent("WorkloadRunStatus") + .detail("Name", workloadName) + .detail("Remaining", *wCount) + .detail("Phase", "End"); + return Void(); + }, + workloads[i]->start(cx))); + } return waitForAll(all); } Future check(Database const& cx) override { std::vector> all; all.reserve(workloads.size()); - for (int w = 0; w < workloads.size(); w++) - all.push_back(workloads[w]->check(cx)); + auto wCount = std::make_shared(0); + for (int i = 0; i < workloads.size(); i++) { + ++(*wCount); + std::string workloadName = workloads[i]->description(); + TraceEvent("WorkloadCheckStatus") + .detail("Name", workloadName) + .detail("Count", *wCount) + .detail("Phase", "Start"); + all.push_back(fmap( + [workloadName, wCount](bool ret) { + --(*wCount); + TraceEvent("WorkloadCheckStatus") + .detail("Name", workloadName) + .detail("Remaining", *wCount) + .detail("Phase", "End"); + return true; + }, + workloads[i]->check(cx))); + } return allTrue(all); } void getMetrics(std::vector& m) override { From 6cf0f8121da70768df9b5095d4c5538011ee8777 Mon Sep 17 00:00:00 2001 From: QA Hoang Date: Mon, 8 Nov 2021 17:54:32 -0800 Subject: [PATCH 031/142] remove mako temp shared memory file --- bindings/c/test/mako/mako.c | 1 + 1 file changed, 1 insertion(+) diff --git a/bindings/c/test/mako/mako.c b/bindings/c/test/mako/mako.c index 3cbbd7d50f..050d1279fb 100644 --- a/bindings/c/test/mako/mako.c +++ b/bindings/c/test/mako/mako.c @@ -2818,6 +2818,7 @@ failExit: if (shmfd) { close(shmfd); shm_unlink(shmpath); + unlink(shmpath); } return 
0; From 2887e1c30ababfaf280d1979f243f3ba34f712ec Mon Sep 17 00:00:00 2001 From: Jon Fu Date: Tue, 9 Nov 2021 12:44:07 -0500 Subject: [PATCH 032/142] set flag to true when doing first registration --- fdbserver/worker.actor.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index a0909a3ef6..8652a8c8cc 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -534,6 +534,7 @@ ACTOR Future registrationClient(Reference cacheProcessFuture; state Future cacheErrorsFuture; state Optional incorrectTime; + state bool firstReg = true; loop { state ClusterConnectionString storedConnectionString; state bool upToDate = true; @@ -588,6 +589,10 @@ ACTOR Future registrationClient(Referenceget().present(); if (ccInterfacePresent) { request.requestDbInfo = (ccInterface->get().get().id() != dbInfo->get().clusterInterface.id()); + if (firstReg) { + request.requestDbInfo = true; + firstReg = false; + } } state Future registrationReply = ccInterfacePresent ? brokenPromiseToNever(ccInterface->get().get().registerWorker.getReply(request)) From 8fac798ece05816d6dcdddc62914e374caded34b Mon Sep 17 00:00:00 2001 From: negoyal Date: Tue, 9 Nov 2021 10:32:17 -0800 Subject: [PATCH 033/142] Fix a memory bug. 
--- fdbrpc/AsyncFileChaos.actor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbrpc/AsyncFileChaos.actor.h b/fdbrpc/AsyncFileChaos.actor.h index 7f61ab36bb..2e72a826e2 100644 --- a/fdbrpc/AsyncFileChaos.actor.h +++ b/fdbrpc/AsyncFileChaos.actor.h @@ -113,7 +113,7 @@ public: // Wait for diskDelay before submitting the I/O return mapAsync(Void)>, Void>(delay(diskDelay), [=](Void _) -> Future { if (pdata) - return holdWhile(pdata, file->write(pdata, length, offset)); + return holdWhile(arena, file->write(pdata, length, offset)); return file->write(data, length, offset); }); From 30867750b5ee29eab7901bdbef113d7c6daa0165 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Tue, 21 Sep 2021 11:51:20 -0700 Subject: [PATCH 034/142] Add protection against storage and tlog data deletion when joining a new cluster --- fdbclient/SystemData.cpp | 2 + fdbclient/SystemData.h | 2 + fdbserver/ApplyMetadataMutation.cpp | 2 +- fdbserver/ClusterController.actor.cpp | 6 +- fdbserver/DataDistribution.actor.cpp | 17 +++ fdbserver/LogSystem.h | 1 + fdbserver/OldTLogServer_6_0.actor.cpp | 1 + fdbserver/OldTLogServer_6_2.actor.cpp | 1 + fdbserver/TLogServer.actor.cpp | 138 +++++++++++++++----- fdbserver/TagPartitionedLogSystem.actor.cpp | 17 ++- fdbserver/TagPartitionedLogSystem.actor.h | 5 +- fdbserver/WorkerInterface.actor.h | 11 +- fdbserver/masterserver.actor.cpp | 35 ++++- fdbserver/storageserver.actor.cpp | 85 +++++++++++- fdbserver/worker.actor.cpp | 4 + flow/error_definitions.h | 1 + 16 files changed, 277 insertions(+), 51 deletions(-) diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index 4de1b4aeb6..a256356c82 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -213,6 +213,8 @@ const KeyRangeRef writeConflictRangeKeysRange = KeyRangeRef(LiteralStringRef("\xff\xff/transaction/write_conflict_range/"), LiteralStringRef("\xff\xff/transaction/write_conflict_range/\xff\xff")); +const KeyRef clusterIdKey = 
LiteralStringRef("\xff/clusterId"); + // "\xff/cacheServer/[[UID]] := StorageServerInterface" const KeyRangeRef storageCacheServerKeys(LiteralStringRef("\xff/cacheServer/"), LiteralStringRef("\xff/cacheServer0")); const KeyRef storageCacheServersPrefix = storageCacheServerKeys.begin; diff --git a/fdbclient/SystemData.h b/fdbclient/SystemData.h index 4b9c7a22f5..4cf8f1a49d 100644 --- a/fdbclient/SystemData.h +++ b/fdbclient/SystemData.h @@ -67,6 +67,8 @@ void decodeKeyServersValue(std::map const& tag_uid, std::vector& src, std::vector& dest); +extern const KeyRef clusterIdKey; + // "\xff/storageCacheServer/[[UID]] := StorageServerInterface" // This will be added by the cache server on initialization and removed by DD // TODO[mpilman]: We will need a way to map uint16_t ids to UIDs in a future diff --git a/fdbserver/ApplyMetadataMutation.cpp b/fdbserver/ApplyMetadataMutation.cpp index 5a720ca594..47871915f9 100644 --- a/fdbserver/ApplyMetadataMutation.cpp +++ b/fdbserver/ApplyMetadataMutation.cpp @@ -543,7 +543,7 @@ private: m.param1.startsWith(applyMutationsAddPrefixRange.begin) || m.param1.startsWith(applyMutationsRemovePrefixRange.begin) || m.param1.startsWith(tagLocalityListPrefix) || m.param1.startsWith(serverTagHistoryPrefix) || - m.param1.startsWith(testOnlyTxnStateStorePrefixRange.begin)) { + m.param1.startsWith(testOnlyTxnStateStorePrefixRange.begin) || m.param1 == clusterIdKey) { txnStateStore->set(KeyValueRef(m.param1, m.param2)); } diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 8691e11d73..c06285aa5f 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -3410,6 +3410,10 @@ ACTOR Future clusterWatchDatabase(ClusterControllerData* cluster, ClusterC .detail("ChangeID", dbInfo.id); db->serverInfo->set(dbInfo); + // TODO: CC should store cluster ID + // UID clusterId = wait(getClusterId(cluster)); + // cluster->clusterId = clusterId; + state Future spinDelay = delay( 
SERVER_KNOBS ->MASTER_SPIN_DELAY); // Don't retry master recovery more than once per second, but don't delay @@ -3438,7 +3442,7 @@ ACTOR Future clusterWatchDatabase(ClusterControllerData* cluster, ClusterC TEST(true); // clusterWatchDatabase() master failed TraceEvent(SevWarn, "DetectedFailedMaster", cluster->id).detail("OldMaster", iMaster.id()); } else { - TEST(true); // clusterWatchDatabas() !newMaster.present() + TEST(true); // clusterWatchDatabase() !newMaster.present() wait(delay(SERVER_KNOBS->MASTER_SPIN_DELAY)); } } catch (Error& e) { diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index 84f13137a7..16e1b42387 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -5261,6 +5261,19 @@ struct TSSPairState : ReferenceCounted, NonCopyable { Future waitComplete() { return complete.getFuture(); } }; +ACTOR Future getClusterId(DDTeamCollection* self) { + state Transaction tr(self->cx); + loop { + try { + Optional clusterId = wait(tr.get(clusterIdKey, Snapshot::False)); + ASSERT(clusterId.present()); + return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + } catch (Error& e) { + wait(tr.onError(e)); + } + } +} + ACTOR Future initializeStorage(DDTeamCollection* self, RecruitStorageReply candidateWorker, const DDEnabledState* ddEnabledState, @@ -5278,12 +5291,16 @@ ACTOR Future initializeStorage(DDTeamCollection* self, // Ask the candidateWorker to initialize a SS only if the worker does not have a pending request state UID interfaceId = deterministicRandom()->randomUniqueID(); + // TODO: Move to CC + UID clusterId = wait(getClusterId(self)); + state InitializeStorageRequest isr; isr.storeType = recruitTss ? 
self->configuration.testingStorageServerStoreType : self->configuration.storageServerStoreType; isr.seedTag = invalidTag; isr.reqId = deterministicRandom()->randomUniqueID(); isr.interfaceId = interfaceId; + isr.clusterId = clusterId; self->recruitingIds.insert(interfaceId); self->recruitingLocalities.insert(candidateWorker.worker.stableAddress()); diff --git a/fdbserver/LogSystem.h b/fdbserver/LogSystem.h index 557bab5ffc..55a493d951 100644 --- a/fdbserver/LogSystem.h +++ b/fdbserver/LogSystem.h @@ -637,6 +637,7 @@ struct ILogSystem { virtual Future> newEpoch( RecruitFromConfigurationReply const& recr, Future const& fRemoteWorkers, + UID clusterId, DatabaseConfiguration const& config, LogEpoch recoveryCount, int8_t primaryLocality, diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp index de3fb42a5e..5551413939 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -2776,6 +2776,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, diff --git a/fdbserver/OldTLogServer_6_2.actor.cpp b/fdbserver/OldTLogServer_6_2.actor.cpp index 4893a5da03..db53a4ee8e 100644 --- a/fdbserver/OldTLogServer_6_2.actor.cpp +++ b/fdbserver/OldTLogServer_6_2.actor.cpp @@ -3267,6 +3267,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index c487fe15da..9d5d5a93c5 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -26,6 +26,7 @@ #include "fdbclient/RunTransaction.actor.h" #include "fdbclient/SystemData.h" #include "fdbclient/FDBTypes.h" +#include "fdbclient/ManagementAPI.actor.h" #include 
"fdbserver/WorkerInterface.actor.h" #include "fdbserver/LogProtocolMessage.h" #include "fdbserver/SpanContextMessage.h" @@ -225,6 +226,8 @@ static const KeyRange persistTagMessagesKeys = prefixRange(LiteralStringRef("Tag static const KeyRange persistTagMessageRefsKeys = prefixRange(LiteralStringRef("TagMsgRef/")); static const KeyRange persistTagPoppedKeys = prefixRange(LiteralStringRef("TagPop/")); +static const KeyRef persistClusterIdKey = LiteralStringRef("clusterId"); + static Key persistTagMessagesKey(UID id, Tag tag, Version version) { BinaryWriter wr(Unversioned()); wr.serializeBytes(persistTagMessagesKeys.begin); @@ -312,6 +315,7 @@ struct TLogData : NonCopyable { Deque spillOrder; std::map> id_data; + UID clusterId; UID dbgid; UID workerID; @@ -2807,6 +2811,7 @@ ACTOR Future restorePersistentState(TLogData* self, wait(storage->init()); state Future> fFormat = storage->readValue(persistFormat.key); state Future> fRecoveryLocation = storage->readValue(persistRecoveryLocationKey); + state Future> fClusterId = storage->readValue(persistClusterIdKey); state Future fVers = storage->readRange(persistCurrentVersionKeys); state Future fKnownCommitted = storage->readRange(persistKnownCommittedVersionKeys); state Future fLocality = storage->readRange(persistLocalityKeys); @@ -2818,7 +2823,7 @@ ACTOR Future restorePersistentState(TLogData* self, // FIXME: metadata in queue? 
- wait(waitForAll(std::vector{ fFormat, fRecoveryLocation })); + wait(waitForAll(std::vector{ fFormat, fRecoveryLocation, fClusterId })); wait(waitForAll(std::vector{ fVers, fKnownCommitted, fLocality, @@ -2828,6 +2833,10 @@ ACTOR Future restorePersistentState(TLogData* self, fProtocolVersions, fTLogSpillTypes })); + if (fClusterId.get().present()) { + self->clusterId = BinaryReader::fromStringRef(fClusterId.get().get(), Unversioned()); + } + if (fFormat.get().present() && !persistFormatReadableRange.contains(fFormat.get().get())) { // FIXME: remove when we no longer need to test upgrades from 4.X releases if (g_network->isSimulated()) { @@ -3334,12 +3343,29 @@ ACTOR Future startSpillingInTenSeconds(TLogData* self, UID tlogId, Referen return Void(); } +ACTOR Future getClusterId(TLogData* self) { + state Transaction tr(self->cx); + loop { + try { + Optional clusterId = wait(tr.get(clusterIdKey)); + if (clusterId.present()) { + return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + } else { + return UID(); + } + } catch (Error& e) { + wait(tr.onError(e)); + } + } +} + // New tLog (if !recoverFrom.size()) or restore from network ACTOR Future tLog(IKeyValueStore* persistentData, IDiskQueue* persistentQueue, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -3353,48 +3379,90 @@ ACTOR Future tLog(IKeyValueStore* persistentData, TraceEvent("SharedTlog", tlogId).log(); try { - if (restoreFromDisk) { - wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests)); - } else { - wait(ioTimeoutError(checkEmptyQueue(&self) && checkRecovered(&self), - SERVER_KNOBS->TLOG_MAX_CREATE_DURATION)); - } + try { + if (restoreFromDisk) { + wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests)); + } else { + wait(ioTimeoutError(checkEmptyQueue(&self) && checkRecovered(&self), + SERVER_KNOBS->TLOG_MAX_CREATE_DURATION)); + } - // Disk errors 
need a chance to kill this actor. - wait(delay(0.000001)); + // Disk errors need a chance to kill this actor. + wait(delay(0.000001)); - if (recovered.canBeSet()) - recovered.send(Void()); + if (recovered.canBeSet()) + recovered.send(Void()); - self.sharedActors.send(commitQueue(&self)); - self.sharedActors.send(updateStorageLoop(&self)); - self.sharedActors.send(traceRole(Role::SHARED_TRANSACTION_LOG, tlogId)); - state Future activeSharedChange = Void(); + self.sharedActors.send(commitQueue(&self)); + self.sharedActors.send(updateStorageLoop(&self)); + self.sharedActors.send(traceRole(Role::SHARED_TRANSACTION_LOG, tlogId)); + state Future activeSharedChange = Void(); - loop { - choose { - when(InitializeTLogRequest req = waitNext(tlogRequests.getFuture())) { - if (!self.tlogCache.exists(req.recruitmentID)) { - self.tlogCache.set(req.recruitmentID, req.reply.getFuture()); - self.sharedActors.send( - self.tlogCache.removeOnReady(req.recruitmentID, tLogStart(&self, req, locality))); - } else { - forwardPromise(req.reply, self.tlogCache.get(req.recruitmentID)); + loop { + choose { + when(state InitializeTLogRequest req = waitNext(tlogRequests.getFuture())) { + ASSERT(req.clusterId.isValid()); + // Disallow recruitment of this TLog into a new + // cluster. To prevent accidental data loss, an + // operator must first manually clear the data files on + // the TLog before adding it to a new cluster. + if (self.clusterId.isValid() && self.clusterId != req.clusterId) { + throw worker_removed(); + } + + if (!self.clusterId.isValid()) { + self.clusterId = req.clusterId; + // Will let commit loop durably write the cluster ID. 
+ self.persistentData->set( + KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(req.clusterId, Unversioned()))); + } + + if (!self.tlogCache.exists(req.recruitmentID)) { + self.tlogCache.set(req.recruitmentID, req.reply.getFuture()); + self.sharedActors.send( + self.tlogCache.removeOnReady(req.recruitmentID, tLogStart(&self, req, locality))); + } else { + forwardPromise(req.reply, self.tlogCache.get(req.recruitmentID)); + } } - } - when(wait(error)) { throw internal_error(); } - when(wait(activeSharedChange)) { - if (activeSharedTLog->get() == tlogId) { - TraceEvent("SharedTLogNowActive", self.dbgid).detail("NowActive", activeSharedTLog->get()); - self.targetVolatileBytes = SERVER_KNOBS->TLOG_SPILL_THRESHOLD; - } else { - stopAllTLogs(&self, tlogId); - TraceEvent("SharedTLogQueueSpilling", self.dbgid).detail("NowActive", activeSharedTLog->get()); - self.sharedActors.send(startSpillingInTenSeconds(&self, tlogId, activeSharedTLog)); + when(wait(error)) { throw internal_error(); } + when(wait(activeSharedChange)) { + if (activeSharedTLog->get() == tlogId) { + TraceEvent("SharedTLogNowActive", self.dbgid).detail("NowActive", activeSharedTLog->get()); + self.targetVolatileBytes = SERVER_KNOBS->TLOG_SPILL_THRESHOLD; + } else { + stopAllTLogs(&self, tlogId); + TraceEvent("SharedTLogQueueSpilling", self.dbgid) + .detail("NowActive", activeSharedTLog->get()); + self.sharedActors.send(startSpillingInTenSeconds(&self, tlogId, activeSharedTLog)); + } + activeSharedChange = activeSharedTLog->onChange(); } - activeSharedChange = activeSharedTLog->onChange(); } } + } catch (Error& e) { + if (e.code() != error_code_worker_removed) { + throw; + } + // It's possible to have an invalid cluster ID when restoring from + // disk if this is an upgrade from an FDB version that did not + // support cluster IDs. 
+ ASSERT(self.clusterId.isValid() || (!self.clusterId.isValid() && restoreFromDisk)); + state UID fetchedClusterId; + if (!clusterId.present()) { + // TODO: This hangs sometimes... + UID tmpClusterId = wait(getClusterId(&self)); + fetchedClusterId = tmpClusterId; + } else { + fetchedClusterId = clusterId.get(); + } + if (!fetchedClusterId.isValid() || fetchedClusterId == self.clusterId) { + throw worker_removed(); + } + // Exclude the TLog to prevent its data files from being deleted. + NetworkAddress address = g_network->getLocalAddress(); + wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + throw invalid_cluster_id(); } } catch (Error& e) { self.terminated.send(Void()); diff --git a/fdbserver/TagPartitionedLogSystem.actor.cpp b/fdbserver/TagPartitionedLogSystem.actor.cpp index 6b6fd140cc..96cc285272 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.cpp +++ b/fdbserver/TagPartitionedLogSystem.actor.cpp @@ -1574,6 +1574,7 @@ Future TagPartitionedLogSystem::endEpoch() { Future> TagPartitionedLogSystem::newEpoch( RecruitFromConfigurationReply const& recr, Future const& fRemoteWorkers, + UID clusterId, DatabaseConfiguration const& config, LogEpoch recoveryCount, int8_t primaryLocality, @@ -1583,6 +1584,7 @@ Future> TagPartitionedLogSystem::newEpoch( return newEpoch(Reference::addRef(this), recr, fRemoteWorkers, + clusterId, config, recoveryCount, primaryLocality, @@ -2437,6 +2439,7 @@ std::vector TagPartitionedLogSystem::getLocalTags(int8_t locality, const st ACTOR Future TagPartitionedLogSystem::newRemoteEpoch(TagPartitionedLogSystem* self, Reference oldLogSystem, Future fRemoteWorkers, + UID clusterId, DatabaseConfiguration configuration, LogEpoch recoveryCount, int8_t remoteLocality, @@ -2576,6 +2579,7 @@ ACTOR Future TagPartitionedLogSystem::newRemoteEpoch(TagPartitionedLogSyst req.startVersion = logSet->startVersion; req.logRouterTags = 0; req.txsTags = self->txsTags; + req.clusterId = clusterId; } 
remoteTLogInitializationReplies.reserve(remoteWorkers.remoteTLogs.size()); @@ -2624,6 +2628,7 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( Reference oldLogSystem, RecruitFromConfigurationReply recr, Future fRemoteWorkers, + UID clusterId, DatabaseConfiguration configuration, LogEpoch recoveryCount, int8_t primaryLocality, @@ -2844,6 +2849,7 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( req.startVersion = logSystem->tLogs[0]->startVersion; req.logRouterTags = logSystem->logRouterTags; req.txsTags = logSystem->txsTags; + req.clusterId = clusterId; } initializationReplies.reserve(recr.tLogs.size()); @@ -2910,6 +2916,7 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( req.startVersion = oldLogSystem->knownCommittedVersion + 1; req.logRouterTags = logSystem->logRouterTags; req.txsTags = logSystem->txsTags; + req.clusterId = clusterId; } satelliteInitializationReplies.reserve(recr.satelliteTLogs.size()); @@ -2961,8 +2968,14 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( if (configuration.usableRegions > 1) { logSystem->hasRemoteServers = true; - logSystem->remoteRecovery = TagPartitionedLogSystem::newRemoteEpoch( - logSystem.getPtr(), oldLogSystem, fRemoteWorkers, configuration, recoveryCount, remoteLocality, allTags); + logSystem->remoteRecovery = TagPartitionedLogSystem::newRemoteEpoch(logSystem.getPtr(), + oldLogSystem, + fRemoteWorkers, + clusterId, + configuration, + recoveryCount, + remoteLocality, + allTags); if (oldLogSystem->tLogs.size() > 0 && oldLogSystem->tLogs[0]->locality == tagLocalitySpecial) { // The wait is required so that we know both primary logs and remote logs have copied the data between // the known committed version and the recovery version. 
diff --git a/fdbserver/TagPartitionedLogSystem.actor.h b/fdbserver/TagPartitionedLogSystem.actor.h index dfcfa83f1a..628dcd3647 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.h +++ b/fdbserver/TagPartitionedLogSystem.actor.h @@ -263,6 +263,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted> newEpoch(RecruitFromConfigurationReply const& recr, Future const& fRemoteWorkers, + UID clusterId, DatabaseConfiguration const& config, LogEpoch recoveryCount, int8_t primaryLocality, @@ -342,6 +343,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted newRemoteEpoch(TagPartitionedLogSystem* self, Reference oldLogSystem, Future fRemoteWorkers, + UID clusterId, DatabaseConfiguration configuration, LogEpoch recoveryCount, int8_t remoteLocality, @@ -350,6 +352,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted> newEpoch(Reference oldLogSystem, RecruitFromConfigurationReply recr, Future fRemoteWorkers, + UID clusterId, DatabaseConfiguration configuration, LogEpoch recoveryCount, int8_t primaryLocality, @@ -380,4 +383,4 @@ std::vector TagPartitionedLogSystem::getReadyNonError(std::vector> } #include "flow/unactorcompiler.h" -#endif // FDBSERVER_TAGPARTITIONEDLOGSYSTEM_ACTOR_H \ No newline at end of file +#endif // FDBSERVER_TAGPARTITIONEDLOGSYSTEM_ACTOR_H diff --git a/fdbserver/WorkerInterface.actor.h b/fdbserver/WorkerInterface.actor.h index 0deedd73c6..4a1cd6938e 100644 --- a/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/WorkerInterface.actor.h @@ -499,6 +499,7 @@ struct InitializeTLogRequest { Version startVersion; int logRouterTags; int txsTags; + UID clusterId; ReplyPromise reply; @@ -523,7 +524,8 @@ struct InitializeTLogRequest { reply, logVersion, spillType, - txsTags); + txsTags, + clusterId); } }; @@ -693,11 +695,12 @@ struct InitializeStorageRequest { KeyValueStoreType storeType; Optional> tssPairIDAndVersion; // Only set if recruiting a tss. Will be the UID and Version of its SS pair. 
+ UID clusterId; // Unique cluster identifier. Only needed at recruitment, will be read from txnStateStore on recovery ReplyPromise reply; template void serialize(Ar& ar) { - serializer(ar, seedTag, reqId, interfaceId, storeType, reply, tssPairIDAndVersion); + serializer(ar, seedTag, reqId, interfaceId, storeType, reply, tssPairIDAndVersion, clusterId); } }; @@ -992,6 +995,7 @@ class IDiskQueue; ACTOR Future storageServer(IKeyValueStore* persistentData, StorageServerInterface ssi, Tag seedTag, + UID clusterId, Version tssSeedVersion, ReplyPromise recruitReply, Reference const> db, @@ -1022,6 +1026,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -1067,6 +1072,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -1082,6 +1088,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, + Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index 292a758f42..f68c6ac403 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -216,6 +216,7 @@ struct MasterData : NonCopyable, ReferenceCounted { std::map lastCommitProxyVersionReplies; + UID clusterId; Standalone dbId; MasterInterface myInterface; @@ -402,6 +403,7 @@ ACTOR Future newTLogServers(Reference self, self->logSystem = Reference(); // Cancels the actors in the previous log system. 
Reference newLogSystem = wait(oldLogSystem->newEpoch(recr, fRemoteWorkers, + self->clusterId, self->configuration, self->cstate.myDBState.recoveryCount + 1, self->primaryLocality, @@ -414,6 +416,7 @@ ACTOR Future newTLogServers(Reference self, self->logSystem = Reference(); // Cancels the actors in the previous log system. Reference newLogSystem = wait(oldLogSystem->newEpoch(recr, Never(), + self->clusterId, self->configuration, self->cstate.myDBState.recoveryCount + 1, self->primaryLocality, @@ -447,6 +450,7 @@ ACTOR Future newSeedServers(Reference self, isr.storeType = self->configuration.storageServerStoreType; isr.reqId = deterministicRandom()->randomUniqueID(); isr.interfaceId = deterministicRandom()->randomUniqueID(); + isr.clusterId = self->clusterId; ErrorOr newServer = wait(recruits.storageServers[idx].storage.tryGetReply(isr)); @@ -1053,7 +1057,8 @@ ACTOR Future recoverFrom(Reference self, Reference oldLogSystem, std::vector* seedServers, std::vector>* initialConfChanges, - Future poppedTxsVersion) { + Future poppedTxsVersion, + bool* clusterIdExists) { TraceEvent("MasterRecoveryState", self->dbgid) .detail("StatusCode", RecoveryStatus::reading_transaction_system_state) .detail("Status", RecoveryStatus::names[RecoveryStatus::reading_transaction_system_state]) @@ -1077,6 +1082,16 @@ ACTOR Future recoverFrom(Reference self, debug_checkMaxRestoredVersion(UID(), self->lastEpochEnd, "DBRecovery"); + // Generate a cluster ID to uniquely identify the cluster if it doesn't + // already exist in the txnStateStore. + Optional clusterId = self->txnStateStore->readValue(clusterIdKey).get(); + *clusterIdExists = clusterId.present(); + if (!clusterId.present()) { + self->clusterId = deterministicRandom()->randomUniqueID(); + } else { + self->clusterId = BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + } + // Ordinarily we pass through this loop once and recover. 
We go around the loop if recovery stalls for more than a // second, a provisional master is initialized, and an "emergency transaction" is submitted that might change the // configuration so that we can finish recovery. @@ -1490,6 +1505,7 @@ ACTOR Future trackTlogRecovery(Reference self, .detail("StatusCode", RecoveryStatus::fully_recovered) .detail("Status", RecoveryStatus::names[RecoveryStatus::fully_recovered]) .detail("FullyRecoveredAtVersion", self->version) + .detail("ClusterId", self->clusterId) .trackLatest(self->masterRecoveryStateEventHolder->trackingKey); TraceEvent("MasterRecoveryGenerations", self->dbgid) @@ -1757,6 +1773,7 @@ ACTOR Future masterCore(Reference self) { state Future logChanges; state Future minRecoveryDuration; state Future poppedTxsVersion; + state bool clusterIdExists = false; loop { Reference oldLogSystem = oldLogSystems->get(); @@ -1772,9 +1789,13 @@ ACTOR Future masterCore(Reference self) { self->registrationTrigger.trigger(); choose { - when(wait(oldLogSystem - ? recoverFrom(self, oldLogSystem, &seedServers, &initialConfChanges, poppedTxsVersion) - : Never())) { + when(wait(oldLogSystem ? recoverFrom(self, + oldLogSystem, + &seedServers, + &initialConfChanges, + poppedTxsVersion, + std::addressof(clusterIdExists)) + : Never())) { reg.cancel(); break; } @@ -1803,6 +1824,7 @@ ACTOR Future masterCore(Reference self) { .detail("Status", RecoveryStatus::names[RecoveryStatus::recovery_transaction]) .detail("PrimaryLocality", self->primaryLocality) .detail("DcId", self->myInterface.locality.dcId()) + .detail("ClusterId", self->clusterId) .trackLatest(self->masterRecoveryStateEventHolder->trackingKey); // Recovery transaction @@ -1883,6 +1905,11 @@ ACTOR Future masterCore(Reference self) { } } + // Write cluster ID into txnStateStore if it is missing. 
+ if (!clusterIdExists) { + tr.set(recoveryCommitRequest.arena, clusterIdKey, BinaryWriter::toValue(self->clusterId, Unversioned())); + } + applyMetadataMutations(SpanID(), self->dbgid, recoveryCommitRequest.arena, diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index c7bb89afc6..b64fff0fbf 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -643,6 +643,7 @@ public: Reference logSystem; Reference logCursor; + UID clusterId; UID thisServerID; Optional tssPairID; // if this server is a tss, this is the id of its (ss) pair Optional ssPairID; // if this server is an ss, this is the id of its (tss) pair @@ -3270,6 +3271,7 @@ static const KeyRangeRef persistFormatReadableRange(LiteralStringRef("Foundation static const KeyRef persistID = LiteralStringRef(PERSIST_PREFIX "ID"); static const KeyRef persistTssPairID = LiteralStringRef(PERSIST_PREFIX "tssPairID"); static const KeyRef persistTssQuarantine = LiteralStringRef(PERSIST_PREFIX "tssQ"); +static const KeyRef persistClusterIdKey = LiteralStringRef(PERSIST_PREFIX "clusterId"); // (Potentially) change with the durable version or when fetchKeys completes static const KeyRef persistVersion = LiteralStringRef(PERSIST_PREFIX "Version"); @@ -4951,6 +4953,8 @@ void StorageServerDisk::makeNewStorageServerDurable() { if (data->tssPairID.present()) { storage->set(KeyValueRef(persistTssPairID, BinaryWriter::toValue(data->tssPairID.get(), Unversioned()))); } + ASSERT(data->clusterId.isValid()); + storage->set(KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(data->clusterId, Unversioned()))); storage->set(KeyValueRef(persistVersion, BinaryWriter::toValue(data->version.get(), Unversioned()))); storage->set(KeyValueRef(persistShardAssignedKeys.begin.toString(), LiteralStringRef("0"))); storage->set(KeyValueRef(persistShardAvailableKeys.begin.toString(), LiteralStringRef("0"))); @@ -5180,9 +5184,50 @@ ACTOR Future restoreByteSample(StorageServer* data, 
return Void(); } +// Reads the cluster ID from the transaction state store. +ACTOR Future getClusterId(StorageServer* self) { + state Transaction tr(self->cx); + loop { + try { + Optional clusterId = wait(tr.get(clusterIdKey)); + ASSERT(clusterId.present()); + return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + } catch (Error& e) { + wait(tr.onError(e)); + } + } +} + +// Read the cluster ID from the transaction state store and persist it to local +// storage. This function should only be necessary during an upgrade when the +// prior FDB version did not support cluster IDs. The normal path for storage +// server recruitment will include the cluster ID in the initial recruitment +// message. +ACTOR Future persistClusterId(StorageServer* self) { + state Transaction tr(self->cx); + loop { + try { + Optional clusterId = wait(tr.get(clusterIdKey)); + if (clusterId.present()) { + auto uid = BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + self->storage.writeKeyValue( + KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(uid, Unversioned()))); + // Purposely not calling commit here, and letting the recurring + // commit handle save this value to disk + self->clusterId = uid; + } + break; + } catch (Error& e) { + wait(tr.onError(e)); + } + } + return Void(); +} + ACTOR Future restoreDurableState(StorageServer* data, IKeyValueStore* storage) { state Future> fFormat = storage->readValue(persistFormat.key); state Future> fID = storage->readValue(persistID); + state Future> fClusterID = storage->readValue(persistClusterIdKey); state Future> ftssPairID = storage->readValue(persistTssPairID); state Future> fTssQuarantine = storage->readValue(persistTssQuarantine); state Future> fVersion = storage->readValue(persistVersion); @@ -5198,7 +5243,8 @@ ACTOR Future restoreDurableState(StorageServer* data, IKeyValueStore* stor restoreByteSample(data, storage, byteSampleSampleRecovered, startByteSampleRestore.getFuture()); 
TraceEvent("ReadingDurableState", data->thisServerID).log(); - wait(waitForAll(std::vector{ fFormat, fID, ftssPairID, fTssQuarantine, fVersion, fLogProtocol, fPrimaryLocality })); + wait(waitForAll( + std::vector{ fFormat, fID, fClusterID, ftssPairID, fTssQuarantine, fVersion, fLogProtocol, fPrimaryLocality })); wait(waitForAll(std::vector{ fShardAssigned, fShardAvailable, fChangeFeeds })); wait(byteSampleSampleRecovered.getFuture()); TraceEvent("RestoringDurableState", data->thisServerID).log(); @@ -5222,6 +5268,13 @@ ACTOR Future restoreDurableState(StorageServer* data, IKeyValueStore* stor data->setTssPair(BinaryReader::fromStringRef(ftssPairID.get().get(), Unversioned())); } + if (fClusterID.get().present()) { + data->clusterId = BinaryReader::fromStringRef(fClusterID.get().get(), Unversioned()); + } else { + TEST(true); // storage server upgraded to version supporting cluster IDs + data->actors.add(persistClusterId(data)); + } + // It's a bit sketchy to rely on an untrusted storage engine to persist its quarantine state when the quarantine // state means the storage engine already had a durability or correctness error, but it should get re-quarantined // very quickly because of a mismatch if it starts trying to do things again @@ -6058,11 +6111,13 @@ ACTOR Future memoryStoreRecover(IKeyValueStore* store, Reference storageServer(IKeyValueStore* persistentData, StorageServerInterface ssi, Tag seedTag, + UID clusterId, Version tssSeedVersion, ReplyPromise recruitReply, Reference const> db, std::string folder) { state StorageServer self(persistentData, db, ssi); + self.clusterId = clusterId; if (ssi.isTss()) { self.setTssPair(ssi.tssPairID.get()); ASSERT(self.isTss()); @@ -6305,10 +6360,30 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, if (recovered.canBeSet()) recovered.send(Void()); - if (self.isTss()) { - wait(replaceTSSInterface(&self, ssi)); - } else { - wait(replaceInterface(&self, ssi)); + try { + if (self.isTss()) { + 
wait(replaceTSSInterface(&self, ssi)); + } else { + wait(replaceInterface(&self, ssi)); + } + } catch (Error& e) { + if (e.code() != error_code_worker_removed) { + throw; + } + ASSERT(self.clusterId.isValid()); + UID clusterId = wait(getClusterId(&self)); + if (clusterId == self.clusterId) { + throw worker_removed(); + } + // When a storage server connects to a new cluster, it deletes its + // old data and creates a new, empty data file for the new cluster. + // We want to avoid that and force a manual removal of the storage + // servers old data when being assigned to a new cluster to avoid + // accidental data loss. + TraceEvent(SevError, "StorageServerBelongsToExistingCluster") + .detail("ClusterID", self.clusterId) + .detail("NewClusterID", clusterId); + wait(Future(Never())); } TraceEvent("StorageServerStartingCore", self.thisServerID).detail("TimeTaken", now() - start); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index c42fcff082..3a6e4a7833 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1538,6 +1538,7 @@ ACTOR Future workerServer(Reference connRecord, locality, !logData.actor.isValid() || logData.actor.isReady() ? logData.requests : PromiseStream(), + Optional(), s.storeID, interf.id(), true, @@ -1875,6 +1876,7 @@ ACTOR Future workerServer(Reference connRecord, dbInfo, locality, logData.requests, + req.clusterId, logId, interf.id(), false, @@ -1903,6 +1905,7 @@ ACTOR Future workerServer(Reference connRecord, runningStorages.end(), [&req](const auto& p) { return p.second != req.storeType; }) || req.seedTag != invalidTag)) { + ASSERT(req.clusterId.isValid()); LocalLineage _; getCurrentLineage()->modify(&RoleLineage::role) = ProcessClass::ClusterRole::Storage; bool isTss = req.tssPairIDAndVersion.present(); @@ -1946,6 +1949,7 @@ ACTOR Future workerServer(Reference connRecord, Future s = storageServer(data, recruited, req.seedTag, + req.clusterId, isTss ? 
req.tssPairIDAndVersion.get().second : 0, storageReady, dbInfo, diff --git a/flow/error_definitions.h b/flow/error_definitions.h index e468e46801..fa6a385163 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -105,6 +105,7 @@ ERROR( tag_throttled, 1213, "Transaction tag is being throttled" ) ERROR( grv_proxy_failed, 1214, "Master terminating because a GRVProxy failed" ) ERROR( dd_tracker_cancelled, 1215, "The data distribution tracker has been cancelled" ) ERROR( failed_to_progress, 1216, "Process has failed to make sufficient progress" ) +ERROR( invalid_cluster_id, 1217, "Attempted to join cluster with a different cluster ID" ) // TODO: Rename // 15xx Platform errors ERROR( platform_error, 1500, "Platform error" ) From 3e2c65bb111e91c6a90c031f4c585c5f3ad21b5f Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Mon, 4 Oct 2021 16:29:00 -0700 Subject: [PATCH 035/142] Allow tlog to join another cluster but retain its data --- fdbserver/ClusterController.actor.cpp | 4 -- fdbserver/TLogServer.actor.cpp | 68 +++++++++++++++------ fdbserver/TagPartitionedLogSystem.actor.cpp | 1 + fdbserver/storageserver.actor.cpp | 4 +- 4 files changed, 52 insertions(+), 25 deletions(-) diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index c06285aa5f..9312bd3dd4 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -3410,10 +3410,6 @@ ACTOR Future clusterWatchDatabase(ClusterControllerData* cluster, ClusterC .detail("ChangeID", dbInfo.id); db->serverInfo->set(dbInfo); - // TODO: CC should store cluster ID - // UID clusterId = wait(getClusterId(cluster)); - // cluster->clusterId = clusterId; - state Future spinDelay = delay( SERVER_KNOBS ->MASTER_SPIN_DELAY); // Don't retry master recovery more than once per second, but don't delay diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 9d5d5a93c5..5db80e15f5 100644 --- a/fdbserver/TLogServer.actor.cpp +++ 
b/fdbserver/TLogServer.actor.cpp @@ -3098,7 +3098,7 @@ bool tlogTerminated(TLogData* self, IKeyValueStore* persistentData, TLogQueue* p } if (e.code() == error_code_worker_removed || e.code() == error_code_recruitment_failed || - e.code() == error_code_file_not_found) { + e.code() == error_code_file_not_found || e.code() == error_code_invalid_cluster_id) { TraceEvent("TLogTerminated", self->dbgid).error(e, true); return true; } else @@ -3407,9 +3407,16 @@ ACTOR Future tLog(IKeyValueStore* persistentData, // operator must first manually clear the data files on // the TLog before adding it to a new cluster. if (self.clusterId.isValid() && self.clusterId != req.clusterId) { - throw worker_removed(); + // throw worker_removed(); + NetworkAddress address = g_network->getLocalAddress(); + wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + // throw invalid_cluster_id(); } + // Durably persist the cluster ID if it is not already + // durable. This should only occur for new tlogs or + // existing tlogs being upgraded from an older FDB + // version. if (!self.clusterId.isValid()) { self.clusterId = req.clusterId; // Will let commit loop durably write the cluster ID. @@ -3444,24 +3451,47 @@ ACTOR Future tLog(IKeyValueStore* persistentData, if (e.code() != error_code_worker_removed) { throw; } - // It's possible to have an invalid cluster ID when restoring from - // disk if this is an upgrade from an FDB version that did not - // support cluster IDs. - ASSERT(self.clusterId.isValid() || (!self.clusterId.isValid() && restoreFromDisk)); - state UID fetchedClusterId; - if (!clusterId.present()) { - // TODO: This hangs sometimes... - UID tmpClusterId = wait(getClusterId(&self)); - fetchedClusterId = tmpClusterId; - } else { - fetchedClusterId = clusterId.get(); + // // It's possible to have an invalid cluster ID when restoring from + // // disk if this is an upgrade from an FDB version that did not + // // support cluster IDs. 
+ // ASSERT(self.clusterId.isValid() || (!self.clusterId.isValid() && restoreFromDisk)); + // auto recoveryState = self.dbInfo->get().recoveryState; + // // When starting a tlog from durable data on disk, we need to check + // // its cluster ID against the cluster ID of the system. Read the + // // systems cluster ID from the txnStateStore and don't delete this + // // tlogs data if its cluster ID is different from that of the + // // system. Otherwise, the tlog has just been recruited and should + // // have been passed a cluster ID as part of its recruitment. + // state UID fetchedClusterId; + // if (!clusterId.present()) { + // // TODO: This hangs sometimes... + // if (recoveryState == RecoveryState::FULLY_RECOVERED) { + // UID tmpClusterId = wait(getClusterId(&self)); + // fetchedClusterId = tmpClusterId; + // } else { + // // TODO: Make sure tlog doesn't delete data? + // throw invalid_cluster_id(); + // } + // } else { + // fetchedClusterId = clusterId.get(); + // } + // // If cluster ID of this tlog matches the cluster ID read from the + // // txnStateStore, this was a valid tlog recruitment message and the + // // tlog should delete its old data. + // if (!fetchedClusterId.isValid() || fetchedClusterId == self.clusterId) { + // throw worker_removed(); + // } + // // Otherwise, the tlog is being recruited to join a new cluster. + // // Exclude the TLog to prevent its data files from being deleted. + // NetworkAddress address = g_network->getLocalAddress(); + // wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + // throw invalid_cluster_id(); + + if (!self.clusterId.isValid()) { + throw; } - if (!fetchedClusterId.isValid() || fetchedClusterId == self.clusterId) { - throw worker_removed(); - } - // Exclude the TLog to prevent its data files from being deleted. 
- NetworkAddress address = g_network->getLocalAddress(); - wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + // NetworkAddress address = g_network->getLocalAddress(); + // wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); throw invalid_cluster_id(); } } catch (Error& e) { diff --git a/fdbserver/TagPartitionedLogSystem.actor.cpp b/fdbserver/TagPartitionedLogSystem.actor.cpp index 96cc285272..cf64579f8b 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.cpp +++ b/fdbserver/TagPartitionedLogSystem.actor.cpp @@ -2917,6 +2917,7 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( req.logRouterTags = logSystem->logRouterTags; req.txsTags = logSystem->txsTags; req.clusterId = clusterId; + // TraceEvent("LUKAS_TPLS_InitializeTLogRequest").detail("ClusterId", clusterId).detail("To", recr.tLogs[i].address().toString()); } satelliteInitializationReplies.reserve(recr.satelliteTLogs.size()); diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index b64fff0fbf..bcffa3dd1a 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -6377,8 +6377,8 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, } // When a storage server connects to a new cluster, it deletes its // old data and creates a new, empty data file for the new cluster. - // We want to avoid that and force a manual removal of the storage - // servers old data when being assigned to a new cluster to avoid + // We want to avoid this and force a manual removal of the storage + // servers' old data when being assigned to a new cluster to avoid // accidental data loss. 
TraceEvent(SevError, "StorageServerBelongsToExistingCluster") .detail("ClusterID", self.clusterId) From aa3383f0e3a433fdf1de2ded0567a24b64baff46 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Thu, 7 Oct 2021 18:02:08 -0700 Subject: [PATCH 036/142] Exclude when joining new cluster --- fdbserver/TLogServer.actor.cpp | 127 +++++++++++++----------------- fdbserver/storageserver.actor.cpp | 15 ++-- 2 files changed, 62 insertions(+), 80 deletions(-) diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 5db80e15f5..f5f214f17e 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -315,7 +315,13 @@ struct TLogData : NonCopyable { Deque spillOrder; std::map> id_data; - UID clusterId; + // The durable cluster ID identifies which cluster the tlogs persistent + // data is written from. This value is restored from disk when the tlog + // restarts. + UID durableClusterId; + // The master cluster ID stores the cluster ID read from the txnStateStore. + // It is cached in this variable. 
+ UID masterClusterId; UID dbgid; UID workerID; @@ -2218,6 +2224,22 @@ ACTOR Future initPersistentState(TLogData* self, Reference logDat return Void(); } +ACTOR Future getClusterId(TLogData* self) { + state Transaction tr(self->cx); + loop { + try { + Optional clusterId = wait(tr.get(clusterIdKey)); + if (clusterId.present()) { + return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); + } else { + return UID(); + } + } catch (Error& e) { + wait(tr.onError(e)); + } + } +} + ACTOR Future rejoinMasters(TLogData* self, TLogInterface tli, DBRecoveryCount recoveryCount, @@ -2238,14 +2260,21 @@ ACTOR Future rejoinMasters(TLogData* self, } isDisplaced = isDisplaced && !inf.logSystemConfig.hasTLog(tli.id()); if (isDisplaced) { - TraceEvent("TLogDisplaced", tli.id()) - .detail("Reason", "DBInfoDoesNotContain") + state TraceEvent ev("TLogDisplaced", tli.id()); + ev.detail("Reason", "DBInfoDoesNotContain") .detail("RecoveryCount", recoveryCount) .detail("InfRecoveryCount", inf.recoveryCount) .detail("RecoveryState", (int)inf.recoveryState) .detail("LogSysConf", describe(inf.logSystemConfig.tLogs)) .detail("PriorLogs", describe(inf.priorCommittedLogServers)) .detail("OldLogGens", inf.logSystemConfig.oldTLogs.size()); + // Read and cache cluster ID before displacing this tlog. We want + // to avoid removing the tlogs data if it has joined a new cluster + // with a different cluster ID. 
+ state UID clusterId = wait(getClusterId(self)); + ASSERT(clusterId.isValid()); + self->masterClusterId = clusterId; + ev.detail("ClusterId", clusterId).detail("SelfClusterId", self->durableClusterId); if (BUGGIFY) wait(delay(SERVER_KNOBS->BUGGIFY_WORKER_REMOVED_MAX_LAG * deterministicRandom()->random01())); throw worker_removed(); @@ -2834,7 +2863,7 @@ ACTOR Future restorePersistentState(TLogData* self, fTLogSpillTypes })); if (fClusterId.get().present()) { - self->clusterId = BinaryReader::fromStringRef(fClusterId.get().get(), Unversioned()); + self->durableClusterId = BinaryReader::fromStringRef(fClusterId.get().get(), Unversioned()); } if (fFormat.get().present() && !persistFormatReadableRange.contains(fFormat.get().get())) { @@ -3343,22 +3372,6 @@ ACTOR Future startSpillingInTenSeconds(TLogData* self, UID tlogId, Referen return Void(); } -ACTOR Future getClusterId(TLogData* self) { - state Transaction tr(self->cx); - loop { - try { - Optional clusterId = wait(tr.get(clusterIdKey)); - if (clusterId.present()) { - return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); - } else { - return UID(); - } - } catch (Error& e) { - wait(tr.onError(e)); - } - } -} - // New tLog (if !recoverFrom.size()) or restore from network ACTOR Future tLog(IKeyValueStore* persistentData, IDiskQueue* persistentQueue, @@ -3402,23 +3415,12 @@ ACTOR Future tLog(IKeyValueStore* persistentData, choose { when(state InitializeTLogRequest req = waitNext(tlogRequests.getFuture())) { ASSERT(req.clusterId.isValid()); - // Disallow recruitment of this TLog into a new - // cluster. To prevent accidental data loss, an - // operator must first manually clear the data files on - // the TLog before adding it to a new cluster. 
- if (self.clusterId.isValid() && self.clusterId != req.clusterId) { - // throw worker_removed(); - NetworkAddress address = g_network->getLocalAddress(); - wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); - // throw invalid_cluster_id(); - } - // Durably persist the cluster ID if it is not already // durable. This should only occur for new tlogs or // existing tlogs being upgraded from an older FDB // version. - if (!self.clusterId.isValid()) { - self.clusterId = req.clusterId; + if (!self.durableClusterId.isValid()) { + self.durableClusterId = req.clusterId; // Will let commit loop durably write the cluster ID. self.persistentData->set( KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(req.clusterId, Unversioned()))); @@ -3451,47 +3453,26 @@ ACTOR Future tLog(IKeyValueStore* persistentData, if (e.code() != error_code_worker_removed) { throw; } - // // It's possible to have an invalid cluster ID when restoring from - // // disk if this is an upgrade from an FDB version that did not - // // support cluster IDs. - // ASSERT(self.clusterId.isValid() || (!self.clusterId.isValid() && restoreFromDisk)); - // auto recoveryState = self.dbInfo->get().recoveryState; - // // When starting a tlog from durable data on disk, we need to check - // // its cluster ID against the cluster ID of the system. Read the - // // systems cluster ID from the txnStateStore and don't delete this - // // tlogs data if its cluster ID is different from that of the - // // system. Otherwise, the tlog has just been recruited and should - // // have been passed a cluster ID as part of its recruitment. - // state UID fetchedClusterId; - // if (!clusterId.present()) { - // // TODO: This hangs sometimes... - // if (recoveryState == RecoveryState::FULLY_RECOVERED) { - // UID tmpClusterId = wait(getClusterId(&self)); - // fetchedClusterId = tmpClusterId; - // } else { - // // TODO: Make sure tlog doesn't delete data? 
- // throw invalid_cluster_id(); - // } - // } else { - // fetchedClusterId = clusterId.get(); - // } - // // If cluster ID of this tlog matches the cluster ID read from the - // // txnStateStore, this was a valid tlog recruitment message and the - // // tlog should delete its old data. - // if (!fetchedClusterId.isValid() || fetchedClusterId == self.clusterId) { - // throw worker_removed(); - // } - // // Otherwise, the tlog is being recruited to join a new cluster. - // // Exclude the TLog to prevent its data files from being deleted. - // NetworkAddress address = g_network->getLocalAddress(); - // wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); - // throw invalid_cluster_id(); - - if (!self.clusterId.isValid()) { - throw; + // It's possible to have an invalid cluster ID when restoring from + // disk if this is an upgrade from an FDB version that did not + // support cluster IDs. + ASSERT(self.durableClusterId.isValid() || (!self.durableClusterId.isValid() && restoreFromDisk)); + // Don't need to worry about deleting data if there is no durable + // cluster ID. + if (!self.durableClusterId.isValid()) { + // throw; + throw worker_removed(); } - // NetworkAddress address = g_network->getLocalAddress(); - // wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + // When a tlog joins a new cluster and has data for an old cluster, + // it should automatically exclude itself to avoid being used in + // the new cluster while also not blocking recovery. + if (self.masterClusterId.isValid() && self.masterClusterId != self.durableClusterId) { + state NetworkAddress address = g_network->getLocalAddress(); + wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + } + // If the tlog has a valid durable cluster ID, we don't want it to + // wipe its data! Throw this error to signal to `tlogTerminated` to + // close the persistent data store instead of deleting it. 
throw invalid_cluster_id(); } } catch (Error& e) { diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index bcffa3dd1a..3d3d4096b0 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -643,7 +643,7 @@ public: Reference logSystem; Reference logCursor; - UID clusterId; + Future clusterId; UID thisServerID; Optional tssPairID; // if this server is a tss, this is the id of its (ss) pair Optional ssPairID; // if this server is an ss, this is the id of its (tss) pair @@ -4953,8 +4953,8 @@ void StorageServerDisk::makeNewStorageServerDurable() { if (data->tssPairID.present()) { storage->set(KeyValueRef(persistTssPairID, BinaryWriter::toValue(data->tssPairID.get(), Unversioned()))); } - ASSERT(data->clusterId.isValid()); - storage->set(KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(data->clusterId, Unversioned()))); + ASSERT(data->clusterId.isReady() && data->clusterId.get().isValid()); + storage->set(KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(data->clusterId.get(), Unversioned()))); storage->set(KeyValueRef(persistVersion, BinaryWriter::toValue(data->version.get(), Unversioned()))); storage->set(KeyValueRef(persistShardAssignedKeys.begin.toString(), LiteralStringRef("0"))); storage->set(KeyValueRef(persistShardAvailableKeys.begin.toString(), LiteralStringRef("0"))); @@ -6370,9 +6370,10 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, if (e.code() != error_code_worker_removed) { throw; } - ASSERT(self.clusterId.isValid()); - UID clusterId = wait(getClusterId(&self)); - if (clusterId == self.clusterId) { + state UID clusterId = wait(getClusterId(&self)); + UID durableClusterId = wait(self.clusterId); + ASSERT(durableClusterId.isValid()); + if (clusterId == durableClusterId) { throw worker_removed(); } // When a storage server connects to a new cluster, it deletes its @@ -6381,7 +6382,7 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, // servers' old data when 
being assigned to a new cluster to avoid // accidental data loss. TraceEvent(SevError, "StorageServerBelongsToExistingCluster") - .detail("ClusterID", self.clusterId) + .detail("ClusterID", durableClusterId) .detail("NewClusterID", clusterId); wait(Future(Never())); } From 3988b11fd60929a6cbc40824e233cddbd885a849 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Fri, 8 Oct 2021 09:44:01 -0700 Subject: [PATCH 037/142] Cleanup --- fdbserver/DataDistribution.actor.cpp | 1 - fdbserver/TLogServer.actor.cpp | 4 +--- fdbserver/TagPartitionedLogSystem.actor.cpp | 1 - fdbserver/WorkerInterface.actor.h | 3 --- fdbserver/worker.actor.cpp | 2 -- flow/error_definitions.h | 2 +- 6 files changed, 2 insertions(+), 11 deletions(-) diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index 16e1b42387..0fb11534a3 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -5291,7 +5291,6 @@ ACTOR Future initializeStorage(DDTeamCollection* self, // Ask the candidateWorker to initialize a SS only if the worker does not have a pending request state UID interfaceId = deterministicRandom()->randomUniqueID(); - // TODO: Move to CC UID clusterId = wait(getClusterId(self)); state InitializeStorageRequest isr; diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index f5f214f17e..49b4c3576c 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -3378,7 +3378,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -3460,8 +3459,7 @@ ACTOR Future tLog(IKeyValueStore* persistentData, // Don't need to worry about deleting data if there is no durable // cluster ID. 
if (!self.durableClusterId.isValid()) { - // throw; - throw worker_removed(); + throw; } // When a tlog joins a new cluster and has data for an old cluster, // it should automatically exclude itself to avoid being used in diff --git a/fdbserver/TagPartitionedLogSystem.actor.cpp b/fdbserver/TagPartitionedLogSystem.actor.cpp index cf64579f8b..96cc285272 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.cpp +++ b/fdbserver/TagPartitionedLogSystem.actor.cpp @@ -2917,7 +2917,6 @@ ACTOR Future> TagPartitionedLogSystem::newEpoch( req.logRouterTags = logSystem->logRouterTags; req.txsTags = logSystem->txsTags; req.clusterId = clusterId; - // TraceEvent("LUKAS_TPLS_InitializeTLogRequest").detail("ClusterId", clusterId).detail("To", recr.tLogs[i].address().toString()); } satelliteInitializationReplies.reserve(recr.satelliteTLogs.size()); diff --git a/fdbserver/WorkerInterface.actor.h b/fdbserver/WorkerInterface.actor.h index 4a1cd6938e..ac6e549087 100644 --- a/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/WorkerInterface.actor.h @@ -1026,7 +1026,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -1072,7 +1071,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, @@ -1088,7 +1086,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 3a6e4a7833..c0c70d27ab 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1538,7 +1538,6 @@ ACTOR Future workerServer(Reference connRecord, locality, !logData.actor.isValid() || logData.actor.isReady() ? 
logData.requests : PromiseStream(), - Optional(), s.storeID, interf.id(), true, @@ -1876,7 +1875,6 @@ ACTOR Future workerServer(Reference connRecord, dbInfo, locality, logData.requests, - req.clusterId, logId, interf.id(), false, diff --git a/flow/error_definitions.h b/flow/error_definitions.h index fa6a385163..69c0867fa4 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -105,7 +105,7 @@ ERROR( tag_throttled, 1213, "Transaction tag is being throttled" ) ERROR( grv_proxy_failed, 1214, "Master terminating because a GRVProxy failed" ) ERROR( dd_tracker_cancelled, 1215, "The data distribution tracker has been cancelled" ) ERROR( failed_to_progress, 1216, "Process has failed to make sufficient progress" ) -ERROR( invalid_cluster_id, 1217, "Attempted to join cluster with a different cluster ID" ) // TODO: Rename +ERROR( invalid_cluster_id, 1217, "Attempted to join cluster with a different cluster ID" ) // 15xx Platform errors ERROR( platform_error, 1500, "Platform error" ) From 1fa726ca73410dac7ae88bbe4f66622990cb0783 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Fri, 8 Oct 2021 10:07:58 -0700 Subject: [PATCH 038/142] Fix compilation issue --- fdbserver/OldTLogServer_6_0.actor.cpp | 1 - fdbserver/OldTLogServer_6_2.actor.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp index 5551413939..de3fb42a5e 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -2776,7 +2776,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, diff --git a/fdbserver/OldTLogServer_6_2.actor.cpp b/fdbserver/OldTLogServer_6_2.actor.cpp index db53a4ee8e..4893a5da03 100644 --- a/fdbserver/OldTLogServer_6_2.actor.cpp +++ b/fdbserver/OldTLogServer_6_2.actor.cpp @@ -3267,7 +3267,6 @@ ACTOR Future 
tLog(IKeyValueStore* persistentData, Reference const> db, LocalityData locality, PromiseStream tlogRequests, - Optional clusterId, UID tlogId, UID workerID, bool restoreFromDisk, From 46400452436112261f19b2be438097257ddd32d1 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Wed, 13 Oct 2021 10:55:49 -0700 Subject: [PATCH 039/142] Fix rare simulation failures When partitions appear before a cluster has fully recovered, it was possible to have different tlogs persist different cluster IDs because they were involved in different partitions. This would affect recovery when a quorum was eventually reached. The solution to this is to avoid persisting the cluster ID before a cluster has fully recovered, to make sure all nodes agree on the cluster ID. --- fdbserver/TLogServer.actor.cpp | 40 +++++++++++++++++++++++-------- fdbserver/storageserver.actor.cpp | 16 +++++++------ 2 files changed, 39 insertions(+), 17 deletions(-) diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 49b4c3576c..55fae203b0 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -315,6 +315,10 @@ struct TLogData : NonCopyable { Deque spillOrder; std::map> id_data; + // The temporary durable cluster ID stores the cluster ID if the tlog is + // recruited during a recovery. The cluster ID is not persisted to disk + // until recovery is complete. + UID tmpDurableClusterId; // The durable cluster ID identifies which cluster the tlogs persistent // data is written from. This value is restored from disk when the tlog // restarts. @@ -2493,6 +2497,16 @@ ACTOR Future serveTLogInterface(TLogData* self, } else { logData->logSystem->set(Reference()); } + + // Persist cluster ID once cluster has recovered. 
+ if (self->dbInfo->get().recoveryState == RecoveryState::FULLY_RECOVERED && + !self->durableClusterId.isValid() && self->tmpDurableClusterId.isValid()) { + self->durableClusterId = self->tmpDurableClusterId; + self->persistentData->set( + KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(self->tmpDurableClusterId, Unversioned()))); + self->tmpDurableClusterId = UID(); + wait(self->persistentData->commit()); + } } when(TLogPeekStreamRequest req = waitNext(tli.peekStreamMessages.getFuture())) { TraceEvent(SevDebug, "TLogPeekStream", logData->logId) @@ -3415,14 +3429,19 @@ ACTOR Future tLog(IKeyValueStore* persistentData, when(state InitializeTLogRequest req = waitNext(tlogRequests.getFuture())) { ASSERT(req.clusterId.isValid()); // Durably persist the cluster ID if it is not already - // durable. This should only occur for new tlogs or - // existing tlogs being upgraded from an older FDB - // version. - if (!self.durableClusterId.isValid()) { + // durable and the cluster has progressed far enough + // through recovery. To avoid different partitions from + // persisting different cluster IDs, we need to wait + // until a single cluster ID has been persisted in the + // txnStateStore before finally writing it to disk. + auto recoveryState = self.dbInfo->get().recoveryState; + if (!self.durableClusterId.isValid() && recoveryState >= RecoveryState::ACCEPTING_COMMITS) { self.durableClusterId = req.clusterId; // Will let commit loop durably write the cluster ID. self.persistentData->set( KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(req.clusterId, Unversioned()))); + } else { + self.tmpDurableClusterId = req.clusterId; } if (!self.tlogCache.exists(req.recruitmentID)) { @@ -3452,10 +3471,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, if (e.code() != error_code_worker_removed) { throw; } - // It's possible to have an invalid cluster ID when restoring from - // disk if this is an upgrade from an FDB version that did not - // support cluster IDs. 
- ASSERT(self.durableClusterId.isValid() || (!self.durableClusterId.isValid() && restoreFromDisk)); // Don't need to worry about deleting data if there is no durable // cluster ID. if (!self.durableClusterId.isValid()) { @@ -3463,10 +3478,15 @@ ACTOR Future tLog(IKeyValueStore* persistentData, } // When a tlog joins a new cluster and has data for an old cluster, // it should automatically exclude itself to avoid being used in - // the new cluster while also not blocking recovery. - if (self.masterClusterId.isValid() && self.masterClusterId != self.durableClusterId) { + // the new cluster. + auto recoveryState = self.dbInfo->get().recoveryState; + if (recoveryState == RecoveryState::FULLY_RECOVERED && self.masterClusterId.isValid() && + self.durableClusterId.isValid() && self.masterClusterId != self.durableClusterId) { state NetworkAddress address = g_network->getLocalAddress(); wait(excludeServers(self.cx, { AddressExclusion{ address.ip, address.port } })); + TraceEvent(SevWarnAlways, "TLogBelongsToExistingCluster") + .detail("ClusterId", self.durableClusterId) + .detail("NewClusterId", self.masterClusterId); } // If the tlog has a valid durable cluster ID, we don't want it to // wipe its data! 
Throw this error to signal to `tlogTerminated` to diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 3d3d4096b0..06e3354c82 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -643,7 +643,7 @@ public: Reference logSystem; Reference logCursor; - Future clusterId; + Promise clusterId; UID thisServerID; Optional tssPairID; // if this server is a tss, this is the id of its (ss) pair Optional ssPairID; // if this server is an ss, this is the id of its (tss) pair @@ -4953,8 +4953,9 @@ void StorageServerDisk::makeNewStorageServerDurable() { if (data->tssPairID.present()) { storage->set(KeyValueRef(persistTssPairID, BinaryWriter::toValue(data->tssPairID.get(), Unversioned()))); } - ASSERT(data->clusterId.isReady() && data->clusterId.get().isValid()); - storage->set(KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(data->clusterId.get(), Unversioned()))); + ASSERT(data->clusterId.getFuture().isReady() && data->clusterId.getFuture().get().isValid()); + storage->set( + KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(data->clusterId.getFuture().get(), Unversioned()))); storage->set(KeyValueRef(persistVersion, BinaryWriter::toValue(data->version.get(), Unversioned()))); storage->set(KeyValueRef(persistShardAssignedKeys.begin.toString(), LiteralStringRef("0"))); storage->set(KeyValueRef(persistShardAvailableKeys.begin.toString(), LiteralStringRef("0"))); @@ -5214,7 +5215,7 @@ ACTOR Future persistClusterId(StorageServer* self) { KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(uid, Unversioned()))); // Purposely not calling commit here, and letting the recurring // commit handle save this value to disk - self->clusterId = uid; + self->clusterId.send(uid); } break; } catch (Error& e) { @@ -5269,7 +5270,7 @@ ACTOR Future restoreDurableState(StorageServer* data, IKeyValueStore* stor } if (fClusterID.get().present()) { - data->clusterId = BinaryReader::fromStringRef(fClusterID.get().get(), 
Unversioned()); + data->clusterId.send(BinaryReader::fromStringRef(fClusterID.get().get(), Unversioned())); } else { TEST(true); // storage server upgraded to version supporting cluster IDs data->actors.add(persistClusterId(data)); @@ -6117,7 +6118,7 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, Reference const> db, std::string folder) { state StorageServer self(persistentData, db, ssi); - self.clusterId = clusterId; + self.clusterId.send(clusterId); if (ssi.isTss()) { self.setTssPair(ssi.tssPairID.get()); ASSERT(self.isTss()); @@ -6371,7 +6372,8 @@ ACTOR Future storageServer(IKeyValueStore* persistentData, throw; } state UID clusterId = wait(getClusterId(&self)); - UID durableClusterId = wait(self.clusterId); + ASSERT(self.clusterId.isValid()); + UID durableClusterId = wait(self.clusterId.getFuture()); ASSERT(durableClusterId.isValid()); if (clusterId == durableClusterId) { throw worker_removed(); From 74cf64fe0fee4aaeff9182c580db1c3a82bd75a5 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Wed, 13 Oct 2021 22:22:49 -0700 Subject: [PATCH 040/142] Sync cluster ID through ServerDBInfo --- fdbserver/ClusterController.actor.cpp | 8 +++++++- fdbserver/ServerDBInfo.actor.h | 4 +++- fdbserver/TLogServer.actor.cpp | 15 +++++---------- fdbserver/WorkerInterface.actor.h | 2 ++ fdbserver/masterserver.actor.cpp | 1 + 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 9312bd3dd4..9a88748986 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -4065,7 +4065,8 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co .detail("GrvProxies", req.grvProxies.size()) .detail("RecoveryCount", req.recoveryCount) .detail("Stalled", req.recoveryStalled) - .detail("OldestBackupEpoch", req.logSystemConfig.oldestBackupEpoch); + .detail("OldestBackupEpoch", req.logSystemConfig.oldestBackupEpoch) + 
.detail("ClusterId", req.clusterId); // make sure the request comes from an active database auto db = &self->db; @@ -4149,6 +4150,11 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co dbInfo.recoveryCount = req.recoveryCount; } + if (dbInfo.clusterId != req.clusterId) { + isChanged = true; + dbInfo.clusterId = req.clusterId; + } + if (isChanged) { dbInfo.id = deterministicRandom()->randomUniqueID(); dbInfo.infoGeneration = ++self->db.dbInfoCount; diff --git a/fdbserver/ServerDBInfo.actor.h b/fdbserver/ServerDBInfo.actor.h index e134b9ba82..47a6c35415 100644 --- a/fdbserver/ServerDBInfo.actor.h +++ b/fdbserver/ServerDBInfo.actor.h @@ -64,6 +64,7 @@ struct ServerDBInfo { // which need to stay alive in case this recovery fails Optional latencyBandConfig; int64_t infoGeneration; + UID clusterId; ServerDBInfo() : recoveryCount(0), recoveryState(RecoveryState::UNINITIALIZED), logSystemConfig(0), infoGeneration(0) {} @@ -88,7 +89,8 @@ struct ServerDBInfo { logSystemConfig, priorCommittedLogServers, latencyBandConfig, - infoGeneration); + infoGeneration, + clusterId); } }; diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 55fae203b0..bceeacafb1 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -315,10 +315,6 @@ struct TLogData : NonCopyable { Deque spillOrder; std::map> id_data; - // The temporary durable cluster ID stores the cluster ID if the tlog is - // recruited during a recovery. The cluster ID is not persisted to disk - // until recovery is complete. - UID tmpDurableClusterId; // The durable cluster ID identifies which cluster the tlogs persistent // data is written from. This value is restored from disk when the tlog // restarts. @@ -2499,12 +2495,13 @@ ACTOR Future serveTLogInterface(TLogData* self, } // Persist cluster ID once cluster has recovered. 
+ auto masterClusterId = self->dbInfo->get().clusterId; if (self->dbInfo->get().recoveryState == RecoveryState::FULLY_RECOVERED && - !self->durableClusterId.isValid() && self->tmpDurableClusterId.isValid()) { - self->durableClusterId = self->tmpDurableClusterId; + !self->durableClusterId.isValid()) { + ASSERT(masterClusterId.isValid()); + self->durableClusterId = masterClusterId; self->persistentData->set( - KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(self->tmpDurableClusterId, Unversioned()))); - self->tmpDurableClusterId = UID(); + KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(masterClusterId, Unversioned()))); wait(self->persistentData->commit()); } } @@ -3440,8 +3437,6 @@ ACTOR Future tLog(IKeyValueStore* persistentData, // Will let commit loop durably write the cluster ID. self.persistentData->set( KeyValueRef(persistClusterIdKey, BinaryWriter::toValue(req.clusterId, Unversioned()))); - } else { - self.tmpDurableClusterId = req.clusterId; } if (!self.tlogCache.exists(req.recruitmentID)) { diff --git a/fdbserver/WorkerInterface.actor.h b/fdbserver/WorkerInterface.actor.h index ac6e549087..d66ec5acc0 100644 --- a/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/WorkerInterface.actor.h @@ -238,6 +238,7 @@ struct RegisterMasterRequest { std::vector priorCommittedLogServers; RecoveryState recoveryState; bool recoveryStalled; + UID clusterId; ReplyPromise reply; @@ -261,6 +262,7 @@ struct RegisterMasterRequest { priorCommittedLogServers, recoveryState, recoveryStalled, + clusterId, reply); } }; diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index f68c6ac403..abc63fe641 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -586,6 +586,7 @@ Future sendMasterRegistration(MasterData* self, masterReq.priorCommittedLogServers = priorCommittedLogServers; masterReq.recoveryState = self->recoveryState; masterReq.recoveryStalled = self->recruitmentStalled->get(); + masterReq.clusterId = 
self->clusterId; return brokenPromiseToNever(self->clusterController.registerMaster.getReply(masterReq)); } From 15e0d5b29f2136af1610102ee3540bed9ddee3a0 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Sun, 17 Oct 2021 23:51:50 -0700 Subject: [PATCH 041/142] Add explicit transaction options when reading cluster ID --- fdbserver/DataDistribution.actor.cpp | 6 ++++-- fdbserver/TLogServer.actor.cpp | 8 +++++--- fdbserver/storageserver.actor.cpp | 4 +++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index 0fb11534a3..a26d51f403 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -5262,10 +5262,12 @@ struct TSSPairState : ReferenceCounted, NonCopyable { }; ACTOR Future getClusterId(DDTeamCollection* self) { - state Transaction tr(self->cx); + state ReadYourWritesTransaction tr(self->cx); loop { try { - Optional clusterId = wait(tr.get(clusterIdKey, Snapshot::False)); + tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); + tr.setOption(FDBTransactionOptions::LOCK_AWARE); + Optional clusterId = wait(tr.get(clusterIdKey)); ASSERT(clusterId.present()); return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); } catch (Error& e) { diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index bceeacafb1..fbfa0ec79a 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -2225,9 +2225,11 @@ ACTOR Future initPersistentState(TLogData* self, Reference logDat } ACTOR Future getClusterId(TLogData* self) { - state Transaction tr(self->cx); + state ReadYourWritesTransaction tr(self->cx); loop { try { + tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); + tr.setOption(FDBTransactionOptions::LOCK_AWARE); Optional clusterId = wait(tr.get(clusterIdKey)); if (clusterId.present()) { return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); @@ -2494,9 +2496,9 @@ ACTOR Future 
serveTLogInterface(TLogData* self, logData->logSystem->set(Reference()); } - // Persist cluster ID once cluster has recovered. + // Persist cluster ID once cluster has sufficientnly recovered. auto masterClusterId = self->dbInfo->get().clusterId; - if (self->dbInfo->get().recoveryState == RecoveryState::FULLY_RECOVERED && + if (self->dbInfo->get().recoveryState == RecoveryState::ACCEPTING_COMMITS && !self->durableClusterId.isValid()) { ASSERT(masterClusterId.isValid()); self->durableClusterId = masterClusterId; diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 06e3354c82..4c9de2e2bf 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -5187,9 +5187,11 @@ ACTOR Future restoreByteSample(StorageServer* data, // Reads the cluster ID from the transaction state store. ACTOR Future getClusterId(StorageServer* self) { - state Transaction tr(self->cx); + state ReadYourWritesTransaction tr(self->cx); loop { try { + tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); + tr.setOption(FDBTransactionOptions::LOCK_AWARE); Optional clusterId = wait(tr.get(clusterIdKey)); ASSERT(clusterId.present()); return BinaryReader::fromStringRef(clusterId.get(), Unversioned()); From 95aa3f9e0056e98b8e36526f5ff58d69ea7d8a53 Mon Sep 17 00:00:00 2001 From: Tao Lin Date: Wed, 10 Nov 2021 14:22:06 -0800 Subject: [PATCH 042/142] Change SSI endpoints order to be consistent with 7.0 --- fdbclient/StorageServerInterface.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fdbclient/StorageServerInterface.h b/fdbclient/StorageServerInterface.h index ba912adbb1..f5f15df2ee 100644 --- a/fdbclient/StorageServerInterface.h +++ b/fdbclient/StorageServerInterface.h @@ -125,14 +125,14 @@ struct StorageServerInterface { RequestStream(getValue.getEndpoint().getAdjustedEndpoint(12)); getKeyValuesStream = RequestStream(getValue.getEndpoint().getAdjustedEndpoint(13)); - changeFeedStream = - 
RequestStream(getValue.getEndpoint().getAdjustedEndpoint(14)); - overlappingChangeFeeds = - RequestStream(getValue.getEndpoint().getAdjustedEndpoint(15)); - changeFeedPop = - RequestStream(getValue.getEndpoint().getAdjustedEndpoint(16)); getKeyValuesAndFlatMap = - RequestStream(getValue.getEndpoint().getAdjustedEndpoint(17)); + RequestStream(getValue.getEndpoint().getAdjustedEndpoint(14)); + changeFeedStream = + RequestStream(getValue.getEndpoint().getAdjustedEndpoint(15)); + overlappingChangeFeeds = + RequestStream(getValue.getEndpoint().getAdjustedEndpoint(16)); + changeFeedPop = + RequestStream(getValue.getEndpoint().getAdjustedEndpoint(17)); } } else { ASSERT(Ar::isDeserializing); @@ -175,10 +175,10 @@ struct StorageServerInterface { streams.push_back(getReadHotRanges.getReceiver()); streams.push_back(getRangeSplitPoints.getReceiver()); streams.push_back(getKeyValuesStream.getReceiver(TaskPriority::LoadBalancedEndpoint)); + streams.push_back(getKeyValuesAndFlatMap.getReceiver(TaskPriority::LoadBalancedEndpoint)); streams.push_back(changeFeedStream.getReceiver()); streams.push_back(overlappingChangeFeeds.getReceiver()); streams.push_back(changeFeedPop.getReceiver()); - streams.push_back(getKeyValuesAndFlatMap.getReceiver(TaskPriority::LoadBalancedEndpoint)); FlowTransport::transport().addEndpoints(streams); } }; From e4c3f886dabe6863dbfefeff8af5cc3941936eb1 Mon Sep 17 00:00:00 2001 From: Lukas Joswiak Date: Wed, 10 Nov 2021 16:15:13 -0800 Subject: [PATCH 043/142] Fix recovery issue --- fdbserver/TLogServer.actor.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index fbfa0ec79a..3354819c34 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -2496,9 +2496,9 @@ ACTOR Future serveTLogInterface(TLogData* self, logData->logSystem->set(Reference()); } - // Persist cluster ID once cluster has sufficientnly recovered. 
+ // Persist cluster ID once cluster has recovered. auto masterClusterId = self->dbInfo->get().clusterId; - if (self->dbInfo->get().recoveryState == RecoveryState::ACCEPTING_COMMITS && + if (self->dbInfo->get().recoveryState == RecoveryState::FULLY_RECOVERED && !self->durableClusterId.isValid()) { ASSERT(masterClusterId.isValid()); self->durableClusterId = masterClusterId; From 5af465aa2969eabee63ee45660c58a1f47899fbb Mon Sep 17 00:00:00 2001 From: Markus Pilman Date: Wed, 10 Nov 2021 20:05:38 -0700 Subject: [PATCH 044/142] FDB compiles on Apple Sillicon --- bindings/c/CMakeLists.txt | 26 +++++++++++++++------- bindings/c/generate_asm.py | 37 +++++++++++++++++------------- cmake/CompileBoost.cmake | 6 ++--- fdbclient/IKnobCollection.cpp | 17 +++++++++----- fdbclient/IKnobCollection.h | 2 +- flow/IndexedSet.actor.h | 2 +- flow/Platform.h | 42 +++++++++++++++++------------------ 7 files changed, 76 insertions(+), 56 deletions(-) diff --git a/bindings/c/CMakeLists.txt b/bindings/c/CMakeLists.txt index 00847c7268..01b63b7a2a 100644 --- a/bindings/c/CMakeLists.txt +++ b/bindings/c/CMakeLists.txt @@ -7,18 +7,26 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb) set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S) -set(platform "linux") +set(os "linux") +set(cpu "intel") if(APPLE) - set(platform "osx") + set(os "osx") elseif(WIN32) - set(platform "windows") + set(os "windows") set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.asm) -elseif(CMAKE_SYSTEM_NAME MATCHES "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") - set(platform "linux-aarch64") +endif() + +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") + set(cpu "aarch64") +endif() + +set(IS_ARM_MAC NO) +if(APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") + set(IS_ARM_MAC YES) endif() add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h - COMMAND $ ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform} + COMMAND $ 
${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu} ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h @@ -66,8 +74,10 @@ if(WIN32) set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM) endif() -# The tests don't build on windows -if(NOT WIN32) +# The tests don't build on windows and ARM macs +# doctest doesn't seem to compile on ARM macs, we should +# check later whether this works +if(NOT WIN32 AND NOT IS_ARM_MAC) set(MAKO_SRCS test/mako/mako.c test/mako/mako.h diff --git a/bindings/c/generate_asm.py b/bindings/c/generate_asm.py index 2f1628fa00..485ace0163 100755 --- a/bindings/c/generate_asm.py +++ b/bindings/c/generate_asm.py @@ -23,7 +23,7 @@ import re import sys -(platform, source, asm, h) = sys.argv[1:] +(os, cpu, source, asm, h) = sys.argv[1:] functions = {} @@ -59,17 +59,18 @@ def write_windows_asm(asmfile, functions): def write_unix_asm(asmfile, functions, prefix): - if platform != "linux-aarch64": + if cpu != "aarch64": asmfile.write(".intel_syntax noprefix\n") - if platform.startswith('linux') or platform == "freebsd": + if cpu == 'aarch64' or os == 'linux' or os == 'freebsd': asmfile.write("\n.data\n") for f in functions: asmfile.write("\t.extern fdb_api_ptr_%s\n" % f) - - asmfile.write("\n.text\n") - for f in functions: - asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f)) + + if os == 'linux' or os == 'freebsd': + asmfile.write("\n.text\n") + for f in functions: + asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f)) for f in functions: asmfile.write("\n.globl %s%s\n" % (prefix, f)) @@ -104,10 +105,16 @@ def write_unix_asm(asmfile, functions, prefix): # .size g, .-g # .ident "GCC: (GNU) 8.3.1 20190311 (Red Hat 8.3.1-3)" - if platform == "linux-aarch64": - asmfile.write("\tadrp x8, :got:fdb_api_ptr_%s\n" % (f)) - asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f)) - asmfile.write("\tldr x8, [x8]\n") + p = '' + if os == 'osx': + p = '_' + if cpu == 
"aarch64": + asmfile.write("\tldr x16, =%sfdb_api_ptr_%s\n" % (p, f)) + if os == 'osx': + asmfile.write("\tldr x16, [x16]\n") + else: + asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f)) + asmfile.write("\tldr x8, [x8]\n") asmfile.write("\tbr x8\n") else: asmfile.write( @@ -123,15 +130,15 @@ with open(asm, 'w') as asmfile: hfile.write( "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n") - if platform.startswith('linux'): + if os == 'linux': write_unix_asm(asmfile, functions, '') - elif platform == "osx": + elif os == "osx": write_unix_asm(asmfile, functions, '_') - elif platform == "windows": + elif os == "windows": write_windows_asm(asmfile, functions) for f in functions: - if platform == "windows": + if os == "windows": hfile.write("extern \"C\" ") hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f) for v in functions[f]: diff --git a/cmake/CompileBoost.cmake b/cmake/CompileBoost.cmake index 57ddbd41df..6e5dcd75d7 100644 --- a/cmake/CompileBoost.cmake +++ b/cmake/CompileBoost.cmake @@ -49,8 +49,8 @@ function(compile_boost) include(ExternalProject) set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install") ExternalProject_add("${COMPILE_BOOST_TARGET}Project" - URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2" - URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 + URL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.bz2" + URL_HASH SHA256=fc9f85fc030e233142908241af7a846e60630aa7388de9a5fafb1f3a26840854 CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET} BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install BUILD_IN_SOURCE ON @@ -113,7 +113,7 @@ if(WIN32) return() endif() -find_package(Boost 1.72.0 EXACT QUIET COMPONENTS context CONFIG 
PATHS ${BOOST_HINT_PATHS}) +find_package(Boost 1.77.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS}) set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost") if(Boost_FOUND AND NOT FORCE_BOOST_BUILD) diff --git a/fdbclient/IKnobCollection.cpp b/fdbclient/IKnobCollection.cpp index cef5b18135..a1821696bf 100644 --- a/fdbclient/IKnobCollection.cpp +++ b/fdbclient/IKnobCollection.cpp @@ -73,20 +73,25 @@ KnobValue IKnobCollection::parseKnobValue(std::string const& knobName, std::stri UNSTOPPABLE_ASSERT(false); } -std::unique_ptr IKnobCollection::globalKnobCollection = - IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False); +std::unique_ptr& IKnobCollection::globalKnobCollection() { + static std::unique_ptr res; + if (!res) { + res = IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False); + } + return res; +} void IKnobCollection::setGlobalKnobCollection(Type type, Randomize randomize, IsSimulated isSimulated) { - globalKnobCollection = create(type, randomize, isSimulated); - FLOW_KNOBS = &globalKnobCollection->getFlowKnobs(); + globalKnobCollection() = create(type, randomize, isSimulated); + FLOW_KNOBS = &globalKnobCollection()->getFlowKnobs(); } IKnobCollection const& IKnobCollection::getGlobalKnobCollection() { - return *globalKnobCollection; + return *globalKnobCollection(); } IKnobCollection& IKnobCollection::getMutableGlobalKnobCollection() { - return *globalKnobCollection; + return *globalKnobCollection(); } ConfigMutationRef IKnobCollection::createSetMutation(Arena arena, KeyRef key, ValueRef value) { diff --git a/fdbclient/IKnobCollection.h b/fdbclient/IKnobCollection.h index 0cc06b3972..092bce84e7 100644 --- a/fdbclient/IKnobCollection.h +++ b/fdbclient/IKnobCollection.h @@ -36,7 +36,7 @@ * - TestKnobs */ class IKnobCollection { - static std::unique_ptr globalKnobCollection; + static std::unique_ptr& globalKnobCollection(); 
public: virtual ~IKnobCollection() = default; diff --git a/flow/IndexedSet.actor.h b/flow/IndexedSet.actor.h index 2bd4f1a127..cca6ffccb8 100644 --- a/flow/IndexedSet.actor.h +++ b/flow/IndexedSet.actor.h @@ -47,7 +47,7 @@ ACTOR template while (!prefetchQueue.empty() || !toFree.empty()) { while (prefetchQueue.size() < 10 && !toFree.empty()) { - _mm_prefetch((const char*)toFree.back(), _MM_HINT_T0); + __builtin_prefetch((const char*)toFree.back(), _MM_HINT_T0); prefetchQueue.push_back(toFree.back()); toFree.pop_back(); } diff --git a/flow/Platform.h b/flow/Platform.h index 889d2a0b17..6f51594906 100644 --- a/flow/Platform.h +++ b/flow/Platform.h @@ -416,28 +416,8 @@ typedef struct { dev_t getDeviceId(std::string path); #endif -#ifdef __linux__ -#ifndef __aarch64__ -#include -#else +#if defined(__aarch64__) #include "sse2neon.h" -#endif -#include -#include -#endif - -#if defined(__APPLE__) -// Version of CLang bundled with XCode doesn't yet include ia32intrin.h. -#if !(__has_builtin(__rdtsc)) -inline static uint64_t timestampCounter() { - uint64_t lo, hi; - asm("rdtsc" : "=a"(lo), "=d"(hi)); - return (lo | (hi << 32)); -} -#else -#define timestampCounter() __rdtsc() -#endif -#elif defined(__aarch64__) // aarch64 does not have rdtsc counter // Use cntvct_el0 virtual counter instead inline static uint64_t timestampCounter() { @@ -445,8 +425,21 @@ inline static uint64_t timestampCounter() { asm volatile("mrs %0, cntvct_el0" : "=r"(timer)); return timer; } +#elif defined(__linux__) +#include +#define timestampCounter() __rdtsc() +#elif defined(__APPLE__) // macOS on Intel +// Version of CLang bundled with XCode doesn't yet include ia32intrin.h. 
+#if !(__has_builtin(__rdtsc)) +inline static uint64_t timestampCounter() { + uint64_t lo, hi; + asm( "rdtsc" : "=a" (lo), "=d" (hi) ); + return( lo | (hi << 32) ); +} +#else +#define timestampCounter() __rdtsc() +#endif #else -// all other platforms including Linux x86_64 #define timestampCounter() __rdtsc() #endif @@ -460,6 +453,11 @@ inline static uint64_t __rdtsc() { #endif #endif +#if defined(__linux__) +#include +#endif +#include + #ifdef _WIN32 #include inline static int32_t interlockedIncrement(volatile int32_t* a) { From 0e6dd46f5af25f13963a8b534d3e9386116f9099 Mon Sep 17 00:00:00 2001 From: Markus Pilman Date: Thu, 11 Nov 2021 08:38:25 -0700 Subject: [PATCH 045/142] Correct code formatting --- flow/Platform.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/Platform.h b/flow/Platform.h index 6f51594906..3417e81577 100644 --- a/flow/Platform.h +++ b/flow/Platform.h @@ -433,8 +433,8 @@ inline static uint64_t timestampCounter() { #if !(__has_builtin(__rdtsc)) inline static uint64_t timestampCounter() { uint64_t lo, hi; - asm( "rdtsc" : "=a" (lo), "=d" (hi) ); - return( lo | (hi << 32) ); + asm("rdtsc" : "=a"(lo), "=d"(hi)); + return (lo | (hi << 32)); } #else #define timestampCounter() __rdtsc() From 28dde27cb16d5813c9fc75977fea03633071d804 Mon Sep 17 00:00:00 2001 From: Markus Pilman Date: Thu, 11 Nov 2021 08:49:51 -0700 Subject: [PATCH 046/142] Fix Linux compiler errors --- flow/IndexedSet.actor.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/IndexedSet.actor.h b/flow/IndexedSet.actor.h index cca6ffccb8..1d7598f1d4 100644 --- a/flow/IndexedSet.actor.h +++ b/flow/IndexedSet.actor.h @@ -29,6 +29,7 @@ #define FLOW_INDEXEDSET_ACTOR_H #include "flow/flow.h" +#include "flow/Platform.h" #include "flow/actorcompiler.h" // This must be the last #include. 
ACTOR template @@ -47,7 +48,7 @@ ACTOR template while (!prefetchQueue.empty() || !toFree.empty()) { while (prefetchQueue.size() < 10 && !toFree.empty()) { - __builtin_prefetch((const char*)toFree.back(), _MM_HINT_T0); + _mm_prefetch((const char*)toFree.back(), _MM_HINT_T0); prefetchQueue.push_back(toFree.back()); toFree.pop_back(); } From ed46a81bbc276450d6df036b1b1c37d0b104338e Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Thu, 11 Nov 2021 16:05:33 -0800 Subject: [PATCH 047/142] Fix (test only) heap use after free --- bindings/c/test/unit/unit_tests.cpp | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/bindings/c/test/unit/unit_tests.cpp b/bindings/c/test/unit/unit_tests.cpp index 1107961002..7bf9b39cc2 100644 --- a/bindings/c/test/unit/unit_tests.cpp +++ b/bindings/c/test/unit/unit_tests.cpp @@ -876,23 +876,29 @@ TEST_CASE("fdb_transaction_set_read_version future_version") { const std::string EMPTY = Tuple().pack().toString(); const KeyRef RECORD = "RECORD"_sr; const KeyRef INDEX = "INDEX"_sr; -static KeyRef primaryKey(const int i) { - return KeyRef(format("primary-key-of-record-%08d", i)); +static Key primaryKey(const int i) { + return Key(format("primary-key-of-record-%08d", i)); } -static KeyRef indexKey(const int i) { - return KeyRef(format("index-key-of-record-%08d", i)); +static Key indexKey(const int i) { + return Key(format("index-key-of-record-%08d", i)); } -static ValueRef dataOfRecord(const int i) { - return KeyRef(format("data-of-record-%08d", i)); +static Value dataOfRecord(const int i) { + return Value(format("data-of-record-%08d", i)); } static std::string indexEntryKey(const int i) { - return Tuple().append(prefix).append(INDEX).append(indexKey(i)).append(primaryKey(i)).pack().toString(); + return Tuple() + .append(StringRef(prefix)) + .append(INDEX) + .append(indexKey(i).contents()) + .append(primaryKey(i).contents()) + .pack() + .toString(); } static std::string recordKey(const int i) { return 
Tuple().append(prefix).append(RECORD).append(primaryKey(i)).pack().toString(); } static std::string recordValue(const int i) { - return Tuple().append(dataOfRecord(i)).pack().toString(); + return Tuple().append(dataOfRecord(i).contents()).pack().toString(); } TEST_CASE("fdb_transaction_get_range_and_flat_map") { From 5c9af1fdba1a4f693d2c4b961ab55c79a3a69616 Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Thu, 11 Nov 2021 16:39:39 -0800 Subject: [PATCH 048/142] Remove unnecessary calls to Standalone::contents --- bindings/c/test/unit/unit_tests.cpp | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/bindings/c/test/unit/unit_tests.cpp b/bindings/c/test/unit/unit_tests.cpp index 7bf9b39cc2..a44137aaa9 100644 --- a/bindings/c/test/unit/unit_tests.cpp +++ b/bindings/c/test/unit/unit_tests.cpp @@ -886,19 +886,13 @@ static Value dataOfRecord(const int i) { return Value(format("data-of-record-%08d", i)); } static std::string indexEntryKey(const int i) { - return Tuple() - .append(StringRef(prefix)) - .append(INDEX) - .append(indexKey(i).contents()) - .append(primaryKey(i).contents()) - .pack() - .toString(); + return Tuple().append(StringRef(prefix)).append(INDEX).append(indexKey(i)).append(primaryKey(i)).pack().toString(); } static std::string recordKey(const int i) { return Tuple().append(prefix).append(RECORD).append(primaryKey(i)).pack().toString(); } static std::string recordValue(const int i) { - return Tuple().append(dataOfRecord(i).contents()).pack().toString(); + return Tuple().append(dataOfRecord(i)).pack().toString(); } TEST_CASE("fdb_transaction_get_range_and_flat_map") { From 984bc0fbea793c2bdab2976c49b7894cc3172751 Mon Sep 17 00:00:00 2001 From: He Liu Date: Thu, 11 Nov 2021 16:49:36 -0800 Subject: [PATCH 049/142] Added Endpoints. 
--- fdbclient/ClusterInterface.h | 37 +++++++++++++++++++++++++++- fdbserver/DataDistributorInterface.h | 22 +++++++++++++++-- 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/fdbclient/ClusterInterface.h b/fdbclient/ClusterInterface.h index d71e79c95c..89f8805f0d 100644 --- a/fdbclient/ClusterInterface.h +++ b/fdbclient/ClusterInterface.h @@ -38,6 +38,7 @@ struct ClusterInterface { RequestStream getClientWorkers; RequestStream forceRecovery; RequestStream moveShard; + RequestStream splitShard; RequestStream repairSystemData; bool operator==(ClusterInterface const& r) const { return id() == r.id(); } @@ -49,7 +50,8 @@ struct ClusterInterface { return openDatabase.getFuture().isReady() || failureMonitoring.getFuture().isReady() || databaseStatus.getFuture().isReady() || ping.getFuture().isReady() || getClientWorkers.getFuture().isReady() || forceRecovery.getFuture().isReady() || - moveShard.getFuture().isReady() || repairSystemData.getFuture().isReady(); + moveShard.getFuture().isReady() || splitShard.getFuture().isReady() || + repairSystemData.getFuture().isReady(); } void initEndpoints() { @@ -60,6 +62,7 @@ struct ClusterInterface { getClientWorkers.getEndpoint(TaskPriority::ClusterController); forceRecovery.getEndpoint(TaskPriority::ClusterController); moveShard.getEndpoint(TaskPriority::ClusterController); + splitShard.getEndpoint(TaskPriority::ClusterController); repairSystemData.getEndpoint(TaskPriority::ClusterController); } @@ -73,6 +76,7 @@ struct ClusterInterface { getClientWorkers, forceRecovery, moveShard, + splitShard, repairSystemData); } }; @@ -273,6 +277,37 @@ struct MoveShardRequest { } }; +// Returns the actual shards generated by the SplitShardRequest. 
+struct SplitShardReply { + constexpr static FileIdentifier file_identifier = 1384440; + std::vector shards; + + SplitShardReply() {} + explicit SplitShardReply(std::vector shards) : shards{ std::move(shards) } {} + + template + void serialize(Ar& ar) { + serializer(ar, shards); + } +}; + +// Split keyrange [shard.begin, shard.end) into num shards. +// Split points are chosen as the arithmeticlly equal division points of the given range. +struct SplitShardRequest { + constexpr static FileIdentifier file_identifier = 1384443; + KeyRange shard; + int num; + ReplyPromise reply; + + SplitShardRequest() : num(0) {} + SplitShardRequest(KeyRange shard, int num) : shard{ std::move(shard) }, num(num) {} + + template + void serialize(Ar& ar) { + serializer(ar, shard, num, reply); + } +}; + // Request to trigger a master recovery, and during the following recovery, the system metadata will be // reconstructed from TLogs, and written to a new SS team. // This is used when metadata on SSes are lost or corrupted. 
diff --git a/fdbserver/DataDistributorInterface.h b/fdbserver/DataDistributorInterface.h index 0bb032cde8..ad3c3c1bfb 100644 --- a/fdbserver/DataDistributorInterface.h +++ b/fdbserver/DataDistributorInterface.h @@ -21,9 +21,10 @@ #ifndef FDBSERVER_DATADISTRIBUTORINTERFACE_H #define FDBSERVER_DATADISTRIBUTORINTERFACE_H -#include "fdbrpc/fdbrpc.h" -#include "fdbrpc/Locality.h" +#include "fdbclient/ClusterInterface.h" #include "fdbclient/FDBTypes.h" +#include "fdbrpc/Locality.h" +#include "fdbrpc/fdbrpc.h" struct DataDistributorInterface { constexpr static FileIdentifier file_identifier = 12383874; @@ -33,6 +34,7 @@ struct DataDistributorInterface { UID myId; RequestStream distributorSnapReq; RequestStream distributorExclCheckReq; + RequestStream distributorSplitRange; RequestStream dataDistributorMetrics; DataDistributorInterface() {} @@ -53,6 +55,7 @@ struct DataDistributorInterface { myId, distributorSnapReq, distributorExclCheckReq, + distributorSplitRange, dataDistributorMetrics); } }; @@ -147,4 +150,19 @@ struct DistributorExclusionSafetyCheckRequest { } }; +// Insert split points, and distribute the resulted shards to different teams. +struct DistributorSplitRangeRequest { + constexpr static FileIdentifier file_identifier = 1384441; + std::vector splitPoints; + ReplyPromise reply; + + DistributorSplitRangeRequest() {} + explicit DistributorSplitRangeRequest(std::vector splitPoints) : splitPoints{ std::move(splitPoints) } {} + + template + void serialize(Ar& ar) { + serializer(ar, splitPoints, reply); + } +}; + #endif // FDBSERVER_DATADISTRIBUTORINTERFACE_H From d9965e0e5ae444e9f60cff46a0256a480c1fbc74 Mon Sep 17 00:00:00 2001 From: Renxuan Wang Date: Thu, 11 Nov 2021 15:13:39 -0800 Subject: [PATCH 050/142] Add DNS mock in SimExternalConnection. This is a substep in supporting hostnames in cluster files. So that in simulation, we can add mappings to mock DNS. 
--- fdbrpc/SimExternalConnection.actor.cpp | 91 ++++++++++++++++++++++++++ fdbrpc/SimExternalConnection.h | 21 ++++++ 2 files changed, 112 insertions(+) diff --git a/fdbrpc/SimExternalConnection.actor.cpp b/fdbrpc/SimExternalConnection.actor.cpp index a3a7c63101..7fae5cf522 100644 --- a/fdbrpc/SimExternalConnection.actor.cpp +++ b/fdbrpc/SimExternalConnection.actor.cpp @@ -67,6 +67,49 @@ public: } }; +bool MockDNS::findMockTCPEndpoint(const std::string& host, const std::string& service) { + std::string hostname = host + ":" + service; + return hostnameToAddresses.find(hostname) != hostnameToAddresses.end(); +} + +void MockDNS::addMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses) { + if (findMockTCPEndpoint(host, service)) { + throw operation_failed(); + } + hostnameToAddresses[host + ":" + service] = addresses; +} + +void MockDNS::updateMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses) { + if (!findMockTCPEndpoint(host, service)) { + throw operation_failed(); + } + hostnameToAddresses[host + ":" + service] = addresses; +} + +void MockDNS::removeMockTCPEndpoint(const std::string& host, const std::string& service) { + if (!findMockTCPEndpoint(host, service)) { + throw operation_failed(); + } + hostnameToAddresses.erase(host + ":" + service); +} + +std::vector MockDNS::getTCPEndpoint(const std::string& host, const std::string& service) { + if (!findMockTCPEndpoint(host, service)) { + throw operation_failed(); + } + return hostnameToAddresses[host + ":" + service]; +} + +void MockDNS::clearMockTCPEndpoints() { + hostnameToAddresses.clear(); +} + +MockDNS SimExternalConnection::mockDNS; + void SimExternalConnection::close() { socket.close(); } @@ -152,6 +195,9 @@ ACTOR static Future> resolveTCPEndpointImpl(std::str Future> SimExternalConnection::resolveTCPEndpoint(const std::string& host, const std::string& service) { + if (mockDNS.findMockTCPEndpoint(host, 
service)) { + return mockDNS.getTCPEndpoint(host, service); + } return resolveTCPEndpointImpl(host, service); } @@ -212,4 +258,49 @@ TEST_CASE("fdbrpc/SimExternalClient") { return Void(); } +TEST_CASE("fdbrpc/MockTCPEndpoints") { + state MockDNS mockDNS; + state std::vector networkAddresses; + state NetworkAddress address1(IPAddress(0x13131313), 1); + state NetworkAddress address2(IPAddress(0x14141414), 2); + networkAddresses.push_back(address1); + networkAddresses.push_back(address2); + mockDNS.addMockTCPEndpoint("testhost1", "testport1", networkAddresses); + ASSERT(mockDNS.findMockTCPEndpoint("testhost1", "testport1")); + ASSERT(mockDNS.findMockTCPEndpoint("testhost1", "testport2") == false); + std::vector resolvedNetworkAddresses = mockDNS.getTCPEndpoint("testhost1", "testport1"); + ASSERT(resolvedNetworkAddresses.size() == 2); + ASSERT(std::find(resolvedNetworkAddresses.begin(), resolvedNetworkAddresses.end(), address1) != + resolvedNetworkAddresses.end()); + ASSERT(std::find(resolvedNetworkAddresses.begin(), resolvedNetworkAddresses.end(), address2) != + resolvedNetworkAddresses.end()); + // Adding a hostname twice should fail. + try { + mockDNS.addMockTCPEndpoint("testhost1", "testport1", networkAddresses); + } catch (Error& e) { + ASSERT(e.code() == error_code_operation_failed); + } + // Updating an unexisted hostname should fail. + try { + mockDNS.updateMockTCPEndpoint("testhost2", "testport2", networkAddresses); + } catch (Error& e) { + ASSERT(e.code() == error_code_operation_failed); + } + // Removing an unexisted hostname should fail. + try { + mockDNS.removeMockTCPEndpoint("testhost2", "testport2"); + } catch (Error& e) { + ASSERT(e.code() == error_code_operation_failed); + } + mockDNS.clearMockTCPEndpoints(); + // Updating any hostname right after clearing endpoints should fail. 
+ try { + mockDNS.updateMockTCPEndpoint("testhost1", "testport1", networkAddresses); + } catch (Error& e) { + ASSERT(e.code() == error_code_operation_failed); + } + + return Void(); +} + void forceLinkSimExternalConnectionTests() {} diff --git a/fdbrpc/SimExternalConnection.h b/fdbrpc/SimExternalConnection.h index fc1b485a66..9ce18340bf 100644 --- a/fdbrpc/SimExternalConnection.h +++ b/fdbrpc/SimExternalConnection.h @@ -28,6 +28,24 @@ #include +// MockDNS is a class maintaining a > mapping, mocking a DNS in simulation. +class MockDNS { +public: + bool findMockTCPEndpoint(const std::string& host, const std::string& service); + void addMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses); + void updateMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses); + void removeMockTCPEndpoint(const std::string& host, const std::string& service); + void clearMockTCPEndpoints(); + std::vector getTCPEndpoint(const std::string& host, const std::string& service); + +private: + std::map> hostnameToAddresses; +}; + class SimExternalConnection final : public IConnection, public ReferenceCounted { boost::asio::ip::tcp::socket socket; SimExternalConnection(boost::asio::ip::tcp::socket&& socket); @@ -50,6 +68,9 @@ public: UID getDebugID() const override; static Future> resolveTCPEndpoint(const std::string& host, const std::string& service); static Future> connect(NetworkAddress toAddr); + +private: + static MockDNS mockDNS; }; #endif From 9f4a63dacfb88c19778dcd40b6a88029f829a665 Mon Sep 17 00:00:00 2001 From: He Liu Date: Thu, 11 Nov 2021 17:45:08 -0800 Subject: [PATCH 051/142] Adjusted endpoints order to be compatible. 
--- fdbclient/ClusterInterface.h | 44 ++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/fdbclient/ClusterInterface.h b/fdbclient/ClusterInterface.h index 89f8805f0d..01ac351f61 100644 --- a/fdbclient/ClusterInterface.h +++ b/fdbclient/ClusterInterface.h @@ -38,8 +38,8 @@ struct ClusterInterface { RequestStream getClientWorkers; RequestStream forceRecovery; RequestStream moveShard; - RequestStream splitShard; RequestStream repairSystemData; + RequestStream splitShard; bool operator==(ClusterInterface const& r) const { return id() == r.id(); } bool operator!=(ClusterInterface const& r) const { return id() != r.id(); } @@ -50,8 +50,8 @@ struct ClusterInterface { return openDatabase.getFuture().isReady() || failureMonitoring.getFuture().isReady() || databaseStatus.getFuture().isReady() || ping.getFuture().isReady() || getClientWorkers.getFuture().isReady() || forceRecovery.getFuture().isReady() || - moveShard.getFuture().isReady() || splitShard.getFuture().isReady() || - repairSystemData.getFuture().isReady(); + moveShard.getFuture().isReady() || repairSystemData.getFuture().isReady() || + splitShard.getFuture().isReady(); } void initEndpoints() { @@ -62,8 +62,8 @@ struct ClusterInterface { getClientWorkers.getEndpoint(TaskPriority::ClusterController); forceRecovery.getEndpoint(TaskPriority::ClusterController); moveShard.getEndpoint(TaskPriority::ClusterController); - splitShard.getEndpoint(TaskPriority::ClusterController); repairSystemData.getEndpoint(TaskPriority::ClusterController); + splitShard.getEndpoint(TaskPriority::ClusterController); } template @@ -76,8 +76,8 @@ struct ClusterInterface { getClientWorkers, forceRecovery, moveShard, - splitShard, - repairSystemData); + repairSystemData, + splitShard); } }; @@ -277,6 +277,22 @@ struct MoveShardRequest { } }; +// Request to trigger a master recovery, and during the following recovery, the system metadata will be +// reconstructed from TLogs, and written to a new 
SS team. +// This is used when metadata on SSes are lost or corrupted. +struct RepairSystemDataRequest { + constexpr static FileIdentifier file_identifier = 2799593; + + ReplyPromise reply; + + RepairSystemDataRequest() {} + + template + void serialize(Ar& ar) { + serializer(ar, reply); + } +}; + // Returns the actual shards generated by the SplitShardRequest. struct SplitShardReply { constexpr static FileIdentifier file_identifier = 1384440; @@ -307,20 +323,4 @@ struct SplitShardRequest { serializer(ar, shard, num, reply); } }; - -// Request to trigger a master recovery, and during the following recovery, the system metadata will be -// reconstructed from TLogs, and written to a new SS team. -// This is used when metadata on SSes are lost or corrupted. -struct RepairSystemDataRequest { - constexpr static FileIdentifier file_identifier = 2799593; - - ReplyPromise reply; - - RepairSystemDataRequest() {} - - template - void serialize(Ar& ar) { - serializer(ar, reply); - } -}; #endif From d73d2144fdf7b5e02ea39133ebc86dc52be9334f Mon Sep 17 00:00:00 2001 From: He Liu Date: Thu, 11 Nov 2021 20:28:55 -0800 Subject: [PATCH 052/142] Adjust distributorSplitRange order. 
--- fdbserver/DataDistributorInterface.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fdbserver/DataDistributorInterface.h b/fdbserver/DataDistributorInterface.h index ad3c3c1bfb..5db806eb46 100644 --- a/fdbserver/DataDistributorInterface.h +++ b/fdbserver/DataDistributorInterface.h @@ -34,8 +34,8 @@ struct DataDistributorInterface { UID myId; RequestStream distributorSnapReq; RequestStream distributorExclCheckReq; - RequestStream distributorSplitRange; RequestStream dataDistributorMetrics; + RequestStream distributorSplitRange; DataDistributorInterface() {} explicit DataDistributorInterface(const struct LocalityData& l, UID id) : locality(l), myId(id) {} @@ -55,8 +55,8 @@ struct DataDistributorInterface { myId, distributorSnapReq, distributorExclCheckReq, - distributorSplitRange, - dataDistributorMetrics); + dataDistributorMetrics, + distributorSplitRange); } }; From 9dccb0131eebad0003bfbecfd8447a53b441677e Mon Sep 17 00:00:00 2001 From: Daniel Smith Date: Fri, 12 Nov 2021 12:14:12 -0500 Subject: [PATCH 053/142] Clean up RocksDB error logging --- fdbserver/KeyValueStoreRocksDB.actor.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fdbserver/KeyValueStoreRocksDB.actor.cpp b/fdbserver/KeyValueStoreRocksDB.actor.cpp index cd15108eb3..635709960e 100644 --- a/fdbserver/KeyValueStoreRocksDB.actor.cpp +++ b/fdbserver/KeyValueStoreRocksDB.actor.cpp @@ -206,7 +206,8 @@ ACTOR Future rocksDBMetricLogger(std::shared_ptr stat } void logRocksDBError(const rocksdb::Status& status, const std::string& method) { - TraceEvent e(SevError, "RocksDBError"); + auto level = status.IsTimedOut() ? 
SevWarn : SevError; + TraceEvent e(level, "RocksDBError"); e.detail("Error", status.ToString()).detail("Method", method).detail("RocksDBSeverity", status.severity()); if (status.IsIOError()) { e.detail("SubCode", status.subcode()); @@ -414,7 +415,7 @@ struct RocksDBKeyValueStore : IKeyValueStore { } else if (s.IsNotFound()) { a.result.send(Optional()); } else { - TraceEvent(SevError, "RocksDBError").detail("Error", s.ToString()).detail("Method", "ReadValue"); + logRocksDBError(s, "ReadValue"); a.result.sendError(statusToError(s)); } } From 2d5f92427848a7221d6584f155b999dd7a916df0 Mon Sep 17 00:00:00 2001 From: Tao Lin Date: Fri, 12 Nov 2021 09:35:24 -0800 Subject: [PATCH 054/142] GetKeyValuesAndFlatMap should return error if not retriable --- fdbserver/storageserver.actor.cpp | 20 ++++++++++++++--- .../workloads/IndexPrefetchDemo.actor.cpp | 22 +++++++++++++++---- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 2156303f15..d07be2bb95 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -85,6 +85,13 @@ bool canReplyWith(Error e) { case error_code_watch_cancelled: case error_code_unknown_change_feed: case error_code_server_overloaded: + // getRangeAndMap related exceptions that are not retriable: + case error_code_mapper_bad_index: + case error_code_mapper_no_such_key: + case error_code_mapper_bad_range_decriptor: + case error_code_quick_get_key_values_has_more: + case error_code_quick_get_value_miss: + case error_code_quick_get_key_values_miss: // case error_code_all_alternatives_failed: return true; default: @@ -2868,11 +2875,18 @@ ACTOR Future getKeyValuesAndFlatMapQ(StorageServer* data, GetKeyValuesAndF } else { state int remainingLimitBytes = req.limitBytes; - GetKeyValuesReply _r = wait( + GetKeyValuesReply getKeyValuesReply = wait( readRange(data, version, KeyRangeRef(begin, end), req.limit, &remainingLimitBytes, span.context, type)); 
- // Map the scanned range to another list of keys and look up. - state GetKeyValuesAndFlatMapReply r = wait(flatMap(data, _r, req.mapper)); + state GetKeyValuesAndFlatMapReply r; + try { + // Map the scanned range to another list of keys and look up. + GetKeyValuesAndFlatMapReply _r = wait(flatMap(data, getKeyValuesReply, req.mapper)); + r = _r; + } catch (Error& e) { + TraceEvent("FlatMapError").error(e); + throw; + } if (req.debugID.present()) g_traceBatch.addEvent("TransactionDebug", diff --git a/fdbserver/workloads/IndexPrefetchDemo.actor.cpp b/fdbserver/workloads/IndexPrefetchDemo.actor.cpp index 87247b0a11..002b766584 100644 --- a/fdbserver/workloads/IndexPrefetchDemo.actor.cpp +++ b/fdbserver/workloads/IndexPrefetchDemo.actor.cpp @@ -35,6 +35,7 @@ const KeyRef INDEX = "INDEX"_sr; struct IndexPrefetchDemoWorkload : TestWorkload { bool enabled; + const bool BAD_MAPPER = deterministicRandom()->random01() < 0.1; IndexPrefetchDemoWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { enabled = !clientId; // only do this on the "first" client @@ -97,7 +98,7 @@ struct IndexPrefetchDemoWorkload : TestWorkload { return Void(); } - ACTOR Future scanRangeAndFlatMap(Database cx, KeyRange range, Key mapper) { + ACTOR Future scanRangeAndFlatMap(Database cx, KeyRange range, Key mapper, IndexPrefetchDemoWorkload* self) { std::cout << "start scanRangeAndFlatMap " << range.toString() << std::endl; // TODO: When n is large, split into multiple transactions. 
state Transaction tr(cx); @@ -109,17 +110,26 @@ struct IndexPrefetchDemoWorkload : TestWorkload { mapper, GetRangeLimits(CLIENT_KNOBS->TOO_MANY))); showResult(result); + if (self->BAD_MAPPER) { + TraceEvent("IndexPrefetchDemoWorkloadShouldNotReachable").detail("ResultSize", result.size()); + } // result size: 2 // key=\x01prefix\x00\x01RECORD\x00\x01primary-key-of-record-2\x00, value=\x01data-of-record-2\x00 // key=\x01prefix\x00\x01RECORD\x00\x01primary-key-of-record-3\x00, value=\x01data-of-record-3\x00 } catch (Error& e) { - wait(tr.onError(e)); + if (self->BAD_MAPPER && e.code() == error_code_mapper_bad_index) { + TraceEvent("IndexPrefetchDemoWorkloadBadMapperDetected").error(e); + } else { + wait(tr.onError(e)); + } } std::cout << "finished scanRangeAndFlatMap" << std::endl; return Void(); } ACTOR Future _start(Database cx, IndexPrefetchDemoWorkload* self) { + TraceEvent("IndexPrefetchDemoWorkloadConfig").detail("BadMapper", self->BAD_MAPPER); + // TODO: Use toml to config wait(self->fillInRecords(cx, 5)); @@ -131,9 +141,13 @@ struct IndexPrefetchDemoWorkload : TestWorkload { wait(self->scanRange(cx, someIndexes)); Tuple mapperTuple; - mapperTuple << prefix << RECORD << "{K[3]}"_sr; + if (self->BAD_MAPPER) { + mapperTuple << prefix << RECORD << "{K[xxx]}"_sr; + } else { + mapperTuple << prefix << RECORD << "{K[3]}"_sr; + } Key mapper = mapperTuple.getDataAsStandalone(); - wait(self->scanRangeAndFlatMap(cx, someIndexes, mapper)); + wait(self->scanRangeAndFlatMap(cx, someIndexes, mapper, self)); return Void(); } From 82c3e8bf798e42b6062d33ff4e450e3840cbe2cc Mon Sep 17 00:00:00 2001 From: Ata E Husain Bohra Date: Fri, 12 Nov 2021 09:41:01 -0800 Subject: [PATCH 055/142] Trigger buildTeam operation if server transition from unhealthy -> healthy (#5930) * Trigger buildTeam operation if server transition from unhealthy -> healthy DataDistribution actor helps in building teams as server count changes (add/removal), however, it is possible that total_healthy_server 
count is insufficient to allow team formation. If happens, even healthy server count recover, the buildTeam operation will not be triggered. Patch proposal is to trigger `checkBuildTeam` operation if server transitions from unhealthy -> healthy state. Incase system already has created enough teams (desiredTeamCount/maxTeamCount), the operation incurs a very minimal cost. --- fdbserver/DataDistribution.actor.cpp | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index 84f13137a7..583aeb46bb 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -4641,12 +4641,18 @@ ACTOR Future storageServerFailureTracker(DDTeamCollection* self, self->healthyZone.set(Optional()); } } + if (!status->isUnhealthy()) { + // On server transistion from unhealthy -> healthy, trigger buildTeam check, + // handles scenario when team building failed due to insufficient healthy servers. + // Operaton cost is minimal if currentTeamCount == desiredTeamCount/maxTeamCount. + self->doBuildTeams = true; + } - // TraceEvent("StatusMapChange", self->distributorId) - // .detail("ServerID", interf.id()) - // .detail("Status", status->toString()) - // .detail("Available", - // IFailureMonitor::failureMonitor().getState(interf.waitFailure.getEndpoint()).isAvailable()); + TraceEvent(SevDebug, "StatusMapChange", self->distributorId) + .detail("ServerID", interf.id()) + .detail("Status", status->toString()) + .detail("Available", + IFailureMonitor::failureMonitor().getState(interf.waitFailure.getEndpoint()).isAvailable()); } when(wait(status->isUnhealthy() ? waitForAllDataRemoved(cx, interf.id(), addedVersion, self) : Never())) { break; From d129b726e1c07227ba9052435f66b116f2b75804 Mon Sep 17 00:00:00 2001 From: "A.J. 
Beamon" Date: Fri, 12 Nov 2021 10:12:49 -0800 Subject: [PATCH 056/142] Don't use assertion for bad option usage; don't disable local client until setup network is called. --- fdbclient/MultiVersionTransaction.actor.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/fdbclient/MultiVersionTransaction.actor.cpp b/fdbclient/MultiVersionTransaction.actor.cpp index 4e756cf196..fb2fc184ea 100644 --- a/fdbclient/MultiVersionTransaction.actor.cpp +++ b/fdbclient/MultiVersionTransaction.actor.cpp @@ -1784,16 +1784,15 @@ void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option, } else if (option == FDBNetworkOptions::CLIENT_THREADS_PER_VERSION) { MutexHolder holder(lock); validateOption(value, true, false, false); - ASSERT(!networkStartSetup); + if (networkStartSetup) { + throw invalid_option(); + } #if defined(__unixish__) threadCount = extractIntOption(value, 1, 1024); #else // multiple client threads are not supported on windows. threadCount = extractIntOption(value, 1, 1); #endif - if (threadCount > 1) { - disableLocalClient(); - } } else { MutexHolder holder(lock); localClient->api->setNetworkOption(option, value); @@ -1821,6 +1820,10 @@ void MultiVersionApi::setupNetwork() { throw network_already_setup(); } + if (threadCount > 1) { + disableLocalClient(); + } + for (auto i : externalClientDescriptions) { std::string path = i.second.libPath; std::string filename = basename(path); From e752fdd69ffb283de7a7d99a79a9ba77024ad33d Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Fri, 12 Nov 2021 11:45:21 -0800 Subject: [PATCH 057/142] Optimization in FlowTransport's use of XXHash to avoid a malloc() when using the stream API and to not use the stream API when a message being sent is contiguous within a single packet buffer block. 
(#5970) --- fdbrpc/FlowTransport.actor.cpp | 46 ++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/fdbrpc/FlowTransport.actor.cpp b/fdbrpc/FlowTransport.actor.cpp index 7c58bc1c7f..986ac49a75 100644 --- a/fdbrpc/FlowTransport.actor.cpp +++ b/fdbrpc/FlowTransport.actor.cpp @@ -40,6 +40,7 @@ #include "flow/ObjectSerializer.h" #include "flow/ProtocolVersion.h" #include "flow/UnitTest.h" +#define XXH_INLINE_ALL #include "flow/xxhash.h" #include "flow/actorcompiler.h" // This must be the last #include. @@ -1589,16 +1590,17 @@ static ReliablePacket* sendPacket(TransportData* self, // Reserve some space for packet length and checksum, write them after serializing data SplitBuffer packetInfoBuffer; uint32_t len; - XXH64_hash_t checksum = 0; - XXH3_state_t* checksumState = nullptr; + + // This is technically abstraction breaking but avoids XXH3_createState() and XXH3_freeState() which are just + // malloc/free + XXH3_state_t checksumState; + // Checksum will be calculated with buffer API if contiguous, else using stream API. Mode is tracked here. 
+ bool checksumStream = false; + XXH64_hash_t checksum; int packetInfoSize = PACKET_LEN_WIDTH; if (checksumEnabled) { packetInfoSize += sizeof(checksum); - checksumState = XXH3_createState(); - if (XXH3_64bits_reset(checksumState) != XXH_OK) { - throw internal_error(); - } } wr.writeAhead(packetInfoSize, &packetInfoBuffer); @@ -1620,19 +1622,37 @@ static ReliablePacket* sendPacket(TransportData* self, while (checksumUnprocessedLength > 0) { uint32_t processLength = std::min(checksumUnprocessedLength, (uint32_t)(checksumPb->bytes_written - prevBytesWritten)); - // This won't fail if inputs are non null - if (XXH3_64bits_update(checksumState, checksumPb->data() + prevBytesWritten, processLength) != XXH_OK) { - throw internal_error(); + + // If not in checksum stream mode yet + if (!checksumStream) { + // If there is nothing left to process then calculate checksum directly + if (processLength == checksumUnprocessedLength) { + checksum = XXH3_64bits(checksumPb->data() + prevBytesWritten, processLength); + } else { + // Otherwise, initialize checksum state and switch to stream mode + if (XXH3_64bits_reset(&checksumState) != XXH_OK) { + throw internal_error(); + } + checksumStream = true; + } } + + // If in checksum stream mode, update the checksum state + if (checksumStream) { + if (XXH3_64bits_update(&checksumState, checksumPb->data() + prevBytesWritten, processLength) != + XXH_OK) { + throw internal_error(); + } + } + checksumUnprocessedLength -= processLength; checksumPb = checksumPb->nextPacketBuffer(); prevBytesWritten = 0; } - checksum = XXH3_64bits_digest(checksumState); - // This always returns OK - if (XXH3_freeState(checksumState) != XXH_OK) { - throw internal_error(); + // If in checksum stream mode, get the final checksum + if (checksumStream) { + checksum = XXH3_64bits_digest(&checksumState); } } From 2f2e5219daeb854787520ffff5fbef3ed8a81e80 Mon Sep 17 00:00:00 2001 From: John Brownlee Date: Fri, 12 Nov 2021 11:58:38 -0800 Subject: [PATCH 058/142] Move the 
config file scheme into a separate package so it can be imported by the operator. Move the argument for the number of servers per pod into a command-line argument so we can use the same config file at different storage server densities. --- fdbkubernetesmonitor/README.md | 4 ++-- .../{ => api}/.testdata/default_config.json | 1 - .../{ => api}/.testdata/fdb.cluster | 0 .../{ => api}/.testdata/test_env.sh | 0 fdbkubernetesmonitor/{ => api}/config.go | 8 ++++--- fdbkubernetesmonitor/{ => api}/config_test.go | 2 +- fdbkubernetesmonitor/kubernetes.go | 3 ++- fdbkubernetesmonitor/main.go | 4 +++- fdbkubernetesmonitor/monitor.go | 22 ++++++++++++------- packaging/docker/kubernetes/Dockerfile | 2 +- packaging/docker/kubernetes/test_config.yaml | 4 ++-- 11 files changed, 30 insertions(+), 20 deletions(-) rename fdbkubernetesmonitor/{ => api}/.testdata/default_config.json (98%) rename fdbkubernetesmonitor/{ => api}/.testdata/fdb.cluster (100%) rename fdbkubernetesmonitor/{ => api}/.testdata/test_env.sh (100%) rename fdbkubernetesmonitor/{ => api}/config.go (95%) rename fdbkubernetesmonitor/{ => api}/config_test.go (99%) diff --git a/fdbkubernetesmonitor/README.md b/fdbkubernetesmonitor/README.md index b8a68a03ac..901b1c9c72 100644 --- a/fdbkubernetesmonitor/README.md +++ b/fdbkubernetesmonitor/README.md @@ -10,7 +10,7 @@ docker build -t foundationdb/foundationdb-kubernetes:6.3.15-local --build-arg FD kubectl apply -f packaging/docker/kubernetes/test_config.yaml # Wait for the pods to become ready ips=$(kubectl get pod -l app=fdb-kubernetes-example -o json | jq -j '[[.items|.[]|select(.status.podIP!="")]|limit(3;.[])|.status.podIP+":4501"]|join(",")') -sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"serverCount\": 0/\"serverCount\": 1/" packaging/docker/kubernetes/test_config.yaml | kubectl apply -f - +sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"runProcesses\": false/\"runProcesses\": true/" 
packaging/docker/kubernetes/test_config.yaml | kubectl apply -f - kubectl get pod -l app=fdb-kubernetes-example -o name | xargs -I {} kubectl annotate {} foundationdb.org/outdated-config-map-seen=$(date +%s) --overwrite # Watch the logs for the fdb-kubernetes-example pods to confirm that they have launched the fdbserver processes. kubectl exec -it sts/fdb-kubernetes-example -- fdbcli --exec "configure new double ssd" @@ -21,7 +21,7 @@ This will set up a cluster in your Kubernetes environment using a statefulset, t You can then make changes to the data in the config map and update the fdbserver processes: ```bash -sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"serverCount\": 0/\"serverCount\": 1/" packaging/docker/kubernetes/test_config.yaml | kubectl apply -f - +sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"runProcesses\": false/\"runProcesses\": true/" packaging/docker/kubernetes/test_config.yaml | kubectl apply -f - # You can apply an annotation to speed up the propagation of config kubectl get pod -l app=fdb-kubernetes-example -o name | xargs -I {} kubectl annotate {} foundationdb.org/outdated-config-map-seen=$(date +%s) --overwrite diff --git a/fdbkubernetesmonitor/.testdata/default_config.json b/fdbkubernetesmonitor/api/.testdata/default_config.json similarity index 98% rename from fdbkubernetesmonitor/.testdata/default_config.json rename to fdbkubernetesmonitor/api/.testdata/default_config.json index 60d337c4c5..d85d766f31 100644 --- a/fdbkubernetesmonitor/.testdata/default_config.json +++ b/fdbkubernetesmonitor/api/.testdata/default_config.json @@ -1,6 +1,5 @@ { "version": "6.3.15", - "serverCount": 1, "arguments": [ {"value": "--cluster_file"}, {"value": ".testdata/fdb.cluster"}, diff --git a/fdbkubernetesmonitor/.testdata/fdb.cluster b/fdbkubernetesmonitor/api/.testdata/fdb.cluster similarity index 100% rename from fdbkubernetesmonitor/.testdata/fdb.cluster rename to 
fdbkubernetesmonitor/api/.testdata/fdb.cluster diff --git a/fdbkubernetesmonitor/.testdata/test_env.sh b/fdbkubernetesmonitor/api/.testdata/test_env.sh similarity index 100% rename from fdbkubernetesmonitor/.testdata/test_env.sh rename to fdbkubernetesmonitor/api/.testdata/test_env.sh diff --git a/fdbkubernetesmonitor/config.go b/fdbkubernetesmonitor/api/config.go similarity index 95% rename from fdbkubernetesmonitor/config.go rename to fdbkubernetesmonitor/api/config.go index 2815dbd235..c44834987b 100644 --- a/fdbkubernetesmonitor/config.go +++ b/fdbkubernetesmonitor/api/config.go @@ -17,7 +17,7 @@ // limitations under the License. // -package main +package api import ( "fmt" @@ -31,8 +31,10 @@ type ProcessConfiguration struct { // Version provides the version of FoundationDB the process should run. Version string `json:"version"` - // ServerCount defines the number of processes to start. - ServerCount int `json:"serverCount,omitempty"` + // RunServers defines whether we should run the server processes. + // This defaults to true, but you can set it to false to prevent starting + // new fdbserver processes. + RunServers *bool `json:"runServers,omitempty"` // BinaryPath provides the path to the binary to launch. BinaryPath string `json:"-"` diff --git a/fdbkubernetesmonitor/config_test.go b/fdbkubernetesmonitor/api/config_test.go similarity index 99% rename from fdbkubernetesmonitor/config_test.go rename to fdbkubernetesmonitor/api/config_test.go index d0ea625807..bec9559638 100644 --- a/fdbkubernetesmonitor/config_test.go +++ b/fdbkubernetesmonitor/api/config_test.go @@ -17,7 +17,7 @@ // limitations under the License. 
// -package main +package api import ( "encoding/json" diff --git a/fdbkubernetesmonitor/kubernetes.go b/fdbkubernetesmonitor/kubernetes.go index 3378f75819..da4072aac1 100644 --- a/fdbkubernetesmonitor/kubernetes.go +++ b/fdbkubernetesmonitor/kubernetes.go @@ -27,6 +27,7 @@ import ( "path" "strconv" + "github.com/apple/foundationdb/fdbkubernetesmonitor/api" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -95,7 +96,7 @@ func CreatePodClient(logger logr.Logger) (*PodClient, error) { // retrieveEnvironmentVariables extracts the environment variables we have for // an argument into a map. -func retrieveEnvironmentVariables(argument Argument, target map[string]string) { +func retrieveEnvironmentVariables(argument api.Argument, target map[string]string) { if argument.Source != "" { target[argument.Source] = os.Getenv(argument.Source) } diff --git a/fdbkubernetesmonitor/main.go b/fdbkubernetesmonitor/main.go index 8bc1b57b09..d91493f3dd 100644 --- a/fdbkubernetesmonitor/main.go +++ b/fdbkubernetesmonitor/main.go @@ -51,6 +51,7 @@ var ( mainContainerVersion string currentContainerVersion string additionalEnvFile string + processCount int ) type executionMode string @@ -78,6 +79,7 @@ func main() { pflag.StringArrayVar(&requiredCopyFiles, "require-not-empty", nil, "When copying this file, exit with an error if the file is empty") pflag.StringVar(&mainContainerVersion, "main-container-version", "", "For sidecar mode, this specifies the version of the main container. 
If this is equal to the current container version, no files will be copied") pflag.StringVar(&additionalEnvFile, "additional-env-file", "", "A file with additional environment variables to use when interpreting the monitor configuration") + pflag.IntVar(&processCount, "process-count", 1, "The number of processes to start") pflag.Parse() zapConfig := zap.NewProductionConfig() @@ -110,7 +112,7 @@ func main() { logger.Error(err, "Error loading additional environment") os.Exit(1) } - StartMonitor(logger, fmt.Sprintf("%s/%s", inputDir, monitorConfFile), customEnvironment) + StartMonitor(logger, fmt.Sprintf("%s/%s", inputDir, monitorConfFile), customEnvironment, processCount) case executionModeInit: err = CopyFiles(logger, outputDir, copyDetails, requiredCopies) if err != nil { diff --git a/fdbkubernetesmonitor/monitor.go b/fdbkubernetesmonitor/monitor.go index 9ba3542971..c23f20e832 100644 --- a/fdbkubernetesmonitor/monitor.go +++ b/fdbkubernetesmonitor/monitor.go @@ -32,6 +32,7 @@ import ( "syscall" "time" + "github.com/apple/foundationdb/fdbkubernetesmonitor/api" "github.com/fsnotify/fsnotify" "github.com/go-logr/logr" ) @@ -52,7 +53,7 @@ type Monitor struct { CustomEnvironment map[string]string // ActiveConfiguration defines the active process configuration. - ActiveConfiguration *ProcessConfiguration + ActiveConfiguration *api.ProcessConfiguration // ActiveConfigurationBytes defines the source data for the active process // configuration. @@ -62,6 +63,9 @@ type Monitor struct { // configuration file. LastConfigurationTime time.Time + // ProcessCount defines how many processes the monitor should run. + ProcessCount int + // ProcessIDs stores the PIDs of the processes that are running. A PID of // zero will indicate that a process does not have a run loop. A PID of -1 // will indicate that a process has a run loop but is not currently running @@ -82,7 +86,7 @@ type Monitor struct { } // StartMonitor starts the monitor loop.
-func StartMonitor(logger logr.Logger, configFile string, customEnvironment map[string]string) { +func StartMonitor(logger logr.Logger, configFile string, customEnvironment map[string]string, processCount int) { podClient, err := CreatePodClient(logger) if err != nil { panic(err) @@ -93,6 +97,7 @@ func StartMonitor(logger logr.Logger, configFile string, customEnvironment map[s PodClient: podClient, Logger: logger, CustomEnvironment: customEnvironment, + ProcessCount: processCount, } go func() { monitor.WatchPodTimestamps() }() @@ -107,7 +112,7 @@ func (monitor *Monitor) LoadConfiguration() { return } defer file.Close() - configuration := &ProcessConfiguration{} + configuration := &api.ProcessConfiguration{} configurationBytes, err := io.ReadAll(file) if err != nil { monitor.Logger.Error(err, "Error reading monitor configuration", "monitorConfigPath", monitor.ConfigFile) @@ -154,15 +159,15 @@ func checkOwnerExecutable(path string) error { // acceptConfiguration is called when the monitor process parses and accepts // a configuration from the local config file. 
-func (monitor *Monitor) acceptConfiguration(configuration *ProcessConfiguration, configurationBytes []byte) { +func (monitor *Monitor) acceptConfiguration(configuration *api.ProcessConfiguration, configurationBytes []byte) { monitor.Mutex.Lock() defer monitor.Mutex.Unlock() monitor.Logger.Info("Received new configuration file", "configuration", configuration) if monitor.ProcessIDs == nil { - monitor.ProcessIDs = make([]int, configuration.ServerCount+1) + monitor.ProcessIDs = make([]int, monitor.ProcessCount+1) } else { - for len(monitor.ProcessIDs) <= configuration.ServerCount { + for len(monitor.ProcessIDs) <= monitor.ProcessCount { monitor.ProcessIDs = append(monitor.ProcessIDs, 0) } } @@ -171,7 +176,7 @@ func (monitor *Monitor) acceptConfiguration(configuration *ProcessConfiguration, monitor.ActiveConfigurationBytes = configurationBytes monitor.LastConfigurationTime = time.Now() - for processNumber := 1; processNumber <= configuration.ServerCount; processNumber++ { + for processNumber := 1; processNumber <= monitor.ProcessCount; processNumber++ { if monitor.ProcessIDs[processNumber] == 0 { monitor.ProcessIDs[processNumber] = -1 tempNumber := processNumber @@ -284,7 +289,8 @@ func (monitor *Monitor) checkProcessRequired(processNumber int) bool { monitor.Mutex.Lock() defer monitor.Mutex.Unlock() logger := monitor.Logger.WithValues("processNumber", processNumber, "area", "checkProcessRequired") - if monitor.ActiveConfiguration.ServerCount < processNumber { + runProcesses := monitor.ActiveConfiguration.RunServers + if monitor.ProcessCount < processNumber || (runProcesses != nil && !*runProcesses) { logger.Info("Terminating run loop") monitor.ProcessIDs[processNumber] = 0 return false diff --git a/packaging/docker/kubernetes/Dockerfile b/packaging/docker/kubernetes/Dockerfile index 58ef7e998a..de529138b7 100644 --- a/packaging/docker/kubernetes/Dockerfile +++ b/packaging/docker/kubernetes/Dockerfile @@ -26,7 +26,7 @@ FROM golang:1.16.7-bullseye AS go-build COPY 
fdbkubernetesmonitor/ /fdbkubernetesmonitor WORKDIR /fdbkubernetesmonitor -RUN go build -o /fdb-kubernetes-monitor ./... +RUN go build -o /fdb-kubernetes-monitor *.go # Build the main image diff --git a/packaging/docker/kubernetes/test_config.yaml b/packaging/docker/kubernetes/test_config.yaml index 1f43b7dd3e..4bf58ec618 100644 --- a/packaging/docker/kubernetes/test_config.yaml +++ b/packaging/docker/kubernetes/test_config.yaml @@ -138,7 +138,7 @@ data: fdb.cluster: "" config.json: | { - "serverCount": 0, + "runProcesses": false, "version": "6.3.13", "arguments": [ {"value": "--cluster_file"}, @@ -239,7 +239,7 @@ spec: emptyDir: {} initContainers: - name: foundationdb-kubernetes-init - image: foundationdb/foundationdb-kubernetes:latest + image: foundationdb/foundationdb-kubernetes:6.3.13-local imagePullPolicy: IfNotPresent args: - "--mode" From 508429f30d9a332381373f38643d1426f106e4c6 Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Fri, 12 Nov 2021 13:47:07 -0800 Subject: [PATCH 059/142] Redwood chunked file growth and low priority IO starvation prevention (#5936) * Redwood files now growth in large page chunks controlled by a knob to reduce truncate() calls for expansion. PriorityMultiLock has limit on consecutive same-priority lock release. Increased Redwood max priority level to 3 for more separation at higher BTree levels. * Simulation fix, don't mark certain IO timeout errors as injected unless the simulated process has been set to have an unreliable disk. * Pager writes now truncate gradually upward, one chunk at a time, in response to writes, which wait on only the necessary truncate operations. Increased buggified chunk size because truncate can be very slow in simulation. * In simulation, ioTimeoutError() and ioDegradedOrTimeoutError() will wait until at least the target timeout interval past the point when simulation is sped up. * PriorityMultiLock::toString() prints more info and is now public. * Added queued time to PriorityMultiLock. 
* Bug fix to handle when speedUpSimulation changes later than the configured time. * Refactored mutation application in leaf nodes to do fewer comparisons and do in place value updates if the new value is the same size as the old value. * Renamed updatingInPlace to updatingDeltaTree for clarity. Inlined switchToLinearMerge() since it is only used in one place. * Updated extendToCover to be more clear by passing in the old extension future as a parameter. Fixed initialization warning. --- fdbclient/ServerKnobs.cpp | 1 + fdbclient/ServerKnobs.h | 1 + fdbrpc/genericactors.actor.cpp | 3 +- fdbserver/VersionedBTree.actor.cpp | 175 ++++++++++++++++++++++------- fdbserver/WorkerInterface.actor.h | 60 ++++++++++ fdbserver/tester.actor.cpp | 2 +- flow/Knobs.cpp | 1 + flow/Knobs.h | 1 + flow/genericactors.actor.h | 47 -------- 9 files changed, 201 insertions(+), 90 deletions(-) diff --git a/fdbclient/ServerKnobs.cpp b/fdbclient/ServerKnobs.cpp index f8368f1619..80c3379adf 100644 --- a/fdbclient/ServerKnobs.cpp +++ b/fdbclient/ServerKnobs.cpp @@ -771,6 +771,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi init( REDWOOD_LAZY_CLEAR_MAX_PAGES, 1e6 ); init( REDWOOD_REMAP_CLEANUP_WINDOW, 50 ); init( REDWOOD_REMAP_CLEANUP_LAG, 0.1 ); + init( REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES, 20000 ); if( randomize && BUGGIFY ) { REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES = deterministicRandom()->randomInt(200, 1000); } init( REDWOOD_METRICS_INTERVAL, 5.0 ); init( REDWOOD_HISTOGRAM_INTERVAL, 30.0 ); diff --git a/fdbclient/ServerKnobs.h b/fdbclient/ServerKnobs.h index 33d528adba..add4bf5303 100644 --- a/fdbclient/ServerKnobs.h +++ b/fdbclient/ServerKnobs.h @@ -724,6 +724,7 @@ public: int64_t REDWOOD_REMAP_CLEANUP_WINDOW; // Remap remover lag interval in which to coalesce page writes double REDWOOD_REMAP_CLEANUP_LAG; // Maximum allowed remap remover lag behind the cleanup window as a multiple of // the window size + int REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES; // Number of 
pages to grow page file by double REDWOOD_METRICS_INTERVAL; double REDWOOD_HISTOGRAM_INTERVAL; diff --git a/fdbrpc/genericactors.actor.cpp b/fdbrpc/genericactors.actor.cpp index c714e4329c..e99d951fc8 100644 --- a/fdbrpc/genericactors.actor.cpp +++ b/fdbrpc/genericactors.actor.cpp @@ -25,9 +25,8 @@ #include "flow/actorcompiler.h" ACTOR Future disableConnectionFailuresAfter(double time, std::string context) { - wait(delay(time)); - if (g_network->isSimulated()) { + wait(delayUntil(time)); g_simulator.connectionFailuresDisableDuration = 1e6; g_simulator.speedUpSimulation = true; TraceEvent(SevWarnAlways, ("DisableConnectionFailures_" + context).c_str()); diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 3cd5b1af9d..2ae64a8587 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -128,8 +128,13 @@ public: }; private: - typedef Promise Slot; - typedef Deque Queue; + struct Waiter { + Waiter() : queuedTime(now()) {} + Promise lockPromise; + double queuedTime; + }; + + typedef Deque Queue; #if PRIORITYMULTILOCK_DEBUG #define prioritylock_printf(...) 
printf(__VA_ARGS__) @@ -138,7 +143,8 @@ private: #endif public: - PriorityMultiLock(int concurrency, int maxPriority) : concurrency(concurrency), available(concurrency), waiting(0) { + PriorityMultiLock(int concurrency, int maxPriority, int launchLimit = std::numeric_limits::max()) + : concurrency(concurrency), available(concurrency), waiting(0), launchLimit(launchLimit) { waiters.resize(maxPriority + 1); fRunner = runner(this); } @@ -157,11 +163,37 @@ public: return p; } - Slot s; - waiters[priority].push_back(s); + Waiter w; + waiters[priority].push_back(w); ++waiting; prioritylock_printf("lock exit queued %s\n", toString().c_str()); - return s.getFuture(); + return w.lockPromise.getFuture(); + } + + std::string toString() const { + int runnersDone = 0; + for (int i = 0; i < runners.size(); ++i) { + if (runners[i].isReady()) { + ++runnersDone; + } + } + + std::string s = + format("{ ptr=%p concurrency=%d available=%d running=%d waiting=%d runnersQueue=%d runnersDone=%d ", + this, + concurrency, + available, + concurrency - available, + waiting, + runners.size(), + runnersDone); + + for (int i = 0; i < waiters.size(); ++i) { + s += format("p%d_waiters=%u ", i, waiters[i].size()); + } + + s += "}"; + return s; } private: @@ -181,6 +213,13 @@ private: state Future error = self->brokenOnDestruct.getFuture(); state int maxPriority = self->waiters.size() - 1; + // Priority to try to run tasks from next + state int priority = maxPriority; + state Queue* pQueue = &self->waiters[maxPriority]; + + // Track the number of waiters unlocked at the same priority in a row + state int lastPriorityCount = 0; + loop { // Cleanup finished runner futures at the front of the runner queue. 
while (!self->runners.empty() && self->runners.front().isReady()) { @@ -197,20 +236,22 @@ private: } // While there are available slots and there are waiters, launch tasks - int priority = maxPriority; - while (self->available > 0 && self->waiting > 0) { - auto& q = self->waiters[priority]; - prioritylock_printf( - "Checking priority=%d prioritySize=%d %s\n", priority, q.size(), self->toString().c_str()); + prioritylock_printf("Checking priority=%d lastPriorityCount=%d %s\n", + priority, + lastPriorityCount, + self->toString().c_str()); - while (!q.empty()) { - Slot s = q.front(); - q.pop_front(); + while (!pQueue->empty() && ++lastPriorityCount < self->launchLimit) { + Waiter w = pQueue->front(); + pQueue->pop_front(); --self->waiting; Lock lock; - prioritylock_printf(" Running waiter priority=%d prioritySize=%d\n", priority, q.size()); - s.send(lock); + prioritylock_printf(" Running waiter priority=%d wait=%f %s\n", + priority, + now() - w.queuedTime, + self->toString().c_str()); + w.lockPromise.send(lock); // Self may have been destructed during the lock callback if (error.isReady()) { @@ -228,24 +269,28 @@ private: } } - // Wrap around to highest priority + // If there are no more slots available, then don't move to the next priority + if (self->available == 0) { + break; + } + + // Decrease priority, wrapping around to max from 0 if (priority == 0) { priority = maxPriority; } else { --priority; } + + pQueue = &self->waiters[priority]; + lastPriorityCount = 0; } } } - std::string toString() const { - return format( - "{ slots=%d/%d waiting=%d runners=%d }", (concurrency - available), concurrency, waiting, runners.size()); - } - int concurrency; int available; int waiting; + int launchLimit; std::vector waiters; Deque> runners; Future fRunner; @@ -371,7 +416,7 @@ std::string toString(const std::pair& o) { static constexpr int ioMinPriority = 0; static constexpr int ioLeafPriority = 1; -static constexpr int ioMaxPriority = 2; +static constexpr int ioMaxPriority = 
3; // A FIFO queue of T stored as a linked list of pages. // Main operations are pop(), pushBack(), pushFront(), and flush(). @@ -2102,10 +2147,10 @@ public: int concurrentExtentReads, bool memoryOnly = false, Promise errorPromise = {}) - : ioLock(FLOW_KNOBS->MAX_OUTSTANDING, ioMaxPriority), pageCacheBytes(pageCacheSizeBytes), pHeader(nullptr), - desiredPageSize(desiredPageSize), desiredExtentSize(desiredExtentSize), filename(filename), - memoryOnly(memoryOnly), errorPromise(errorPromise), remapCleanupWindow(remapCleanupWindow), - concurrentExtentReads(new FlowLock(concurrentExtentReads)) { + : ioLock(FLOW_KNOBS->MAX_OUTSTANDING, ioMaxPriority, FLOW_KNOBS->MAX_OUTSTANDING / 2), + pageCacheBytes(pageCacheSizeBytes), pHeader(nullptr), desiredPageSize(desiredPageSize), + desiredExtentSize(desiredExtentSize), filename(filename), memoryOnly(memoryOnly), errorPromise(errorPromise), + remapCleanupWindow(remapCleanupWindow), concurrentExtentReads(new FlowLock(concurrentExtentReads)) { if (!g_redwoodMetricsActor.isValid()) { g_redwoodMetricsActor = redwoodMetricsLogger(); @@ -2175,6 +2220,8 @@ public: wait(store(fileSize, self->pageFile->size())); } + self->fileExtension = Void(); + debug_printf( "DWALPager(%s) recover exists=%d fileSize=%" PRId64 "\n", self->filename.c_str(), exists, fileSize); // TODO: If the file exists but appears to never have been successfully committed is this an error or @@ -2221,6 +2268,9 @@ public: } self->setPageSize(self->pHeader->pageSize); + self->filePageCount = fileSize / self->physicalPageSize; + self->filePageCountPending = self->filePageCount; + if (self->logicalPageSize != self->desiredPageSize) { TraceEvent(SevWarn, "RedwoodPageSizeNotDesired") .detail("Filename", self->filename) @@ -2320,6 +2370,8 @@ public: // Now that the header page has been allocated, set page size to desired self->setPageSize(self->desiredPageSize); + self->filePageCount = 0; + self->filePageCountPending = 0; // Now set the extent size, do this always after 
setting the page size as // extent size is a multiple of page size @@ -2397,7 +2449,10 @@ public: self->recoveryVersion, self->pHeader->oldestVersion, self->logicalPageSize, - self->physicalPageSize); + self->physicalPageSize, + self->pHeader->pageCount, + self->filePageCount); + return Void(); } @@ -2486,12 +2541,14 @@ public: // Grow the pager file by one page and return it LogicalPageID newLastPageID() { LogicalPageID id = pHeader->pageCount; - ++pHeader->pageCount; + growPager(1); return id; } Future newPageID() override { return newPageID_impl(this); } + void growPager(int64_t pages) { pHeader->pageCount += pages; } + // Get a new, previously available extent and it's first page ID. The page will be considered in-use after the next // commit regardless of whether or not it was written to, until it is returned to the pager via freePage() ACTOR static Future newExtentPageID_impl(DWALPager* self, QueueID queueID) { @@ -2521,7 +2578,7 @@ public: // That translates to extentID being same as the return first pageID LogicalPageID newLastExtentID() { LogicalPageID id = pHeader->pageCount; - pHeader->pageCount += pagesPerExtent; + growPager(pagesPerExtent); return id; } @@ -2541,11 +2598,44 @@ public: if (self->memoryOnly) { return Void(); } + + // If a truncation up to include pageID has not yet been completed + if (pageID >= self->filePageCount) { + // And no extension pending will include pageID + if (pageID >= self->filePageCountPending) { + // Update extension to a new one that waits on the old one and extends further + self->fileExtension = extendToCover(self, pageID, self->fileExtension); + } + + // Wait for extension that covers pageID to complete; + wait(self->fileExtension); + } + // Note: Not using forwardError here so a write error won't be discovered until commit time. 
wait(self->pageFile->write(data, blockSize, (int64_t)pageID * blockSize)); return Void(); } + ACTOR static Future extendToCover(DWALPager* self, uint64_t pageID, Future previousExtension) { + // Calculate new page count, round up to nearest multiple of growth size > pageID + state int64_t newPageCount = pageID + SERVER_KNOBS->REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES - + (pageID % SERVER_KNOBS->REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES); + + // Indicate that extension to this new count has been started + self->filePageCountPending = newPageCount; + + // Wait for any previous extensions to complete + wait(previousExtension); + + // Grow the file + wait(self->pageFile->truncate(newPageCount * self->physicalPageSize)); + + // Indicate that extension to the new count has been completed + self->filePageCount = newPageCount; + + return Void(); + } + Future writePhysicalPage(PagerEventReasons reason, unsigned int level, Standalone> pageIDs, @@ -3699,6 +3789,13 @@ private: Reference headerPage; Header* pHeader; + // Pages - pages known to be in the file, truncations complete to that size + int64_t filePageCount; + // Pages that will be in file once fileExtension is ready + int64_t filePageCountPending; + // Future representing the end of all pending truncations + Future fileExtension; + int desiredPageSize; int desiredExtentSize; @@ -7154,7 +7251,7 @@ RedwoodRecordRef VersionedBTree::dbEnd(LiteralStringRef("\xff\xff\xff\xff\xff")) class KeyValueStoreRedwood : public IKeyValueStore { public: KeyValueStoreRedwood(std::string filePrefix, UID logID) - : m_filePrefix(filePrefix), m_concurrentReads(SERVER_KNOBS->REDWOOD_KVSTORE_CONCURRENT_READS, 0), + : m_filename(filePrefix), m_concurrentReads(SERVER_KNOBS->REDWOOD_KVSTORE_CONCURRENT_READS, 0), prefetch(SERVER_KNOBS->REDWOOD_KVSTORE_RANGE_PREFETCH) { int pageSize = @@ -7184,17 +7281,17 @@ public: Future init() override { return m_init; } ACTOR Future init_impl(KeyValueStoreRedwood* self) { - TraceEvent(SevInfo, 
"RedwoodInit").detail("FilePrefix", self->m_filePrefix); + TraceEvent(SevInfo, "RedwoodInit").detail("FilePrefix", self->m_filename); wait(self->m_tree->init()); TraceEvent(SevInfo, "RedwoodInitComplete") - .detail("FilePrefix", self->m_filePrefix) + .detail("Filename", self->m_filename) .detail("Version", self->m_tree->getLastCommittedVersion()); self->m_nextCommitVersion = self->m_tree->getLastCommittedVersion() + 1; return Void(); } ACTOR void shutdown(KeyValueStoreRedwood* self, bool dispose) { - TraceEvent(SevInfo, "RedwoodShutdown").detail("FilePrefix", self->m_filePrefix).detail("Dispose", dispose); + TraceEvent(SevInfo, "RedwoodShutdown").detail("Filename", self->m_filename).detail("Dispose", dispose); if (self->m_error.canBeSet()) { self->m_error.sendError(actor_cancelled()); // Ideally this should be shutdown_in_progress } @@ -7206,9 +7303,7 @@ public: self->m_tree->close(); wait(closedFuture); self->m_closed.send(Void()); - TraceEvent(SevInfo, "RedwoodShutdownComplete") - .detail("FilePrefix", self->m_filePrefix) - .detail("Dispose", dispose); + TraceEvent(SevInfo, "RedwoodShutdownComplete").detail("Filename", self->m_filename).detail("Dispose", dispose); delete self; } @@ -7428,7 +7523,7 @@ public: ~KeyValueStoreRedwood() override{}; private: - std::string m_filePrefix; + std::string m_filename; VersionedBTree* m_tree; Future m_init; Promise m_closed; @@ -9038,7 +9133,7 @@ TEST_CASE("Lredwood/correctness/btree") { g_redwoodMetricsActor = Void(); // Prevent trace event metrics from starting g_redwoodMetrics.clear(); - state std::string fileName = params.get("fileName").orDefault("unittest_pageFile.redwood"); + state std::string fileName = params.get("Filename").orDefault("unittest_pageFile.redwood"); IPager2* pager; state bool serialTest = params.getInt("serialTest").orDefault(deterministicRandom()->random01() < 0.25); @@ -9628,7 +9723,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { TEST_CASE(":/redwood/performance/set") { state 
SignalableActorCollection actors; - state std::string fileName = params.get("fileName").orDefault("unittest.redwood"); + state std::string fileName = params.get("Filename").orDefault("unittest.redwood"); state int pageSize = params.getInt("pageSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_PAGE_SIZE); state int extentSize = params.getInt("extentSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_EXTENT_SIZE); state int64_t pageCacheBytes = params.getInt("pageCacheBytes").orDefault(FLOW_KNOBS->PAGE_CACHE_4K); diff --git a/fdbserver/WorkerInterface.actor.h b/fdbserver/WorkerInterface.actor.h index 0deedd73c6..8ad2214e27 100644 --- a/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/WorkerInterface.actor.h @@ -1094,6 +1094,66 @@ ACTOR Future tLog(IKeyValueStore* persistentData, typedef decltype(&tLog) TLogFn; +ACTOR template +Future ioTimeoutError(Future what, double time) { + // Before simulation is sped up, IO operations can take a very long time so limit timeouts + // to not end until at least time after simulation is sped up. + if (g_network->isSimulated() && !g_simulator.speedUpSimulation) { + time += std::max(0.0, FLOW_KNOBS->SIM_SPEEDUP_AFTER_SECONDS - now()); + } + Future end = lowPriorityDelay(time); + choose { + when(T t = wait(what)) { return t; } + when(wait(end)) { + Error err = io_timeout(); + if (g_network->isSimulated() && !g_simulator.getCurrentProcess()->isReliable()) { + err = err.asInjectedFault(); + } + TraceEvent(SevError, "IoTimeoutError").error(err); + throw err; + } + } +} + +ACTOR template +Future ioDegradedOrTimeoutError(Future what, + double errTime, + Reference> degraded, + double degradedTime) { + // Before simulation is sped up, IO operations can take a very long time so limit timeouts + // to not end until at least time after simulation is sped up. 
+ if (g_network->isSimulated() && !g_simulator.speedUpSimulation) { + double timeShift = std::max(0.0, FLOW_KNOBS->SIM_SPEEDUP_AFTER_SECONDS - now()); + errTime += timeShift; + degradedTime += timeShift; + } + + if (degradedTime < errTime) { + Future degradedEnd = lowPriorityDelay(degradedTime); + choose { + when(T t = wait(what)) { return t; } + when(wait(degradedEnd)) { + TEST(true); // TLog degraded + TraceEvent(SevWarnAlways, "IoDegraded").log(); + degraded->set(true); + } + } + } + + Future end = lowPriorityDelay(errTime - degradedTime); + choose { + when(T t = wait(what)) { return t; } + when(wait(end)) { + Error err = io_timeout(); + if (g_network->isSimulated() && !g_simulator.getCurrentProcess()->isReliable()) { + err = err.asInjectedFault(); + } + TraceEvent(SevError, "IoTimeoutError").error(err); + throw err; + } + } +} + #include "fdbserver/ServerDBInfo.h" #include "flow/unactorcompiler.h" #endif diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index 8c98b3cf54..ae19277f6f 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -1439,7 +1439,7 @@ ACTOR Future runTests(Reference disabler = disableConnectionFailuresAfter(450, "Tester"); + state Future disabler = disableConnectionFailuresAfter(FLOW_KNOBS->SIM_SPEEDUP_AFTER_SECONDS, "Tester"); // Change the configuration (and/or create the database) if necessary printf("startingConfiguration:%s start\n", startingConfiguration.toString().c_str()); diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index fb33dbb10f..907545223c 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -203,6 +203,7 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) { init( MAX_TRACE_FIELD_LENGTH, 495 ); // If the value of this is changed, the corresponding default in Trace.cpp should be changed as well init( MAX_TRACE_EVENT_LENGTH, 4000 ); // If the value of this is changed, the corresponding default in Trace.cpp should be changed as well init( ALLOCATION_TRACING_ENABLED, 
true ); + init( SIM_SPEEDUP_AFTER_SECONDS, 450 ); //TDMetrics init( MAX_METRICS, 600 ); diff --git a/flow/Knobs.h b/flow/Knobs.h index 318dcf8a13..275505c0b2 100644 --- a/flow/Knobs.h +++ b/flow/Knobs.h @@ -245,6 +245,7 @@ public: double MAX_CLOGGING_LATENCY; double MAX_BUGGIFIED_DELAY; int SIM_CONNECT_ERROR_MODE; + double SIM_SPEEDUP_AFTER_SECONDS; // Tracefiles int ZERO_LENGTH_FILE_PAD; diff --git a/flow/genericactors.actor.h b/flow/genericactors.actor.h index 73df375c39..aabcda6aba 100644 --- a/flow/genericactors.actor.h +++ b/flow/genericactors.actor.h @@ -844,53 +844,6 @@ Future timeoutWarningCollector(FutureStream const& input, Future quorumEqualsTrue(std::vector> const& futures, int const& required); Future lowPriorityDelay(double const& waitTime); -ACTOR template -Future ioTimeoutError(Future what, double time) { - Future end = lowPriorityDelay(time); - choose { - when(T t = wait(what)) { return t; } - when(wait(end)) { - Error err = io_timeout(); - if (g_network->isSimulated()) { - err = err.asInjectedFault(); - } - TraceEvent(SevError, "IoTimeoutError").error(err); - throw err; - } - } -} - -ACTOR template -Future ioDegradedOrTimeoutError(Future what, - double errTime, - Reference> degraded, - double degradedTime) { - if (degradedTime < errTime) { - Future degradedEnd = lowPriorityDelay(degradedTime); - choose { - when(T t = wait(what)) { return t; } - when(wait(degradedEnd)) { - TEST(true); // TLog degraded - TraceEvent(SevWarnAlways, "IoDegraded").log(); - degraded->set(true); - } - } - } - - Future end = lowPriorityDelay(errTime - degradedTime); - choose { - when(T t = wait(what)) { return t; } - when(wait(end)) { - Error err = io_timeout(); - if (g_network->isSimulated()) { - err = err.asInjectedFault(); - } - TraceEvent(SevError, "IoTimeoutError").error(err); - throw err; - } - } -} - ACTOR template Future streamHelper(PromiseStream output, PromiseStream errors, Future input) { try { From 9422b8e5f2ed1a147239ff95416b518d077c4c95 Mon Sep 17 00:00:00 
2001 From: Tao Lin Date: Fri, 12 Nov 2021 15:12:37 -0800 Subject: [PATCH 060/142] Restricted getRangeAndFlatMap to snapshot --- bindings/c/test/unit/fdb_api.hpp | 3 ++- bindings/c/test/unit/unit_tests.cpp | 2 +- .../RangeAndFlatMapQueryIntegrationTest.java | 15 +++++++++------ .../com/apple/foundationdb/FDBTransaction.java | 5 +---- .../com/apple/foundationdb/ReadTransaction.java | 5 ++--- fdbclient/ReadYourWrites.actor.cpp | 7 +++---- fdbserver/workloads/IndexPrefetchDemo.actor.cpp | 3 ++- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bindings/c/test/unit/fdb_api.hpp b/bindings/c/test/unit/fdb_api.hpp index fb1304a26e..4643323d60 100644 --- a/bindings/c/test/unit/fdb_api.hpp +++ b/bindings/c/test/unit/fdb_api.hpp @@ -219,7 +219,8 @@ public: fdb_bool_t snapshot, fdb_bool_t reverse); - // Returns a future which will be set to an FDBKeyValue array. + // WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation + // AND disabling read-your-writes. Returns a future which will be set to an FDBKeyValue array. 
KeyValueArrayFuture get_range_and_flat_map(const uint8_t* begin_key_name, int begin_key_name_length, fdb_bool_t begin_or_equal, diff --git a/bindings/c/test/unit/unit_tests.cpp b/bindings/c/test/unit/unit_tests.cpp index a44137aaa9..6321a94161 100644 --- a/bindings/c/test/unit/unit_tests.cpp +++ b/bindings/c/test/unit/unit_tests.cpp @@ -922,7 +922,7 @@ TEST_CASE("fdb_transaction_get_range_and_flat_map") { /* target_bytes */ 0, /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL, /* iteration */ 0, - /* snapshot */ false, + /* snapshot */ true, /* reverse */ 0); if (result.err) { diff --git a/bindings/java/src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java b/bindings/java/src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java index e2801aafd6..c97ce1f750 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java +++ b/bindings/java/src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java @@ -180,9 +180,10 @@ class RangeAndFlatMapQueryIntegrationTest { try { tr.options().setReadYourWritesDisable(); List kvs = - tr.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)), - KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER, - ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL) + tr.snapshot() + .getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)), + KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER, + ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL) .asList() .get(); Assertions.assertEquals(end - begin, kvs.size()); @@ -219,10 +220,12 @@ class RangeAndFlatMapQueryIntegrationTest { // getRangeAndFlatMap is only support without RYW. This is a must!!! tr.options().setReadYourWritesDisable(); + // getRangeAndFlatMap is only supported with snapshot. 
Iterator kvs = - tr.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(0)), - KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER, - ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL) + tr.snapshot() + .getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(0)), + KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER, + ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL) .iterator(); Iterator expected_data_of_records_iter = expected_data_of_records.iterator(); while (expected_data_of_records_iter.hasNext()) { diff --git a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java index 9bd99c892d..8a30280a4d 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java @@ -350,10 +350,7 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC @Override public AsyncIterable getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit, boolean reverse, StreamingMode mode) { - if (mapper == null) { - throw new IllegalArgumentException("Mapper must be non-null"); - } - return new RangeQuery(this, false, begin, end, mapper, limit, reverse, mode, eventKeeper); + throw new UnsupportedOperationException("getRangeAndFlatMap is only supported in snapshot"); } /////////////////// diff --git a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java index 699dfd3ec0..b2b81553ef 100644 --- a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java @@ -425,9 +425,8 @@ public interface ReadTransaction extends ReadTransactionContext { int limit, boolean reverse, StreamingMode mode); /** - * Gets an ordered range of keys and values from the database. 
The begin - * and end keys are specified by {@code KeySelector}s, with the begin - * {@code KeySelector} inclusive and the end {@code KeySelector} exclusive. + * WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation + * AND disabling read-your-writes. * * @see KeySelector * @see AsyncIterator diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index 56ce22fd07..00632fa550 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -397,11 +397,10 @@ public: static inline Future readWithConflictRangeAndFlatMap(ReadYourWritesTransaction* ryw, Req const& req, Snapshot snapshot) { - if (ryw->options.readYourWritesDisabled) { + // For now, getRangeAndFlatMap is only supported if transaction use snapshot isolation AND read-your-writes is + // disabled. + if (snapshot && ryw->options.readYourWritesDisabled) { return readWithConflictRangeThroughAndFlatMap(ryw, req, snapshot); - } else if (snapshot && ryw->options.snapshotRywEnabled <= 0) { - TEST(true); // readWithConflictRangeSnapshot not supported for getRangeAndFlatMap - throw client_invalid_operation(); } TEST(true); // readWithConflictRangeRYW not supported for getRangeAndFlatMap throw client_invalid_operation(); diff --git a/fdbserver/workloads/IndexPrefetchDemo.actor.cpp b/fdbserver/workloads/IndexPrefetchDemo.actor.cpp index 002b766584..08723c9424 100644 --- a/fdbserver/workloads/IndexPrefetchDemo.actor.cpp +++ b/fdbserver/workloads/IndexPrefetchDemo.actor.cpp @@ -108,7 +108,8 @@ struct IndexPrefetchDemoWorkload : TestWorkload { wait(tr.getRangeAndFlatMap(KeySelector(firstGreaterOrEqual(range.begin), range.arena()), KeySelector(firstGreaterOrEqual(range.end), range.arena()), mapper, - GetRangeLimits(CLIENT_KNOBS->TOO_MANY))); + GetRangeLimits(CLIENT_KNOBS->TOO_MANY), + Snapshot::True)); showResult(result); if (self->BAD_MAPPER) { 
TraceEvent("IndexPrefetchDemoWorkloadShouldNotReachable").detail("ResultSize", result.size()); From dc756228f2d5a87dd8c4cf2a69d12ead6d25722b Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Sun, 14 Nov 2021 13:46:17 -0800 Subject: [PATCH 061/142] Make snapshot errors more descriptive --- flow/error_definitions.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/error_definitions.h b/flow/error_definitions.h index 5e815c5798..11ca33530c 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -251,11 +251,11 @@ ERROR( json_malformed, 2401, "JSON string was malformed") ERROR( json_eof_expected, 2402, "JSON string did not terminate where expected") // 2500 - disk snapshot based backup errors -ERROR( snap_disable_tlog_pop_failed, 2500, "Disk Snapshot error") +ERROR( snap_disable_tlog_pop_failed, 2500, "Failed to disable tlog pops") ERROR( snap_storage_failed, 2501, "Failed to snapshot storage nodes") ERROR( snap_tlog_failed, 2502, "Failed to snapshot TLog nodes") ERROR( snap_coord_failed, 2503, "Failed to snapshot coordinator nodes") -ERROR( snap_enable_tlog_pop_failed, 2504, "Disk Snapshot error") +ERROR( snap_enable_tlog_pop_failed, 2504, "Failed to enable tlog pops") ERROR( snap_path_not_whitelisted, 2505, "Snapshot create binary path not whitelisted") ERROR( snap_not_fully_recovered_unsupported, 2506, "Unsupported when the cluster is not fully recovered") ERROR( snap_log_anti_quorum_unsupported, 2507, "Unsupported when log anti quorum is configured") From 0ba77ea79b67abb4f5fb09f136dc08e5a2897892 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Sun, 14 Nov 2021 16:12:28 -0800 Subject: [PATCH 062/142] Fix proxySnapCreate trace typo --- fdbserver/CommitProxyServer.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbserver/CommitProxyServer.actor.cpp b/fdbserver/CommitProxyServer.actor.cpp index fa755359ef..d2c5d7aa28 100644 --- a/fdbserver/CommitProxyServer.actor.cpp +++ 
b/fdbserver/CommitProxyServer.actor.cpp @@ -1729,7 +1729,7 @@ ACTOR Future proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co // FIXME: logAntiQuorum not supported, remove it later, // In version2, we probably don't need this limtiation, but this needs to be tested. if (logAntiQuorum > 0) { - TraceEvent("SnapCommitProxy_LogAnitQuorumNotSupported") + TraceEvent("SnapCommitProxy_LogAntiQuorumNotSupported") .detail("SnapPayload", snapReq.snapPayload) .detail("SnapUID", snapReq.snapUID); throw snap_log_anti_quorum_unsupported(); From 6909754b214975d57ea1de74c9b7a47a6b2b1f33 Mon Sep 17 00:00:00 2001 From: Evan Tschannen Date: Sun, 14 Nov 2021 19:08:46 -0800 Subject: [PATCH 063/142] changefeeds now have a whenAtLeast function for efficiently learning when the version has updated but no mutations have been committed --- fdbcli/ChangeFeedCommand.actor.cpp | 21 ++- fdbclient/ClientKnobs.cpp | 1 + fdbclient/ClientKnobs.h | 1 + fdbclient/DatabaseContext.h | 25 ++- fdbclient/NativeAPI.actor.cpp | 198 +++++++++++++++++++--- fdbclient/StorageServerInterface.h | 41 ++++- fdbserver/BlobWorker.actor.cpp | 17 +- fdbserver/storageserver.actor.cpp | 117 ++++++++++--- fdbserver/workloads/ChangeFeeds.actor.cpp | 4 +- flow/error_definitions.h | 1 + 10 files changed, 360 insertions(+), 66 deletions(-) diff --git a/fdbcli/ChangeFeedCommand.actor.cpp b/fdbcli/ChangeFeedCommand.actor.cpp index d28d96c367..5328bd854d 100644 --- a/fdbcli/ChangeFeedCommand.actor.cpp +++ b/fdbcli/ChangeFeedCommand.actor.cpp @@ -62,6 +62,17 @@ ACTOR Future changeFeedList(Database db) { namespace fdb_cli { +ACTOR Future requestVersionUpdate(Database localDb, Reference feedData) { + loop { + wait(delay(5.0)); + Transaction tr(localDb); + state Version ver = wait(tr.getReadVersion()); + printf("Requesting version %d\n", ver); + wait(feedData->whenAtLeast(ver)); + printf("Feed at version %d\n", ver); + } +} + ACTOR Future changeFeedCommandActor(Database localDb, std::vector tokens, Future warn) { if 
(tokens.size() == 1) { printUsage(tokens[0]); @@ -117,14 +128,16 @@ ACTOR Future changeFeedCommandActor(Database localDb, std::vector>> feedResults; - state Future feed = localDb->getChangeFeedStream(feedResults, tokens[2], begin, end); + state Reference feedData = makeReference(); + state Future feed = localDb->getChangeFeedStream(feedData, tokens[2], begin, end); + state Future versionUpdates = requestVersionUpdate(localDb, feedData); printf("\n"); try { state Future feedInterrupt = LineNoise::onKeyboardInterrupt(); loop { choose { - when(Standalone> res = waitNext(feedResults.getFuture())) { + when(Standalone> res = + waitNext(feedData->mutations.getFuture())) { for (auto& it : res) { for (auto& it2 : it.mutations) { printf("%lld %s\n", it.version, it2.toString().c_str()); @@ -134,7 +147,7 @@ ACTOR Future changeFeedCommandActor(Database localDb, std::vector(); feed.cancel(); - feedResults = PromiseStream>>(); + feedData = makeReference(); break; } } diff --git a/fdbclient/ClientKnobs.cpp b/fdbclient/ClientKnobs.cpp index 66c4390c4d..939a440b8d 100644 --- a/fdbclient/ClientKnobs.cpp +++ b/fdbclient/ClientKnobs.cpp @@ -100,6 +100,7 @@ void ClientKnobs::initialize(Randomize randomize) { init( RANGESTREAM_FRAGMENT_SIZE, 1e6 ); init( RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT, 20 ); init( QUARANTINE_TSS_ON_MISMATCH, true ); if( randomize && BUGGIFY ) QUARANTINE_TSS_ON_MISMATCH = false; // if true, a tss mismatch will put the offending tss in quarantine. 
If false, it will just be killed + init( CHANGE_FEED_EMPTY_BATCH_TIME, 0.005 ); //KeyRangeMap init( KRM_GET_RANGE_LIMIT, 1e5 ); if( randomize && BUGGIFY ) KRM_GET_RANGE_LIMIT = 10; diff --git a/fdbclient/ClientKnobs.h b/fdbclient/ClientKnobs.h index ba1a49e379..fefeceb32f 100644 --- a/fdbclient/ClientKnobs.h +++ b/fdbclient/ClientKnobs.h @@ -100,6 +100,7 @@ public: int64_t RANGESTREAM_FRAGMENT_SIZE; int RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT; bool QUARANTINE_TSS_ON_MISMATCH; + double CHANGE_FEED_EMPTY_BATCH_TIME; // KeyRangeMap int KRM_GET_RANGE_LIMIT; diff --git a/fdbclient/DatabaseContext.h b/fdbclient/DatabaseContext.h index 837d4ec793..debe521075 100644 --- a/fdbclient/DatabaseContext.h +++ b/fdbclient/DatabaseContext.h @@ -20,6 +20,7 @@ #ifndef DatabaseContext_h #define DatabaseContext_h +#include "fdbclient/Notified.h" #include "flow/FastAlloc.h" #include "flow/FastRef.h" #include "fdbclient/StorageServerInterface.h" @@ -146,6 +147,25 @@ public: WatchMetadata(Key key, Optional value, Version version, TransactionInfo info, TagSet tags); }; +struct ChangeFeedStorageData : ReferenceCounted { + UID id; + Future updater; + NotifiedVersion version; + NotifiedVersion desired; +}; + +struct ChangeFeedData : ReferenceCounted { + PromiseStream>> mutations; + + Version getVersion(); + Future whenAtLeast(Version version); + + NotifiedVersion lastReturnedVersion; + std::vector> storageData; + AsyncVar notAtLatest; + Promise refresh; +}; + class DatabaseContext : public ReferenceCounted, public FastAllocated, NonCopyable { public: static DatabaseContext* allocateOnForeignThread() { @@ -252,7 +272,7 @@ public: // Management API, create snapshot Future createSnapshot(StringRef uid, StringRef snapshot_command); - Future getChangeFeedStream(const PromiseStream>>& results, + Future getChangeFeedStream(Reference results, Key rangeID, Version begin = 0, Version end = std::numeric_limits::max(), @@ -345,6 +365,9 @@ public: std::unordered_map> tssMetrics; // map from changeFeedId -> 
changeFeedRange std::unordered_map changeFeedCache; + std::unordered_map> changeFeedUpdaters; + + Reference getStorageData(StorageServerInterface interf); UID dbId; IsInternal internal; // Only contexts created through the C client and fdbcli are non-internal diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 117524a43a..883140d135 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -65,6 +65,7 @@ #include "flow/ActorCollection.h" #include "flow/DeterministicRandom.h" #include "flow/Error.h" +#include "flow/FastRef.h" #include "flow/IRandom.h" #include "flow/flow.h" #include "flow/genericactors.actor.h" @@ -6663,12 +6664,101 @@ Future DatabaseContext::createSnapshot(StringRef uid, StringRef snapshot_c return createSnapshotActor(this, UID::fromString(uid_str), snapshot_command); } +ACTOR Future storageFeedVersionUpdater(StorageServerInterface interf, ChangeFeedStorageData* self) { + loop { + if (self->version.get() < self->desired.get()) { + wait(delay(CLIENT_KNOBS->CHANGE_FEED_EMPTY_BATCH_TIME) || self->version.whenAtLeast(self->desired.get())); + if (self->version.get() < self->desired.get()) { + ChangeFeedVersionUpdateReply rep = wait(brokenPromiseToNever( + interf.changeFeedVersionUpdate.getReply(ChangeFeedVersionUpdateRequest(self->desired.get())))); + if (rep.version > self->version.get()) { + self->version.set(rep.version); + } + } + } else { + wait(self->desired.whenAtLeast(self->version.get() + 1)); + } + } +} + +Reference DatabaseContext::getStorageData(StorageServerInterface interf) { + auto it = changeFeedUpdaters.find(interf.id()); + if (it == changeFeedUpdaters.end()) { + Reference newStorageUpdater = makeReference(); + newStorageUpdater->id = interf.id(); + newStorageUpdater->updater = storageFeedVersionUpdater(interf, newStorageUpdater.getPtr()); + changeFeedUpdaters[interf.id()] = newStorageUpdater; + return newStorageUpdater; + } + return it->second; +} + +Version 
ChangeFeedData::getVersion() { + if (notAtLatest.get() == 0 && mutations.isEmpty()) { + Version v = storageData[0]->version.get(); + for (int i = 1; i < storageData.size(); i++) { + if (storageData[i]->version.get() < v) { + v = storageData[i]->version.get(); + } + } + return std::max(v, lastReturnedVersion.get()); + } + return lastReturnedVersion.get(); +} + +ACTOR Future changeFeedWhenAtLatest(ChangeFeedData* self, Version version) { + state Future lastReturned = self->lastReturnedVersion.whenAtLeast(version); + loop { + if (self->notAtLatest.get() == 0) { + std::vector> allAtLeast; + for (auto& it : self->storageData) { + if (it->version.get() < version) { + if (version > it->desired.get()) { + it->desired.set(version); + } + allAtLeast.push_back(it->version.whenAtLeast(version)); + } + } + choose { + when(wait(lastReturned)) { return Void(); } + when(wait(waitForAll(allAtLeast))) { + if (self->mutations.isEmpty()) { + return Void(); + } + choose { + when(wait(self->mutations.onEmpty())) { + wait(delay(0)); + return Void(); + } + when(wait(lastReturned)) { return Void(); } + when(wait(self->refresh.getFuture())) {} + } + } + when(wait(self->refresh.getFuture())) {} + } + } else { + choose { + when(wait(lastReturned)) { return Void(); } + when(wait(self->notAtLatest.onChange())) {} + when(wait(self->refresh.getFuture())) {} + } + } + } +} + +Future ChangeFeedData::whenAtLeast(Version version) { + return changeFeedWhenAtLatest(this, version); +} + ACTOR Future singleChangeFeedStream(StorageServerInterface interf, PromiseStream> results, Key rangeID, Version begin, Version end, - KeyRange range) { + KeyRange range, + Reference feedData, + Reference storageData) { + state bool atLatestVersion = false; loop { try { state Version lastEmpty = invalidVersion; @@ -6699,6 +6789,13 @@ ACTOR Future singleChangeFeedStream(StorageServerInterface interf, results.sendError(end_of_stream()); return Void(); } + if (!atLatestVersion && rep.atLatestVersion) { + atLatestVersion = 
true; + feedData->notAtLatest.set(feedData->notAtLatest.get() - 1); + } + if (rep.minStreamVersion > storageData->version.get()) { + storageData->version.set(rep.minStreamVersion); + } } } catch (Error& e) { if (e.code() == error_code_actor_cancelled) { @@ -6716,17 +6813,39 @@ struct MutationAndVersionStream { bool operator<(MutationAndVersionStream const& rhs) const { return next.version > rhs.next.version; } }; -ACTOR Future mergeChangeFeedStream(std::vector> interfs, - PromiseStream>> results, +ACTOR Future mergeChangeFeedStream(Reference db, + std::vector> interfs, + Reference results, Key rangeID, Version* begin, Version end) { state std::priority_queue> mutations; state std::vector> fetchers(interfs.size()); state std::vector streams(interfs.size()); + + for (auto& it : results->storageData) { + if (it->debugGetReferenceCount() == 2) { + db->changeFeedUpdaters.erase(it->id); + } + } + results->storageData.clear(); + Promise refresh = results->refresh; + results->refresh = Promise(); for (int i = 0; i < interfs.size(); i++) { - fetchers[i] = - singleChangeFeedStream(interfs[i].first, streams[i].results, rangeID, *begin, end, interfs[i].second); + results->storageData.push_back(db->getStorageData(interfs[i].first)); + } + results->notAtLatest.set(interfs.size()); + refresh.send(Void()); + + for (int i = 0; i < interfs.size(); i++) { + fetchers[i] = singleChangeFeedStream(interfs[i].first, + streams[i].results, + rangeID, + *begin, + end, + interfs[i].second, + results, + results->storageData[i]); } state int interfNum = 0; while (interfNum < interfs.size()) { @@ -6750,7 +6869,8 @@ ACTOR Future mergeChangeFeedStream(std::vectormutations.send(nextOut); + results->lastReturnedVersion.set(nextOut.back().version); nextOut = Standalone>(); } checkVersion = nextStream.next.version; @@ -6775,7 +6895,8 @@ ACTOR Future mergeChangeFeedStream(std::vectormutations.send(nextOut); + results->lastReturnedVersion.set(nextOut.back().version); } throw end_of_stream(); } @@ 
-6814,7 +6935,7 @@ ACTOR Future getChangeFeedRange(Reference db, Databas } ACTOR Future getChangeFeedStreamActor(Reference db, - PromiseStream>> results, + Reference results, Key rangeID, Version begin, Version end, @@ -6887,32 +7008,57 @@ ACTOR Future getChangeFeedStreamActor(Reference db, interfs.push_back(std::make_pair(locations[i].second->getInterface(chosenLocations[i]), locations[i].first & range)); } - wait(mergeChangeFeedStream(interfs, results, rangeID, &begin, end) || cx->connectionFileChanged()); + wait(mergeChangeFeedStream(db, interfs, results, rangeID, &begin, end) || cx->connectionFileChanged()); } else { state ChangeFeedStreamRequest req; req.rangeID = rangeID; req.begin = begin; req.end = end; req.range = range; - + StorageServerInterface interf = locations[0].second->getInterface(chosenLocations[0]); state ReplyPromiseStream replyStream = - locations[0] - .second->get(chosenLocations[0], &StorageServerInterface::changeFeedStream) - .getReplyStream(req); - + interf.changeFeedStream.getReplyStream(req); + for (auto& it : results->storageData) { + if (it->debugGetReferenceCount() == 2) { + db->changeFeedUpdaters.erase(it->id); + } + } + results->storageData.clear(); + results->storageData.push_back(db->getStorageData(interf)); + Promise refresh = results->refresh; + results->refresh = Promise(); + results->notAtLatest.set(1); + refresh.send(Void()); + state bool atLatest = false; loop { - wait(results.onEmpty()); + wait(results->mutations.onEmpty()); choose { when(wait(cx->connectionFileChanged())) { break; } when(ChangeFeedStreamReply rep = waitNext(replyStream.getFuture())) { begin = rep.mutations.back().version + 1; - results.send(Standalone>(rep.mutations, rep.arena)); + results->mutations.send( + Standalone>(rep.mutations, rep.arena)); + results->lastReturnedVersion.set(rep.mutations.back().version); + if (!atLatest && rep.atLatestVersion) { + atLatest = true; + results->notAtLatest.set(0); + } + if (rep.minStreamVersion > 
results->storageData[0]->version.get()) { + results->storageData[0]->version.set(rep.minStreamVersion); + } } } } } } catch (Error& e) { if (e.code() == error_code_actor_cancelled) { + for (auto& it : results->storageData) { + if (it->debugGetReferenceCount() == 2) { + db->changeFeedUpdaters.erase(it->id); + } + } + results->storageData.clear(); + results->refresh.sendError(change_feed_cancelled()); throw; } if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed || @@ -6922,19 +7068,25 @@ ACTOR Future getChangeFeedStreamActor(Reference db, cx->invalidateCache(keys); wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY)); } else { - results.sendError(e); + results->mutations.sendError(e); + results->refresh.sendError(change_feed_cancelled()); + for (auto& it : results->storageData) { + if (it->debugGetReferenceCount() == 2) { + db->changeFeedUpdaters.erase(it->id); + } + } + results->storageData.clear(); return Void(); } } } } -Future DatabaseContext::getChangeFeedStream( - const PromiseStream>>& results, - Key rangeID, - Version begin, - Version end, - KeyRange range) { +Future DatabaseContext::getChangeFeedStream(Reference results, + Key rangeID, + Version begin, + Version end, + KeyRange range) { return getChangeFeedStreamActor(Reference::addRef(this), results, rangeID, begin, end, range); } diff --git a/fdbclient/StorageServerInterface.h b/fdbclient/StorageServerInterface.h index 1671abe67d..a2cd86166f 100644 --- a/fdbclient/StorageServerInterface.h +++ b/fdbclient/StorageServerInterface.h @@ -81,6 +81,7 @@ struct StorageServerInterface { RequestStream changeFeedStream; RequestStream overlappingChangeFeeds; RequestStream changeFeedPop; + RequestStream changeFeedVersionUpdate; explicit StorageServerInterface(UID uid) : uniqueID(uid) {} StorageServerInterface() : uniqueID(deterministicRandom()->randomUniqueID()) {} @@ -129,6 +130,8 @@ struct StorageServerInterface { 
RequestStream(getValue.getEndpoint().getAdjustedEndpoint(15)); changeFeedPop = RequestStream(getValue.getEndpoint().getAdjustedEndpoint(16)); + changeFeedVersionUpdate = RequestStream( + getValue.getEndpoint().getAdjustedEndpoint(17)); } } else { ASSERT(Ar::isDeserializing); @@ -174,6 +177,7 @@ struct StorageServerInterface { streams.push_back(changeFeedStream.getReceiver()); streams.push_back(overlappingChangeFeeds.getReceiver()); streams.push_back(changeFeedPop.getReceiver()); + streams.push_back(changeFeedVersionUpdate.getReceiver()); FlowTransport::transport().addEndpoints(streams); } }; @@ -639,6 +643,8 @@ struct ChangeFeedStreamReply : public ReplyPromiseStreamReply { constexpr static FileIdentifier file_identifier = 1783066; Arena arena; VectorRef mutations; + bool atLatestVersion = false; + Version minStreamVersion = invalidVersion; ChangeFeedStreamReply() {} @@ -646,7 +652,13 @@ struct ChangeFeedStreamReply : public ReplyPromiseStreamReply { template void serialize(Ar& ar) { - serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, ReplyPromiseStreamReply::sequence, mutations, arena); + serializer(ar, + ReplyPromiseStreamReply::acknowledgeToken, + ReplyPromiseStreamReply::sequence, + mutations, + atLatestVersion, + minStreamVersion, + arena); } }; @@ -734,6 +746,33 @@ struct OverlappingChangeFeedsRequest { } }; +struct ChangeFeedVersionUpdateReply { + constexpr static FileIdentifier file_identifier = 11815134; + Version version = 0; + + ChangeFeedVersionUpdateReply() {} + explicit ChangeFeedVersionUpdateReply(Version version) : version(version) {} + + template + void serialize(Ar& ar) { + serializer(ar, version); + } +}; + +struct ChangeFeedVersionUpdateRequest { + constexpr static FileIdentifier file_identifier = 6795746; + Version minVersion; + ReplyPromise reply; + + ChangeFeedVersionUpdateRequest() {} + explicit ChangeFeedVersionUpdateRequest(Version minVersion) : minVersion(minVersion) {} + + template + void serialize(Ar& ar) { + serializer(ar, 
minVersion, reply); + } +}; + struct GetStorageMetricsReply { constexpr static FileIdentifier file_identifier = 15491478; StorageMetrics load; diff --git a/fdbserver/BlobWorker.actor.cpp b/fdbserver/BlobWorker.actor.cpp index d85d113e99..3359d00117 100644 --- a/fdbserver/BlobWorker.actor.cpp +++ b/fdbserver/BlobWorker.actor.cpp @@ -1054,8 +1054,8 @@ static Version doGranuleRollback(Reference metadata, ACTOR Future blobGranuleUpdateFiles(Reference bwData, Reference metadata, Future assignFuture) { - state PromiseStream>> oldChangeFeedStream; - state PromiseStream>> changeFeedStream; + state Reference oldChangeFeedStream = makeReference(); + state Reference changeFeedStream = makeReference(); state Future inFlightBlobSnapshot; state std::deque inFlightDeltaFiles; state Future oldChangeFeedFuture; @@ -1220,7 +1220,8 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, state Standalone> mutations; if (readOldChangeFeed) { - Standalone> oldMutations = waitNext(oldChangeFeedStream.getFuture()); + Standalone> oldMutations = + waitNext(oldChangeFeedStream->mutations.getFuture()); // TODO filter old mutations won't be necessary, SS does it already if (filterOldMutations( metadata->keyRange, &oldMutations, &mutations, startState.changeFeedStartVersion)) { @@ -1235,10 +1236,11 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, // now that old change feed is cancelled, clear out any mutations still in buffer by replacing // promise stream - oldChangeFeedStream = PromiseStream>>(); + oldChangeFeedStream = makeReference(); } } else { - Standalone> newMutations = waitNext(changeFeedStream.getFuture()); + Standalone> newMutations = + waitNext(changeFeedStream->mutations.getFuture()); mutations = newMutations; } @@ -1504,8 +1506,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, // reset change feeds to cfRollbackVersion if (readOldChangeFeed) { - oldChangeFeedStream = - PromiseStream>>(); + oldChangeFeedStream = makeReference(); oldChangeFeedFuture = 
bwData->db->getChangeFeedStream( oldChangeFeedStream, oldCFKey.get(), @@ -1513,7 +1514,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, MAX_VERSION, startState.parentGranule.get().first /*metadata->keyRange*/); } else { - changeFeedStream = PromiseStream>>(); + changeFeedStream = makeReference(); changeFeedFuture = bwData->db->getChangeFeedStream(changeFeedStream, cfKey, cfRollbackVersion + 1, diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index c7bb89afc6..4b4f610551 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -607,6 +607,7 @@ public: Deque, Version>> changeFeedVersions; std::map> changeFeedRemovals; std::set currentChangeFeeds; + std::unordered_map> changeFeedClientVersions; // newestAvailableVersion[k] // == invalidVersion -> k is unavailable at all versions @@ -622,7 +623,7 @@ public: // that were only partly available (due to cancelled fetchKeys) // The following are in rough order from newest to oldest - Version lastTLogVersion, lastVersionWithData, restoredVersion; + Version lastTLogVersion, lastVersionWithData, restoredVersion, prevVersion; NotifiedVersion version; NotifiedVersion desiredOldestVersion; // We can increase oldestVersion (and then durableVersion) to this version // when the disk permits @@ -884,10 +885,11 @@ public: Histogram::Unit::microseconds)), tag(invalidTag), poppedAllAfter(std::numeric_limits::max()), cpuUsage(0.0), diskUsage(0.0), storage(this, storage), shardChangeCounter(0), lastTLogVersion(0), lastVersionWithData(0), restoredVersion(0), - rebootAfterDurableVersion(std::numeric_limits::max()), primaryLocality(tagLocalityInvalid), - knownCommittedVersion(0), versionLag(0), logProtocol(0), thisServerID(ssi.id()), tssInQuarantine(false), db(db), - actors(false), byteSampleClears(false, LiteralStringRef("\xff\xff\xff")), durableInProgress(Void()), - watchBytes(0), numWatches(0), noRecentUpdates(false), lastUpdate(now()), + prevVersion(0), 
rebootAfterDurableVersion(std::numeric_limits::max()), + primaryLocality(tagLocalityInvalid), knownCommittedVersion(0), versionLag(0), logProtocol(0), + thisServerID(ssi.id()), tssInQuarantine(false), db(db), actors(false), + byteSampleClears(false, LiteralStringRef("\xff\xff\xff")), durableInProgress(Void()), watchBytes(0), + numWatches(0), noRecentUpdates(false), lastUpdate(now()), readQueueSizeMetric(LiteralStringRef("StorageServer.ReadQueueSize")), updateEagerReads(nullptr), fetchKeysParallelismLock(SERVER_KNOBS->FETCH_KEYS_PARALLELISM), fetchKeysBytesBudget(SERVER_KNOBS->STORAGE_FETCH_BYTES), fetchKeysBudgetUsed(false), @@ -1712,7 +1714,7 @@ ACTOR Future getChangeFeedMutations(StorageServer* data, state ChangeFeedStreamReply memoryReply; state int remainingLimitBytes = CLIENT_KNOBS->REPLY_BYTE_LIMIT; state int remainingDurableBytes = CLIENT_KNOBS->REPLY_BYTE_LIMIT; - wait(delay(0, TaskPriority::DefaultEndpoint)); + if (data->version.get() < req.begin) { wait(data->version.whenAtLeast(req.begin)); } @@ -1847,17 +1849,47 @@ ACTOR Future localChangeFeedStream(StorageServer* data, ACTOR Future changeFeedStreamQ(StorageServer* data, ChangeFeedStreamRequest req) { state Span span("SS:getChangeFeedStream"_loc, { req.spanContext }); + state bool atLatest = false; + state UID streamUID = deterministicRandom()->randomUniqueID(); + state bool removeUID = false; req.reply.setByteLimit(SERVER_KNOBS->RANGESTREAM_LIMIT_BYTES); wait(delay(0, TaskPriority::DefaultEndpoint)); try { loop { - wait(req.reply.onReady()); - ChangeFeedStreamReply _feedReply = wait(getChangeFeedMutations(data, req, false)); + Future onReady = req.reply.onReady(); + if (atLatest && !onReady.isReady()) { + data->changeFeedClientVersions[req.reply.getEndpoint().getPrimaryAddress()][streamUID] = + data->version.get(); + removeUID = true; + } + wait(onReady); + state Future feedReplyFuture = getChangeFeedMutations(data, req, false); + if (atLatest && !removeUID && !feedReplyFuture.isReady()) { + 
data->changeFeedClientVersions[req.reply.getEndpoint().getPrimaryAddress()][streamUID] = + data->prevVersion; + removeUID = true; + } + ChangeFeedStreamReply _feedReply = wait(feedReplyFuture); ChangeFeedStreamReply feedReply = _feedReply; req.begin = feedReply.mutations.back().version + 1; + if (!atLatest && feedReply.mutations.back().mutations.empty()) { + atLatest = true; + } + auto& clientVersions = data->changeFeedClientVersions[req.reply.getEndpoint().getPrimaryAddress()]; + Version minVersion = removeUID ? data->version.get() : data->prevVersion; + if (removeUID) { + data->changeFeedClientVersions[req.reply.getEndpoint().getPrimaryAddress()].erase(streamUID); + removeUID = false; + } + + for (auto& it : clientVersions) { + minVersion = std::min(minVersion, it.second); + } + feedReply.atLatestVersion = atLatest; + feedReply.minStreamVersion = minVersion; req.reply.send(feedReply); if (feedReply.mutations.back().version == req.end - 1) { req.reply.sendError(end_of_stream()); @@ -1869,13 +1901,21 @@ ACTOR Future changeFeedStreamQ(StorageServer* data, ChangeFeedStreamReques req.reply.sendError(unknown_change_feed()); return Void(); } - choose { - when(wait(delay(5.0, TaskPriority::DefaultEndpoint))) {} - when(wait(feed->second->newMutations.onTrigger())) {} - } + wait(feed->second->newMutations + .onTrigger()); // FIXME: check that this is triggered when the range is moved to a different + // server, also check that the stream is closed } } } catch (Error& e) { + auto it = data->changeFeedClientVersions.find(req.reply.getEndpoint().getPrimaryAddress()); + if (it != data->changeFeedClientVersions.end()) { + if (removeUID) { + it->second.erase(streamUID); + } + if (it->second.empty()) { + data->changeFeedClientVersions.erase(it); + } + } if (e.code() != error_code_operation_obsolete) { if (!canReplyWith(e)) throw; @@ -1885,6 +1925,18 @@ ACTOR Future changeFeedStreamQ(StorageServer* data, ChangeFeedStreamReques return Void(); } +ACTOR Future 
changeFeedVersionUpdateQ(StorageServer* data, ChangeFeedVersionUpdateRequest req) { + wait(data->version.whenAtLeast(req.minVersion)); + wait(delay(0)); + auto& clientVersions = data->changeFeedClientVersions[req.reply.getEndpoint().getPrimaryAddress()]; + Version minVersion = data->version.get(); + for (auto& it : clientVersions) { + minVersion = std::min(minVersion, it.second); + } + req.reply.send(ChangeFeedVersionUpdateReply(minVersion)); + return Void(); +} + #ifdef NO_INTELLISENSE size_t WATCH_OVERHEAD_WATCHQ = sizeof(WatchValueSendReplyActorState) + sizeof(WatchValueSendReplyActor); @@ -3294,14 +3346,14 @@ ACTOR Future fetchChangeFeedApplier(StorageServer* data, KeyRange range, Version fetchVersion, bool existing) { - state PromiseStream>> feedResults; + state Reference feedResults = makeReference(); state Future feed = data->cx->getChangeFeedStream( feedResults, rangeId, 0, existing ? fetchVersion + 1 : data->version.get() + 1, range); if (!existing) { try { loop { - Standalone> res = waitNext(feedResults.getFuture()); + Standalone> res = waitNext(feedResults->mutations.getFuture()); for (auto& it : res) { if (it.mutations.size()) { data->storage.writeKeyValue( @@ -3332,7 +3384,8 @@ ACTOR Future fetchChangeFeedApplier(StorageServer* data, localResult = _localResult; try { loop { - state Standalone> remoteResult = waitNext(feedResults.getFuture()); + state Standalone> remoteResult = + waitNext(feedResults->mutations.getFuture()); state int remoteLoc = 0; while (remoteLoc < remoteResult.size()) { if (remoteResult[remoteLoc].version < localResult.version) { @@ -4581,6 +4634,7 @@ ACTOR Future update(StorageServer* data, bool* pReceivedUpdate) { cloneCursor2->setProtocolVersion(data->logProtocol); state SpanID spanContext = SpanID(); state double beforeTLogMsgsUpdates = now(); + state std::set updatedChangeFeeds; for (; cloneCursor2->hasMessage(); cloneCursor2->nextMessage()) { if (mutationBytes > SERVER_KNOBS->DESIRED_UPDATE_BYTES) { mutationBytes = 0; @@ 
-4599,12 +4653,7 @@ ACTOR Future update(StorageServer* data, bool* pReceivedUpdate) { if (data->currentChangeFeeds.size()) { data->changeFeedVersions.push_back(std::make_pair( std::vector(data->currentChangeFeeds.begin(), data->currentChangeFeeds.end()), ver)); - for (auto& it : data->currentChangeFeeds) { - auto feed = data->uidChangeFeed.find(it); - if (feed != data->uidChangeFeed.end()) { - feed->second->newMutations.trigger(); - } - } + updatedChangeFeeds.insert(data->currentChangeFeeds.begin(), data->currentChangeFeeds.end()); data->currentChangeFeeds.clear(); } ver = cloneCursor2->version().version; @@ -4690,12 +4739,7 @@ ACTOR Future update(StorageServer* data, bool* pReceivedUpdate) { if (data->currentChangeFeeds.size()) { data->changeFeedVersions.push_back(std::make_pair( std::vector(data->currentChangeFeeds.begin(), data->currentChangeFeeds.end()), ver)); - for (auto& it : data->currentChangeFeeds) { - auto feed = data->uidChangeFeed.find(it); - if (feed != data->uidChangeFeed.end()) { - feed->second->newMutations.trigger(); - } - } + updatedChangeFeeds.insert(data->currentChangeFeeds.begin(), data->currentChangeFeeds.end()); data->currentChangeFeeds.clear(); } @@ -4737,8 +4781,17 @@ ACTOR Future update(StorageServer* data, bool* pReceivedUpdate) { data->noRecentUpdates.set(false); data->lastUpdate = now(); + + data->prevVersion = data->version.get(); data->version.set(ver); // Triggers replies to waiting gets for new version(s) + for (auto& it : updatedChangeFeeds) { + auto feed = data->uidChangeFeed.find(it); + if (feed != data->uidChangeFeed.end()) { + feed->second->newMutations.trigger(); + } + } + setDataVersion(data->thisServerID, data->version.get()); if (data->otherError.getFuture().isReady()) data->otherError.getFuture().get(); @@ -5843,6 +5896,15 @@ ACTOR Future serveChangeFeedPopRequests(StorageServer* self, FutureStream< } } +ACTOR Future serveChangeFeedVersionUpdateRequests( + StorageServer* self, + FutureStream changeFeedVersionUpdate) { + 
loop { + ChangeFeedVersionUpdateRequest req = waitNext(changeFeedVersionUpdate); + self->actors.add(self->readGuard(req, changeFeedVersionUpdateQ)); + } +} + ACTOR Future reportStorageServerState(StorageServer* self) { if (!SERVER_KNOBS->REPORT_DD_METRICS) { return Void(); @@ -5895,6 +5957,7 @@ ACTOR Future storageServerCore(StorageServer* self, StorageServerInterface self->actors.add(serveChangeFeedStreamRequests(self, ssi.changeFeedStream.getFuture())); self->actors.add(serveOverlappingChangeFeedsRequests(self, ssi.overlappingChangeFeeds.getFuture())); self->actors.add(serveChangeFeedPopRequests(self, ssi.changeFeedPop.getFuture())); + self->actors.add(serveChangeFeedVersionUpdateRequests(self, ssi.changeFeedVersionUpdate.getFuture())); self->actors.add(traceRole(Role::STORAGE_SERVER, ssi.id())); self->actors.add(reportStorageServerState(self)); diff --git a/fdbserver/workloads/ChangeFeeds.actor.cpp b/fdbserver/workloads/ChangeFeeds.actor.cpp index 06415d0a81..a032ad6884 100644 --- a/fdbserver/workloads/ChangeFeeds.actor.cpp +++ b/fdbserver/workloads/ChangeFeeds.actor.cpp @@ -65,10 +65,10 @@ ACTOR Future>> readMutations(Databa state Standalone> output; loop { try { - state PromiseStream>> results; + state Reference results = makeReference(); state Future stream = cx->getChangeFeedStream(results, rangeID, begin, end, normalKeys); loop { - Standalone> res = waitNext(results.getFuture()); + Standalone> res = waitNext(results->mutations.getFuture()); output.arena().dependsOn(res.arena()); for (auto& it : res) { if (it.mutations.size() == 1 && it.mutations.back().param1 == lastEpochEndPrivateKey) { diff --git a/flow/error_definitions.h b/flow/error_definitions.h index e468e46801..0034b61245 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -82,6 +82,7 @@ ERROR( wrong_format_version, 1058, "Format version not recognized" ) ERROR( unknown_change_feed, 1059, "Change feed not found" ) ERROR( change_feed_not_registered, 1060, "Change feed not 
registered" ) ERROR( granule_assignment_conflict, 1061, "Conflicting attempts to assign blob granules" ) +ERROR( change_feed_cancelled, 1062, "Change feed was cancelled" ) ERROR( broken_promise, 1100, "Broken promise" ) ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" ) From 6e81b83924f71561a5fd422b514722532dabdd8e Mon Sep 17 00:00:00 2001 From: Evan Tschannen Date: Mon, 15 Nov 2021 11:47:42 -0800 Subject: [PATCH 064/142] fix: cleanup change feeds which have been completely removed from a storage server --- fdbserver/storageserver.actor.cpp | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 12b557775e..5acd71f7f2 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -4577,6 +4577,46 @@ void changeServerKeys(StorageServer* data, setAvailableStatus(data, range, true); } validate(data); + + if (!nowAssigned) { + std::map candidateFeeds; + auto ranges = data->keyChangeFeed.intersectingRanges(keys); + for (auto r : ranges) { + for (auto feed : r.value()) { + candidateFeeds[feed->id] = feed->range; + } + } + for (auto f : candidateFeeds) { + bool foundAssigned = false; + auto shards = data->shards.intersectingRanges(f.second); + for (auto shard : shards) { + if (shard->value()->assigned()) { + foundAssigned = true; + break; + } + } + if (!foundAssigned) { + Key beginClearKey = f.first.withPrefix(persistChangeFeedKeys.begin); + auto& mLV = data->addVersionToMutationLog(data->data().getLatestVersion()); + data->addMutationToMutationLog( + mLV, MutationRef(MutationRef::ClearRange, beginClearKey, keyAfter(beginClearKey))); + data->addMutationToMutationLog(mLV, + MutationRef(MutationRef::ClearRange, + changeFeedDurableKey(f.first, 0), + changeFeedDurableKey(f.first, version))); + auto rs = data->keyChangeFeed.modify(f.second); + for (auto r = rs.begin(); r != rs.end(); ++r) { + auto& feedList = r->value(); + for 
(int i = 0; i < feedList.size(); i++) { + if (feedList[i]->id == f.first) { + swapAndPop(&feedList, i--); + } + } + } + data->uidChangeFeed.erase(f.first); + } + } + } } void rollback(StorageServer* data, Version rollbackVersion, Version nextVersion) { From 4630b0ccea43d8bab70be9911b2ab8efd1d95b74 Mon Sep 17 00:00:00 2001 From: Renxuan Wang Date: Mon, 15 Nov 2021 14:34:52 -0800 Subject: [PATCH 065/142] Move DNS mock from SimExternalConnection to Sim2. This is a revise PR of #5934. In simulation, we don't have direct access to SimExternalConnection. --- fdbrpc/SimExternalConnection.actor.cpp | 5 ----- fdbrpc/SimExternalConnection.h | 3 --- fdbrpc/sim2.actor.cpp | 13 +++++++++++++ flow/Net2.actor.cpp | 6 ++++++ flow/network.h | 3 +++ 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/fdbrpc/SimExternalConnection.actor.cpp b/fdbrpc/SimExternalConnection.actor.cpp index 7fae5cf522..8d7146f890 100644 --- a/fdbrpc/SimExternalConnection.actor.cpp +++ b/fdbrpc/SimExternalConnection.actor.cpp @@ -108,8 +108,6 @@ void MockDNS::clearMockTCPEndpoints() { hostnameToAddresses.clear(); } -MockDNS SimExternalConnection::mockDNS; - void SimExternalConnection::close() { socket.close(); } @@ -195,9 +193,6 @@ ACTOR static Future> resolveTCPEndpointImpl(std::str Future> SimExternalConnection::resolveTCPEndpoint(const std::string& host, const std::string& service) { - if (mockDNS.findMockTCPEndpoint(host, service)) { - return mockDNS.getTCPEndpoint(host, service); - } return resolveTCPEndpointImpl(host, service); } diff --git a/fdbrpc/SimExternalConnection.h b/fdbrpc/SimExternalConnection.h index 9ce18340bf..8ff12b4d4e 100644 --- a/fdbrpc/SimExternalConnection.h +++ b/fdbrpc/SimExternalConnection.h @@ -68,9 +68,6 @@ public: UID getDebugID() const override; static Future> resolveTCPEndpoint(const std::string& host, const std::string& service); static Future> connect(NetworkAddress toAddr); - -private: - static MockDNS mockDNS; }; #endif diff --git a/fdbrpc/sim2.actor.cpp 
b/fdbrpc/sim2.actor.cpp index 7bdafe0f21..c5501f66a3 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -945,8 +945,18 @@ public: Future> createUDPSocket(NetworkAddress toAddr) override; Future> createUDPSocket(bool isV6 = false) override; + // Add a > pair to mock DNS in simulation. + void addMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses) override { + mockDNS.addMockTCPEndpoint(host, service, addresses); + } Future> resolveTCPEndpoint(const std::string& host, const std::string& service) override { + // If a > pair was injected to mock DNS, use it. + if (mockDNS.findMockTCPEndpoint(host, service)) { + return mockDNS.getTCPEndpoint(host, service); + } return SimExternalConnection::resolveTCPEndpoint(host, service); } ACTOR static Future> onConnect(Future ready, Reference conn) { @@ -2132,6 +2142,9 @@ public: bool yielded; int yield_limit; // how many more times yield may return false before next returning true +private: + MockDNS mockDNS; + #ifdef ENABLE_SAMPLING ActorLineageSet actorLineageSet; #endif diff --git a/flow/Net2.actor.cpp b/flow/Net2.actor.cpp index 6653dbda3d..e49577113f 100644 --- a/flow/Net2.actor.cpp +++ b/flow/Net2.actor.cpp @@ -152,6 +152,12 @@ public: Future> connectExternal(NetworkAddress toAddr, const std::string& host) override; Future> createUDPSocket(NetworkAddress toAddr) override; Future> createUDPSocket(bool isV6) override; + // This method should only be used in simulation. 
+ void addMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses) override { + throw operation_failed(); + } Future> resolveTCPEndpoint(const std::string& host, const std::string& service) override; Reference listen(NetworkAddress localAddr) override; diff --git a/flow/network.h b/flow/network.h index f693aa491d..8a25f82179 100644 --- a/flow/network.h +++ b/flow/network.h @@ -689,6 +689,9 @@ public: // Make an outgoing udp connection without establishing a connection virtual Future> createUDPSocket(bool isV6 = false) = 0; + virtual void addMockTCPEndpoint(const std::string& host, + const std::string& service, + const std::vector& addresses) = 0; // Resolve host name and service name (such as "http" or can be a plain number like "80") to a list of 1 or more // NetworkAddresses virtual Future> resolveTCPEndpoint(const std::string& host, From deb0eb85e89c2ce063c1903296119b01dca20562 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Tue, 16 Nov 2021 00:41:34 -0800 Subject: [PATCH 066/142] Initialize global flow knobs in setupNetwork --- fdbbackup/backup.actor.cpp | 57 +++++++++++++++++------------------ fdbcli/fdbcli.actor.cpp | 4 +-- fdbclient/NativeAPI.actor.cpp | 1 + 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index 61a734ecb6..9d27e29c9f 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -3762,35 +3762,6 @@ int main(int argc, char* argv[]) { } } - IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False); - auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection(); - for (const auto& [knobName, knobValueString] : knobs) { - try { - auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString); - g_knobs.setKnob(knobName, knobValue); - } catch (Error& e) { - if (e.code() == error_code_invalid_option_value) { - fprintf(stderr, - "WARNING: Invalid 
value '%s' for knob option '%s'\n", - knobValueString.c_str(), - knobName.c_str()); - TraceEvent(SevWarnAlways, "InvalidKnobValue") - .detail("Knob", printable(knobName)) - .detail("Value", printable(knobValueString)); - } else { - fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knobName.c_str(), e.what()); - TraceEvent(SevError, "FailedToSetKnob") - .detail("Knob", printable(knobName)) - .detail("Value", printable(knobValueString)) - .error(e); - throw; - } - } - } - - // Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs - g_knobs.initialize(Randomize::False, IsSimulated::False); - if (trace) { if (!traceLogGroup.empty()) setNetworkOption(FDBNetworkOptions::TRACE_LOG_GROUP, StringRef(traceLogGroup)); @@ -3831,6 +3802,34 @@ int main(int argc, char* argv[]) { return FDB_EXIT_ERROR; } + auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection(); + for (const auto& [knobName, knobValueString] : knobs) { + try { + auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString); + g_knobs.setKnob(knobName, knobValue); + } catch (Error& e) { + if (e.code() == error_code_invalid_option_value) { + fprintf(stderr, + "WARNING: Invalid value '%s' for knob option '%s'\n", + knobValueString.c_str(), + knobName.c_str()); + TraceEvent(SevWarnAlways, "InvalidKnobValue") + .detail("Knob", printable(knobName)) + .detail("Value", printable(knobValueString)); + } else { + fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knobName.c_str(), e.what()); + TraceEvent(SevError, "FailedToSetKnob") + .detail("Knob", printable(knobName)) + .detail("Value", printable(knobValueString)) + .error(e); + throw; + } + } + } + + // Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs + g_knobs.initialize(Randomize::False, IsSimulated::False); + TraceEvent("ProgramStart") .setMaxEventLength(12000) .detail("SourceVersion", getSourceVersion()) diff --git a/fdbcli/fdbcli.actor.cpp 
b/fdbcli/fdbcli.actor.cpp index 31ac1a4418..f6381f7bff 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -2432,8 +2432,6 @@ int main(int argc, char** argv) { registerCrashHandler(); - IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False); - #ifdef __unixish__ struct sigaction act; @@ -2548,4 +2546,4 @@ int main(int argc, char** argv) { fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code()); return 1; } -} \ No newline at end of file +} diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index c365c7bcbb..7cfa78d75d 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -2150,6 +2150,7 @@ void setupNetwork(uint64_t transportId, UseMetrics useMetrics) { g_network->addStopCallback(TLS::DestroyOpenSSLGlobalState); FlowTransport::createInstance(true, transportId, WLTOKEN_RESERVED_COUNT); Net2FileSystem::newFileSystem(); + IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False); uncancellable(monitorNetworkBusyness()); } From c53f5aa110e50f98868343a942f3a2d64b5bb91c Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Tue, 16 Nov 2021 02:15:22 -0800 Subject: [PATCH 067/142] Renamed redwood to redwood-1-experimental and file extension to .redwood-v1. 
--- .../source/mr-status-json-schemas.rst.inc | 4 ++-- documentation/sphinx/source/tss.rst | 2 +- fdbclient/DatabaseConfiguration.cpp | 4 ++-- fdbclient/FDBTypes.h | 2 +- fdbclient/ManagementAPI.actor.cpp | 2 +- fdbclient/Schemas.cpp | 4 ++-- fdbserver/SimulatedCluster.actor.cpp | 4 ++-- fdbserver/VersionedBTree.actor.cpp | 20 +++++++++---------- fdbserver/worker.actor.cpp | 4 ++-- fdbserver/workloads/KVStoreTest.actor.cpp | 2 +- tests/KVStoreTest.txt | 6 +++--- 11 files changed, 27 insertions(+), 27 deletions(-) diff --git a/documentation/sphinx/source/mr-status-json-schemas.rst.inc b/documentation/sphinx/source/mr-status-json-schemas.rst.inc index a8d82794e3..80302ed4f7 100644 --- a/documentation/sphinx/source/mr-status-json-schemas.rst.inc +++ b/documentation/sphinx/source/mr-status-json-schemas.rst.inc @@ -700,7 +700,7 @@ "ssd", "ssd-1", "ssd-2", - "ssd-redwood-experimental", + "ssd-redwood-1-experimental", "ssd-rocksdb-experimental", "memory", "memory-1", @@ -713,7 +713,7 @@ "ssd", "ssd-1", "ssd-2", - "ssd-redwood-experimental", + "ssd-redwood-1-experimental", "ssd-rocksdb-experimental", "memory", "memory-1", diff --git a/documentation/sphinx/source/tss.rst b/documentation/sphinx/source/tss.rst index 7d27190532..215e9967b9 100644 --- a/documentation/sphinx/source/tss.rst +++ b/documentation/sphinx/source/tss.rst @@ -31,7 +31,7 @@ Because TSS recruitment only pairs *new* storage processes, you must add process Example commands ---------------- -Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-experimental count=4``. +Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-1-experimental count=4``. Change the desired TSS process count to 2: ``configure tss count=2``. 
diff --git a/fdbclient/DatabaseConfiguration.cpp b/fdbclient/DatabaseConfiguration.cpp index d778b35845..cef31d134e 100644 --- a/fdbclient/DatabaseConfiguration.cpp +++ b/fdbclient/DatabaseConfiguration.cpp @@ -288,7 +288,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const { result["storage_engine"] = "ssd-2"; } else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 && storageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) { - result["storage_engine"] = "ssd-redwood-experimental"; + result["storage_engine"] = "ssd-redwood-1-experimental"; } else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 && storageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) { result["storage_engine"] = "ssd-rocksdb-experimental"; @@ -311,7 +311,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const { } else if (testingStorageServerStoreType == KeyValueStoreType::SSD_BTREE_V2) { result["tss_storage_engine"] = "ssd-2"; } else if (testingStorageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) { - result["tss_storage_engine"] = "ssd-redwood-experimental"; + result["tss_storage_engine"] = "ssd-redwood-1-experimental"; } else if (testingStorageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) { result["tss_storage_engine"] = "ssd-rocksdb-experimental"; } else if (testingStorageServerStoreType == KeyValueStoreType::MEMORY_RADIXTREE) { diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index d0eb4c0d0b..db52ea26dd 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -706,7 +706,7 @@ struct KeyValueStoreType { case SSD_BTREE_V2: return "ssd-2"; case SSD_REDWOOD_V1: - return "ssd-redwood-experimental"; + return "ssd-redwood-1-experimental"; case SSD_ROCKSDB_V1: return "ssd-rocksdb-experimental"; case MEMORY: diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index 53e9dba1a8..c8a06c5481 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ 
-184,7 +184,7 @@ std::map configForToken(std::string const& mode) { } else if (mode == "ssd" || mode == "ssd-2") { logType = KeyValueStoreType::SSD_BTREE_V2; storeType = KeyValueStoreType::SSD_BTREE_V2; - } else if (mode == "ssd-redwood-experimental") { + } else if (mode == "ssd-redwood-1-experimental") { logType = KeyValueStoreType::SSD_BTREE_V2; storeType = KeyValueStoreType::SSD_REDWOOD_V1; } else if (mode == "ssd-rocksdb-experimental") { diff --git a/fdbclient/Schemas.cpp b/fdbclient/Schemas.cpp index e99aafd21a..10f77d7638 100644 --- a/fdbclient/Schemas.cpp +++ b/fdbclient/Schemas.cpp @@ -728,7 +728,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "ssd", "ssd-1", "ssd-2", - "ssd-redwood-experimental", + "ssd-redwood-1-experimental", "ssd-rocksdb-experimental", "memory", "memory-1", @@ -741,7 +741,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "ssd", "ssd-1", "ssd-2", - "ssd-redwood-experimental", + "ssd-redwood-1-experimental", "ssd-rocksdb-experimental", "memory", "memory-1", diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 62d1f71db9..c1cb7d1e8f 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -278,7 +278,7 @@ public: // 0 = "ssd" // 1 = "memory" // 2 = "memory-radixtree-beta" - // 3 = "ssd-redwood-experimental" + // 3 = "ssd-redwood-1-experimental" // 4 = "ssd-rocksdb-experimental" // Requires a comma-separated list of numbers WITHOUT whitespaces std::vector storageEngineExcludeTypes; @@ -1339,7 +1339,7 @@ void SimulationConfig::setStorageEngine(const TestConfig& testConfig) { } case 3: { TEST(true); // Simulated cluster using redwood storage engine - set_config("ssd-redwood-experimental"); + set_config("ssd-redwood-1-experimental"); break; } case 4: { diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 2ae64a8587..58a56508d7 100644 --- 
a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -9133,7 +9133,7 @@ TEST_CASE("Lredwood/correctness/btree") { g_redwoodMetricsActor = Void(); // Prevent trace event metrics from starting g_redwoodMetrics.clear(); - state std::string fileName = params.get("Filename").orDefault("unittest_pageFile.redwood"); + state std::string fileName = params.get("Filename").orDefault("unittest_pageFile.redwood-v1"); IPager2* pager; state bool serialTest = params.getInt("serialTest").orDefault(deterministicRandom()->random01() < 0.25); @@ -9524,7 +9524,7 @@ ACTOR Future randomScans(VersionedBTree* btree, } TEST_CASE(":/redwood/correctness/pager/cow") { - state std::string pagerFile = "unittest_pageFile.redwood"; + state std::string pagerFile = "unittest_pageFile.redwood-v1"; printf("Deleting old test data\n"); deleteFile(pagerFile); @@ -9573,7 +9573,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { state DWALPager* pager; // If a test file is passed in by environment then don't write new data to it. state bool reload = getenv("TESTFILE") == nullptr; - state std::string fileName = reload ? "unittest.redwood" : getenv("TESTFILE"); + state std::string fileName = reload ? 
"unittest.redwood-v1" : getenv("TESTFILE"); if (reload) { printf("Deleting old test data\n"); @@ -9723,7 +9723,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { TEST_CASE(":/redwood/performance/set") { state SignalableActorCollection actors; - state std::string fileName = params.get("Filename").orDefault("unittest.redwood"); + state std::string fileName = params.get("Filename").orDefault("unittest.redwood-v1"); state int pageSize = params.getInt("pageSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_PAGE_SIZE); state int extentSize = params.getInt("extentSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_EXTENT_SIZE); state int64_t pageCacheBytes = params.getInt("pageCacheBytes").orDefault(FLOW_KNOBS->PAGE_CACHE_4K); @@ -10254,9 +10254,9 @@ ACTOR Future doPrefixInsertComparison(int suffixSize, bool usePrefixesInOrder, KVSource source) { - deleteFile("test.redwood"); + deleteFile("test.redwood-v1"); wait(delay(5)); - state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood", UID(), 0); + state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood-v1", UID(), 0); wait(prefixClusteredInsert(redwood, suffixSize, valueSize, source, recordCountTarget, usePrefixesInOrder, true)); wait(closeKVS(redwood)); printf("\n"); @@ -10298,9 +10298,9 @@ TEST_CASE(":/redwood/performance/sequentialInsert") { state int valueSize = params.getInt("valueSize").orDefault(100); state int recordCountTarget = params.getInt("recordCountTarget").orDefault(100e6); - deleteFile("test.redwood"); + deleteFile("test.redwood-v1"); wait(delay(5)); - state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood", UID(), 0); + state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood-v1", UID(), 0); wait(sequentialInsert(redwood, prefixLen, valueSize, recordCountTarget)); wait(closeKVS(redwood)); printf("\n"); @@ -10378,9 +10378,9 @@ 
TEST_CASE(":/redwood/performance/randomRangeScans") { state KVSource source({ { prefixLen, 1000 } }); - deleteFile("test.redwood"); + deleteFile("test.redwood-v1"); wait(delay(5)); - state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood", UID(), 0); + state IKeyValueStore* redwood = openKVStore(KeyValueStoreType::SSD_REDWOOD_V1, "test.redwood-v1", UID(), 0); wait(prefixClusteredInsert( redwood, suffixSize, valueSize, source, writeRecordCountTarget, writePrefixesInOrder, false)); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index dce9bba3c1..f1b68ee680 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -308,7 +308,7 @@ KeyValueStoreSuffix bTreeV1Suffix = { KeyValueStoreType::SSD_BTREE_V1, ".fdb", F KeyValueStoreSuffix bTreeV2Suffix = { KeyValueStoreType::SSD_BTREE_V2, ".sqlite", FilesystemCheck::FILES_ONLY }; KeyValueStoreSuffix memorySuffix = { KeyValueStoreType::MEMORY, "-0.fdq", FilesystemCheck::FILES_ONLY }; KeyValueStoreSuffix memoryRTSuffix = { KeyValueStoreType::MEMORY_RADIXTREE, "-0.fdr", FilesystemCheck::FILES_ONLY }; -KeyValueStoreSuffix redwoodSuffix = { KeyValueStoreType::SSD_REDWOOD_V1, ".redwood", FilesystemCheck::FILES_ONLY }; +KeyValueStoreSuffix redwoodSuffix = { KeyValueStoreType::SSD_REDWOOD_V1, ".redwood-v1", FilesystemCheck::FILES_ONLY }; KeyValueStoreSuffix rocksdbSuffix = { KeyValueStoreType::SSD_ROCKSDB_V1, ".rocksdb", FilesystemCheck::DIRECTORIES_ONLY }; @@ -338,7 +338,7 @@ std::string filenameFromId(KeyValueStoreType storeType, std::string folder, std: else if (storeType == KeyValueStoreType::MEMORY || storeType == KeyValueStoreType::MEMORY_RADIXTREE) return joinPath(folder, prefix + id.toString() + "-"); else if (storeType == KeyValueStoreType::SSD_REDWOOD_V1) - return joinPath(folder, prefix + id.toString() + ".redwood"); + return joinPath(folder, prefix + id.toString() + ".redwood-v1"); else if (storeType == KeyValueStoreType::SSD_ROCKSDB_V1) return 
joinPath(folder, prefix + id.toString() + ".rocksdb"); diff --git a/fdbserver/workloads/KVStoreTest.actor.cpp b/fdbserver/workloads/KVStoreTest.actor.cpp index acf4dc6532..59598043b9 100644 --- a/fdbserver/workloads/KVStoreTest.actor.cpp +++ b/fdbserver/workloads/KVStoreTest.actor.cpp @@ -383,7 +383,7 @@ ACTOR Future testKVStore(KVStoreTestWorkload* workload) { test.store = keyValueStoreSQLite(fn, id, KeyValueStoreType::SSD_BTREE_V1); else if (workload->storeType == "ssd-2") test.store = keyValueStoreSQLite(fn, id, KeyValueStoreType::SSD_REDWOOD_V1); - else if (workload->storeType == "ssd-redwood-experimental") + else if (workload->storeType == "ssd-redwood-1-experimental") test.store = keyValueStoreRedwoodV1(fn, id); else if (workload->storeType == "ssd-rocksdb-experimental") test.store = keyValueStoreRocksDB(fn, id, KeyValueStoreType::SSD_ROCKSDB_V1); diff --git a/tests/KVStoreTest.txt b/tests/KVStoreTest.txt index 97eb709703..ca8774194a 100644 --- a/tests/KVStoreTest.txt +++ b/tests/KVStoreTest.txt @@ -60,7 +60,7 @@ setup=true clear=false count=false useDB=false -storeType=ssd-redwood-experimental +storeType=ssd-redwood-1-experimental filename=bttest-redwood testTitle=Scan @@ -76,7 +76,7 @@ setup=false clear=false count=true useDB=false -storeType=ssd-redwood-experimental +storeType=ssd-redwood-1-experimental filename=bttest-redwood testTitle=RandomWriteSaturation @@ -93,5 +93,5 @@ setup=false clear=false count=false useDB=false -storeType=ssd-redwood-experimental +storeType=ssd-redwood-1-experimental filename=bttest-redwood From 7b29804a5ea6c11e1082a6b005243a9ff9b3aa15 Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Tue, 16 Nov 2021 02:30:57 -0800 Subject: [PATCH 068/142] Fix typo. 
--- fdbserver/VersionedBTree.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 58a56508d7..535373c527 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -9133,7 +9133,7 @@ TEST_CASE("Lredwood/correctness/btree") { g_redwoodMetricsActor = Void(); // Prevent trace event metrics from starting g_redwoodMetrics.clear(); - state std::string fileName = params.get("Filename").orDefault("unittest_pageFile.redwood-v1"); + state std::string fileName = params.get("fileName").orDefault("unittest_pageFile.redwood-v1"); IPager2* pager; state bool serialTest = params.getInt("serialTest").orDefault(deterministicRandom()->random01() < 0.25); From 867999a41a94a0940a30f32a721ad191e7379bcf Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Tue, 16 Nov 2021 03:25:54 -0800 Subject: [PATCH 069/142] Rename wrong_format_version to unsupported_format_version. --- fdbserver/VersionedBTree.actor.cpp | 2 +- flow/error_definitions.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 535373c527..93c7af7c14 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -2258,7 +2258,7 @@ public: self->pHeader = (Header*)self->headerPage->begin(); if (self->pHeader->formatVersion != Header::FORMAT_VERSION) { - Error e = wrong_format_version(); + Error e = unsupported_format_version(); TraceEvent(SevWarn, "RedwoodRecoveryFailedWrongVersion") .detail("Filename", self->filename) .detail("Version", self->pHeader->formatVersion) diff --git a/flow/error_definitions.h b/flow/error_definitions.h index 4dea94645b..244f1bf993 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -78,7 +78,7 @@ ERROR( wrong_connection_file, 1054, "Connection file mismatch") ERROR( version_already_compacted, 1055, "The requested changes have 
been compacted away") ERROR( local_config_changed, 1056, "Local configuration file has changed. Restart and apply these changes" ) ERROR( failed_to_reach_quorum, 1057, "Failed to reach quorum from configuration database nodes. Retry sending these requests" ) -ERROR( wrong_format_version, 1058, "Format version not recognized" ) +ERROR( unsupported_format_version, 1058, "Format version not supported" ) ERROR( unknown_change_feed, 1059, "Change feed not found" ) ERROR( change_feed_not_registered, 1060, "Change feed not registered" ) ERROR( granule_assignment_conflict, 1061, "Conflicting attempts to assign blob granules" ) From 21c3c585cac31a0353ec74ab3eb111891359d012 Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Tue, 16 Nov 2021 04:03:11 -0800 Subject: [PATCH 070/142] Make file name parameter more user friendly in unit tests. --- fdbserver/VersionedBTree.actor.cpp | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 93c7af7c14..65004392bc 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -9133,7 +9133,7 @@ TEST_CASE("Lredwood/correctness/btree") { g_redwoodMetricsActor = Void(); // Prevent trace event metrics from starting g_redwoodMetrics.clear(); - state std::string fileName = params.get("fileName").orDefault("unittest_pageFile.redwood-v1"); + state std::string file = params.get("file").orDefault("unittest_pageFile.redwood-v1"); IPager2* pager; state bool serialTest = params.getInt("serialTest").orDefault(deterministicRandom()->random01() < 0.25); @@ -9177,6 +9177,7 @@ TEST_CASE("Lredwood/correctness/btree") { params.getInt("concurrentExtentReads").orDefault(SERVER_KNOBS->REDWOOD_EXTENT_CONCURRENT_READS); printf("\n"); + printf("file: %s\n", file.c_str()); printf("targetPageOps: %" PRId64 "\n", targetPageOps); printf("pagerMemoryOnly: %d\n", pagerMemoryOnly); printf("serialTest: %d\n", 
serialTest); @@ -9198,12 +9199,12 @@ TEST_CASE("Lredwood/correctness/btree") { printf("\n"); printf("Deleting existing test data...\n"); - deleteFile(fileName); + deleteFile(file); printf("Initializing...\n"); pager = new DWALPager( - pageSize, extentSize, fileName, cacheSizeBytes, remapCleanupWindow, concurrentExtentReads, pagerMemoryOnly); - state VersionedBTree* btree = new VersionedBTree(pager, fileName); + pageSize, extentSize, file, cacheSizeBytes, remapCleanupWindow, concurrentExtentReads, pagerMemoryOnly); + state VersionedBTree* btree = new VersionedBTree(pager, file); wait(btree->init()); state std::map, Optional> written; @@ -9410,8 +9411,8 @@ TEST_CASE("Lredwood/correctness/btree") { printf("Reopening btree from disk.\n"); IPager2* pager = new DWALPager( - pageSize, extentSize, fileName, cacheSizeBytes, remapCleanupWindow, concurrentExtentReads); - btree = new VersionedBTree(pager, fileName); + pageSize, extentSize, file, cacheSizeBytes, remapCleanupWindow, concurrentExtentReads); + btree = new VersionedBTree(pager, file); wait(btree->init()); Version v = btree->getLastCommittedVersion(); @@ -9450,8 +9451,8 @@ TEST_CASE("Lredwood/correctness/btree") { state Future closedFuture = btree->onClosed(); btree->close(); wait(closedFuture); - btree = new VersionedBTree(new DWALPager(pageSize, extentSize, fileName, cacheSizeBytes, 0, concurrentExtentReads), - fileName); + btree = + new VersionedBTree(new DWALPager(pageSize, extentSize, file, cacheSizeBytes, 0, concurrentExtentReads), file); wait(btree->init()); wait(btree->clearAllAndCheckSanity()); @@ -9723,7 +9724,7 @@ TEST_CASE(":/redwood/performance/extentQueue") { TEST_CASE(":/redwood/performance/set") { state SignalableActorCollection actors; - state std::string fileName = params.get("Filename").orDefault("unittest.redwood-v1"); + state std::string file = params.get("file").orDefault("unittest.redwood-v1"); state int pageSize = params.getInt("pageSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_PAGE_SIZE); 
state int extentSize = params.getInt("extentSize").orDefault(SERVER_KNOBS->REDWOOD_DEFAULT_EXTENT_SIZE); state int64_t pageCacheBytes = params.getInt("pageCacheBytes").orDefault(FLOW_KNOBS->PAGE_CACHE_4K); @@ -9754,6 +9755,10 @@ TEST_CASE(":/redwood/performance/set") { state bool traceMetrics = params.getInt("traceMetrics").orDefault(0); state bool destructiveSanityCheck = params.getInt("destructiveSanityCheck").orDefault(0); + printf("file: %s\n", file.c_str()); + printf("openExisting: %d\n", openExisting); + printf("insertRecords: %d\n", insertRecords); + printf("destructiveSanityCheck: %d\n", destructiveSanityCheck); printf("pagerMemoryOnly: %d\n", pagerMemoryOnly); printf("pageSize: %d\n", pageSize); printf("extentSize: %d\n", extentSize); @@ -9776,10 +9781,6 @@ TEST_CASE(":/redwood/performance/set") { printf("scans: %d\n", scans); printf("scanWidth: %d\n", scanWidth); printf("scanPrefetchBytes: %d\n", scanPrefetchBytes); - printf("fileName: %s\n", fileName.c_str()); - printf("openExisting: %d\n", openExisting); - printf("insertRecords: %d\n", insertRecords); - printf("destructiveSanityCheck: %d\n", destructiveSanityCheck); // If using stdout for metrics, prevent trace event metrics logger from starting if (!traceMetrics) { @@ -9789,12 +9790,12 @@ TEST_CASE(":/redwood/performance/set") { if (!openExisting) { printf("Deleting old test data\n"); - deleteFile(fileName); + deleteFile(file); } DWALPager* pager = new DWALPager( - pageSize, extentSize, fileName, pageCacheBytes, remapCleanupWindow, concurrentExtentReads, pagerMemoryOnly); - state VersionedBTree* btree = new VersionedBTree(pager, fileName); + pageSize, extentSize, file, pageCacheBytes, remapCleanupWindow, concurrentExtentReads, pagerMemoryOnly); + state VersionedBTree* btree = new VersionedBTree(pager, file); wait(btree->init()); printf("Initialized. 
StorageBytes=%s\n", btree->getStorageBytes().toString().c_str()); From 76f0323873208452105a326a56ac90d2f65f9c30 Mon Sep 17 00:00:00 2001 From: Steve Atherton Date: Tue, 16 Nov 2021 04:27:58 -0800 Subject: [PATCH 071/142] Add missing declaration of test file. --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e5f52a2de3..2f7b253798 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -191,6 +191,7 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES rare/SpecificUnitTests.toml) add_fdb_test(TEST_FILES rare/SwizzledLargeApiCorrectness.toml) add_fdb_test(TEST_FILES rare/RedwoodCorrectnessBTree.toml) + add_fdb_test(TEST_FILES rare/RedwoodDeltaTree.toml) add_fdb_test(TEST_FILES rare/Throttling.toml) add_fdb_test(TEST_FILES rare/TransactionTagApiCorrectness.toml) add_fdb_test(TEST_FILES rare/TransactionTagSwizzledApiCorrectness.toml) From 3b1ae39403891437d0b1e72681d673d5782c8dd3 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Tue, 16 Nov 2021 10:39:22 -0800 Subject: [PATCH 072/142] Add CLIOptions::setupKnobs method --- fdbcli/fdbcli.actor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index f6381f7bff..b5388de08c 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -1402,7 +1402,9 @@ struct CLIOptions { exit_code = FDB_EXIT_ERROR; return; } + } + void setupKnobs() { auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection(); for (const auto& [knobName, knobValueString] : knobs) { try { @@ -2532,6 +2534,10 @@ int main(int argc, char** argv) { try { API->selectApiVersion(opt.api_version); API->setupNetwork(); + opt.setupKnobs(); + if (opt.exit_code != -1) { + return opt.exit_code; + } Future cliFuture = runCli(opt); Future timeoutFuture = opt.exit_timeout ? 
timeExit(opt.exit_timeout) : Never(); auto f = stopNetworkAfter(success(cliFuture) || timeoutFuture); From 8ae49c18eb4489ec352cc7b45cb844cf59a22ce8 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Tue, 16 Nov 2021 01:28:16 -0800 Subject: [PATCH 073/142] Add assertion to IKnobCollection::setGlobalKnobCollection --- fdbclient/IKnobCollection.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/fdbclient/IKnobCollection.cpp b/fdbclient/IKnobCollection.cpp index a1821696bf..8748280784 100644 --- a/fdbclient/IKnobCollection.cpp +++ b/fdbclient/IKnobCollection.cpp @@ -83,6 +83,7 @@ std::unique_ptr& IKnobCollection::globalKnobCollection() { void IKnobCollection::setGlobalKnobCollection(Type type, Randomize randomize, IsSimulated isSimulated) { globalKnobCollection() = create(type, randomize, isSimulated); + ASSERT(FLOW_KNOBS == &bootstrapGlobalFlowKnobs); FLOW_KNOBS = &globalKnobCollection()->getFlowKnobs(); } From b1633b90f1b2fa35b1504e25241d36f74f80e16a Mon Sep 17 00:00:00 2001 From: Markus Pilman Date: Tue, 16 Nov 2021 12:03:49 -0700 Subject: [PATCH 074/142] Added fmt to flow --- CMakeLists.txt | 2 +- contrib/CMakeLists.txt | 1 + contrib/fmt-8.0.1/.clang-format | 3 + contrib/fmt-8.0.1/CMakeLists.txt | 410 +++ contrib/fmt-8.0.1/LICENSE.rst | 27 + contrib/fmt-8.0.1/include/fmt/args.h | 232 ++ contrib/fmt-8.0.1/include/fmt/chrono.h | 1308 +++++++ contrib/fmt-8.0.1/include/fmt/color.h | 627 ++++ contrib/fmt-8.0.1/include/fmt/compile.h | 639 ++++ contrib/fmt-8.0.1/include/fmt/core.h | 3002 +++++++++++++++++ contrib/fmt-8.0.1/include/fmt/format-inl.h | 2620 ++++++++++++++ contrib/fmt-8.0.1/include/fmt/format.h | 2830 ++++++++++++++++ contrib/fmt-8.0.1/include/fmt/locale.h | 2 + contrib/fmt-8.0.1/include/fmt/os.h | 515 +++ contrib/fmt-8.0.1/include/fmt/ostream.h | 181 + contrib/fmt-8.0.1/include/fmt/printf.h | 652 ++++ contrib/fmt-8.0.1/include/fmt/ranges.h | 468 +++ contrib/fmt-8.0.1/include/fmt/xchar.h | 236 ++ contrib/fmt-8.0.1/src/fmt.cc | 100 + 
contrib/fmt-8.0.1/src/format.cc | 78 + contrib/fmt-8.0.1/src/os.cc | 360 ++ .../fmt-8.0.1/support/cmake/FindSetEnv.cmake | 7 + .../fmt-8.0.1/support/cmake/JoinPaths.cmake | 26 + contrib/fmt-8.0.1/support/cmake/cxx14.cmake | 70 + .../support/cmake/fmt-config.cmake.in | 4 + contrib/fmt-8.0.1/support/cmake/fmt.pc.in | 11 + flow/CMakeLists.txt | 1 + 27 files changed, 14411 insertions(+), 1 deletion(-) create mode 100644 contrib/fmt-8.0.1/.clang-format create mode 100644 contrib/fmt-8.0.1/CMakeLists.txt create mode 100644 contrib/fmt-8.0.1/LICENSE.rst create mode 100644 contrib/fmt-8.0.1/include/fmt/args.h create mode 100644 contrib/fmt-8.0.1/include/fmt/chrono.h create mode 100644 contrib/fmt-8.0.1/include/fmt/color.h create mode 100644 contrib/fmt-8.0.1/include/fmt/compile.h create mode 100644 contrib/fmt-8.0.1/include/fmt/core.h create mode 100644 contrib/fmt-8.0.1/include/fmt/format-inl.h create mode 100644 contrib/fmt-8.0.1/include/fmt/format.h create mode 100644 contrib/fmt-8.0.1/include/fmt/locale.h create mode 100644 contrib/fmt-8.0.1/include/fmt/os.h create mode 100644 contrib/fmt-8.0.1/include/fmt/ostream.h create mode 100644 contrib/fmt-8.0.1/include/fmt/printf.h create mode 100644 contrib/fmt-8.0.1/include/fmt/ranges.h create mode 100644 contrib/fmt-8.0.1/include/fmt/xchar.h create mode 100644 contrib/fmt-8.0.1/src/fmt.cc create mode 100644 contrib/fmt-8.0.1/src/format.cc create mode 100644 contrib/fmt-8.0.1/src/os.cc create mode 100644 contrib/fmt-8.0.1/support/cmake/FindSetEnv.cmake create mode 100644 contrib/fmt-8.0.1/support/cmake/JoinPaths.cmake create mode 100644 contrib/fmt-8.0.1/support/cmake/cxx14.cmake create mode 100644 contrib/fmt-8.0.1/support/cmake/fmt-config.cmake.in create mode 100644 contrib/fmt-8.0.1/support/cmake/fmt.pc.in diff --git a/CMakeLists.txt b/CMakeLists.txt index 710e049bec..86e8aa419b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -158,6 +158,7 @@ endif() include(CompileBoost) include(GetMsgpack) +add_subdirectory(contrib) 
add_subdirectory(flow) add_subdirectory(fdbrpc) add_subdirectory(fdbclient) @@ -169,7 +170,6 @@ else() add_subdirectory(fdbservice) endif() add_subdirectory(fdbbackup) -add_subdirectory(contrib) add_subdirectory(tests) add_subdirectory(flowbench EXCLUDE_FROM_ALL) if(WITH_PYTHON AND WITH_C_BINDING) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 6bc891b854..63ed617212 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,3 +1,4 @@ +add_subdirectory(fmt-8.0.1) if(NOT WIN32) add_subdirectory(monitoring) add_subdirectory(TraceLogHelper) diff --git a/contrib/fmt-8.0.1/.clang-format b/contrib/fmt-8.0.1/.clang-format new file mode 100644 index 0000000000..445f63ed30 --- /dev/null +++ b/contrib/fmt-8.0.1/.clang-format @@ -0,0 +1,3 @@ +DisableFormat: true +SortIncludes: Never + diff --git a/contrib/fmt-8.0.1/CMakeLists.txt b/contrib/fmt-8.0.1/CMakeLists.txt new file mode 100644 index 0000000000..f7f3046663 --- /dev/null +++ b/contrib/fmt-8.0.1/CMakeLists.txt @@ -0,0 +1,410 @@ +cmake_minimum_required(VERSION 3.1...3.18) + +# Fallback for using newer policies on CMake <3.12. +if(${CMAKE_VERSION} VERSION_LESS 3.12) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +endif() + +# Determine if fmt is built as a subproject (using add_subdirectory) +# or if it is the master project. +if (NOT DEFINED FMT_MASTER_PROJECT) + set(FMT_MASTER_PROJECT OFF) + if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) + set(FMT_MASTER_PROJECT ON) + message(STATUS "CMake version: ${CMAKE_VERSION}") + endif () +endif () + +# Joins arguments and places the results in ${result_var}. 
+function(join result_var) + set(result "") + foreach (arg ${ARGN}) + set(result "${result}${arg}") + endforeach () + set(${result_var} "${result}" PARENT_SCOPE) +endfunction() + +function(enable_module target) + if (MSVC) + set(BMI ${CMAKE_CURRENT_BINARY_DIR}/${target}.ifc) + target_compile_options(${target} + PRIVATE /interface /ifcOutput ${BMI} + INTERFACE /reference fmt=${BMI}) + endif () + set_target_properties(${target} PROPERTIES ADDITIONAL_CLEAN_FILES ${BMI}) + set_source_files_properties(${BMI} PROPERTIES GENERATED ON) +endfunction() + +include(CMakeParseArguments) + +# Sets a cache variable with a docstring joined from multiple arguments: +# set( ... CACHE ...) +# This allows splitting a long docstring for readability. +function(set_verbose) + # cmake_parse_arguments is broken in CMake 3.4 (cannot parse CACHE) so use + # list instead. + list(GET ARGN 0 var) + list(REMOVE_AT ARGN 0) + list(GET ARGN 0 val) + list(REMOVE_AT ARGN 0) + list(REMOVE_AT ARGN 0) + list(GET ARGN 0 type) + list(REMOVE_AT ARGN 0) + join(doc ${ARGN}) + set(${var} ${val} CACHE ${type} ${doc}) +endfunction() + +# Set the default CMAKE_BUILD_TYPE to Release. +# This should be done before the project command since the latter can set +# CMAKE_BUILD_TYPE itself (it does so for nmake). +if (FMT_MASTER_PROJECT AND NOT CMAKE_BUILD_TYPE) + set_verbose(CMAKE_BUILD_TYPE Release CACHE STRING + "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or " + "CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") +endif () + +project(FMT CXX) +include(GNUInstallDirs) +set_verbose(FMT_INC_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE STRING + "Installation directory for include files, a relative path that " + "will be joined with ${CMAKE_INSTALL_PREFIX} or an absolute path.") + +option(FMT_PEDANTIC "Enable extra warnings and expensive tests." OFF) +option(FMT_WERROR "Halt the compilation with an error on compiler warnings." + OFF) + +# Options that control generation of various targets. 
+option(FMT_DOC "Generate the doc target." ${FMT_MASTER_PROJECT}) +option(FMT_INSTALL "Generate the install target." ${FMT_MASTER_PROJECT}) +option(FMT_TEST "Generate the test target." ${FMT_MASTER_PROJECT}) +option(FMT_FUZZ "Generate the fuzz target." OFF) +option(FMT_CUDA_TEST "Generate the cuda-test target." OFF) +option(FMT_OS "Include core requiring OS (Windows/Posix) " ON) +option(FMT_MODULE "Build a module instead of a traditional library." OFF) + +set(FMT_CAN_MODULE OFF) +if (CMAKE_CXX_STANDARD GREATER 17 AND + # msvc 16.10-pre4 + MSVC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.29.30035) + set(FMT_CAN_MODULE ON) +endif () +if (NOT FMT_CAN_MODULE) + set(FMT_MODULE OFF) + message(STATUS "Module support is disabled.") +endif () +if (FMT_TEST AND FMT_MODULE) + # The tests require {fmt} to be compiled as traditional library + message(STATUS "Testing is incompatible with build mode 'module'.") +endif () + +# Get version from core.h +file(READ include/fmt/core.h core_h) +if (NOT core_h MATCHES "FMT_VERSION ([0-9]+)([0-9][0-9])([0-9][0-9])") + message(FATAL_ERROR "Cannot get FMT_VERSION from core.h.") +endif () +# Use math to skip leading zeros if any. +math(EXPR CPACK_PACKAGE_VERSION_MAJOR ${CMAKE_MATCH_1}) +math(EXPR CPACK_PACKAGE_VERSION_MINOR ${CMAKE_MATCH_2}) +math(EXPR CPACK_PACKAGE_VERSION_PATCH ${CMAKE_MATCH_3}) +join(FMT_VERSION ${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}. 
+ ${CPACK_PACKAGE_VERSION_PATCH}) +message(STATUS "Version: ${FMT_VERSION}") + +message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") + +if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +endif () + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} + "${CMAKE_CURRENT_SOURCE_DIR}/support/cmake") + +include(cxx14) +include(CheckCXXCompilerFlag) +include(JoinPaths) + +list(FIND CMAKE_CXX_COMPILE_FEATURES "cxx_variadic_templates" index) +if (${index} GREATER -1) + # Use cxx_variadic_templates instead of more appropriate cxx_std_11 for + # compatibility with older CMake versions. + set(FMT_REQUIRED_FEATURES cxx_variadic_templates) +endif () +message(STATUS "Required features: ${FMT_REQUIRED_FEATURES}") + +if (FMT_MASTER_PROJECT AND NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET) + set_verbose(CMAKE_CXX_VISIBILITY_PRESET hidden CACHE STRING + "Preset for the export of private symbols") + set_property(CACHE CMAKE_CXX_VISIBILITY_PRESET PROPERTY STRINGS + hidden default) +endif () + +if (FMT_MASTER_PROJECT AND NOT DEFINED CMAKE_VISIBILITY_INLINES_HIDDEN) + set_verbose(CMAKE_VISIBILITY_INLINES_HIDDEN ON CACHE BOOL + "Whether to add a compile flag to hide symbols of inline functions") +endif () + +if (CMAKE_CXX_COMPILER_ID MATCHES "GNU") + set(PEDANTIC_COMPILE_FLAGS -pedantic-errors -Wall -Wextra -pedantic + -Wold-style-cast -Wundef + -Wredundant-decls -Wwrite-strings -Wpointer-arith + -Wcast-qual -Wformat=2 -Wmissing-include-dirs + -Wcast-align + -Wctor-dtor-privacy -Wdisabled-optimization + -Winvalid-pch -Woverloaded-virtual + -Wconversion -Wswitch-enum -Wundef + -Wno-ctor-dtor-privacy -Wno-format-nonliteral) + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.6) + set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} + -Wno-dangling-else -Wno-unused-local-typedefs) + endif () + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) + set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wdouble-promotion + -Wtrampolines 
-Wzero-as-null-pointer-constant -Wuseless-cast + -Wvector-operation-performance -Wsized-deallocation -Wshadow) + endif () + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0) + set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wshift-overflow=2 + -Wnull-dereference -Wduplicated-cond) + endif () + set(WERROR_FLAG -Werror) +endif () + +if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + set(PEDANTIC_COMPILE_FLAGS -Wall -Wextra -pedantic -Wconversion -Wundef + -Wdeprecated -Wweak-vtables -Wshadow + -Wno-gnu-zero-variadic-macro-arguments) + check_cxx_compiler_flag(-Wzero-as-null-pointer-constant HAS_NULLPTR_WARNING) + if (HAS_NULLPTR_WARNING) + set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} + -Wzero-as-null-pointer-constant) + endif () + set(WERROR_FLAG -Werror) +endif () + +if (MSVC) + set(PEDANTIC_COMPILE_FLAGS /W3) + set(WERROR_FLAG /WX) +endif () + +if (FMT_MASTER_PROJECT AND CMAKE_GENERATOR MATCHES "Visual Studio") + # If Microsoft SDK is installed create script run-msbuild.bat that + # calls SetEnv.cmd to set up build environment and runs msbuild. + # It is useful when building Visual Studio projects with the SDK + # toolchain rather than Visual Studio. + include(FindSetEnv) + if (WINSDK_SETENV) + set(MSBUILD_SETUP "call \"${WINSDK_SETENV}\"") + endif () + # Set FrameworkPathOverride to get rid of MSB3644 warnings. 
+ join(netfxpath + "C:\\Program Files\\Reference Assemblies\\Microsoft\\Framework\\" + ".NETFramework\\v4.0") + file(WRITE run-msbuild.bat " + ${MSBUILD_SETUP} + ${CMAKE_MAKE_PROGRAM} -p:FrameworkPathOverride=\"${netfxpath}\" %*") +endif () + +set(strtod_l_headers stdlib.h) +if (APPLE) + set(strtod_l_headers ${strtod_l_headers} xlocale.h) +endif () + +include(CheckSymbolExists) +if (WIN32) + check_symbol_exists(_strtod_l "${strtod_l_headers}" HAVE_STRTOD_L) +else () + check_symbol_exists(strtod_l "${strtod_l_headers}" HAVE_STRTOD_L) +endif () + +function(add_headers VAR) + set(headers ${${VAR}}) + foreach (header ${ARGN}) + set(headers ${headers} include/fmt/${header}) + endforeach() + set(${VAR} ${headers} PARENT_SCOPE) +endfunction() + +# Define the fmt library, its includes and the needed defines. +add_headers(FMT_HEADERS args.h chrono.h color.h compile.h core.h format.h + format-inl.h locale.h os.h ostream.h printf.h ranges.h + xchar.h) +if (FMT_MODULE) + set(FMT_SOURCES src/fmt.cc) +elseif (FMT_OS) + set(FMT_SOURCES src/format.cc src/os.cc) +else() + set(FMT_SOURCES src/format.cc) +endif () + +add_library(fmt ${FMT_SOURCES} ${FMT_HEADERS}) +add_library(fmt::fmt ALIAS fmt) + +if (HAVE_STRTOD_L) + target_compile_definitions(fmt PUBLIC FMT_LOCALE) +endif () + +if (MINGW) + check_cxx_compiler_flag("Wa,-mbig-obj" FMT_HAS_MBIG_OBJ) + if (${FMT_HAS_MBIG_OBJ}) + target_compile_options(fmt PUBLIC "-Wa,-mbig-obj") + endif() +endif () + +if (FMT_WERROR) + target_compile_options(fmt PRIVATE ${WERROR_FLAG}) +endif () +if (FMT_PEDANTIC) + target_compile_options(fmt PRIVATE ${PEDANTIC_COMPILE_FLAGS}) +endif () +if (FMT_MODULE) + enable_module(fmt) +endif () + +target_compile_features(fmt INTERFACE ${FMT_REQUIRED_FEATURES}) + +target_include_directories(fmt PUBLIC + $ + $) + +set(FMT_DEBUG_POSTFIX d CACHE STRING "Debug library postfix.") + +set_target_properties(fmt PROPERTIES + VERSION ${FMT_VERSION} SOVERSION ${CPACK_PACKAGE_VERSION_MAJOR} + DEBUG_POSTFIX 
"${FMT_DEBUG_POSTFIX}") + +# Set FMT_LIB_NAME for pkg-config fmt.pc. We cannot use the OUTPUT_NAME target +# property because it's not set by default. +set(FMT_LIB_NAME fmt) +if (CMAKE_BUILD_TYPE STREQUAL "Debug") + set(FMT_LIB_NAME ${FMT_LIB_NAME}${FMT_DEBUG_POSTFIX}) +endif () + +if (BUILD_SHARED_LIBS) + if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS" AND + NOT EMSCRIPTEN) + # Fix rpmlint warning: + # unused-direct-shlib-dependency /usr/lib/libformat.so.1.1.0 /lib/libm.so.6. + target_link_libraries(fmt -Wl,--as-needed) + endif () + target_compile_definitions(fmt PRIVATE FMT_EXPORT INTERFACE FMT_SHARED) +endif () +if (FMT_SAFE_DURATION_CAST) + target_compile_definitions(fmt PUBLIC FMT_SAFE_DURATION_CAST) +endif() + +add_library(fmt-header-only INTERFACE) +add_library(fmt::fmt-header-only ALIAS fmt-header-only) + +target_compile_definitions(fmt-header-only INTERFACE FMT_HEADER_ONLY=1) +target_compile_features(fmt-header-only INTERFACE ${FMT_REQUIRED_FEATURES}) + +target_include_directories(fmt-header-only INTERFACE + $ + $) + +# Install targets. 
+if (FMT_INSTALL) + include(CMakePackageConfigHelpers) + set_verbose(FMT_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/fmt CACHE STRING + "Installation directory for cmake files, a relative path that " + "will be joined with ${CMAKE_INSTALL_PREFIX} or an absolute " + "path.") + set(version_config ${PROJECT_BINARY_DIR}/fmt-config-version.cmake) + set(project_config ${PROJECT_BINARY_DIR}/fmt-config.cmake) + set(pkgconfig ${PROJECT_BINARY_DIR}/fmt.pc) + set(targets_export_name fmt-targets) + + set_verbose(FMT_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE STRING + "Installation directory for libraries, a relative path that " + "will be joined to ${CMAKE_INSTALL_PREFIX} or an absolute path.") + + set_verbose(FMT_PKGCONFIG_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig CACHE PATH + "Installation directory for pkgconfig (.pc) files, a relative " + "path that will be joined with ${CMAKE_INSTALL_PREFIX} or an " + "absolute path.") + + # Generate the version, config and target files into the build directory. + write_basic_package_version_file( + ${version_config} + VERSION ${FMT_VERSION} + COMPATIBILITY AnyNewerVersion) + + join_paths(libdir_for_pc_file "\${exec_prefix}" "${FMT_LIB_DIR}") + join_paths(includedir_for_pc_file "\${prefix}" "${FMT_INC_DIR}") + + configure_file( + "${PROJECT_SOURCE_DIR}/support/cmake/fmt.pc.in" + "${pkgconfig}" + @ONLY) + configure_package_config_file( + ${PROJECT_SOURCE_DIR}/support/cmake/fmt-config.cmake.in + ${project_config} + INSTALL_DESTINATION ${FMT_CMAKE_DIR}) + + set(INSTALL_TARGETS fmt fmt-header-only) + + # Install the library and headers. + install(TARGETS ${INSTALL_TARGETS} EXPORT ${targets_export_name} + LIBRARY DESTINATION ${FMT_LIB_DIR} + ARCHIVE DESTINATION ${FMT_LIB_DIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) + + # Use a namespace because CMake provides better diagnostics for namespaced + # imported targets. 
+ export(TARGETS ${INSTALL_TARGETS} NAMESPACE fmt:: + FILE ${PROJECT_BINARY_DIR}/${targets_export_name}.cmake) + + # Install version, config and target files. + install( + FILES ${project_config} ${version_config} + DESTINATION ${FMT_CMAKE_DIR}) + install(EXPORT ${targets_export_name} DESTINATION ${FMT_CMAKE_DIR} + NAMESPACE fmt::) + + install(FILES $ + DESTINATION ${FMT_LIB_DIR} OPTIONAL) + install(FILES ${FMT_HEADERS} DESTINATION "${FMT_INC_DIR}/fmt") + install(FILES "${pkgconfig}" DESTINATION "${FMT_PKGCONFIG_DIR}") +endif () + +if (FMT_DOC) + add_subdirectory(doc) +endif () + +if (FMT_TEST) + enable_testing() + add_subdirectory(test) +endif () + +# Control fuzzing independent of the unit tests. +if (FMT_FUZZ) + add_subdirectory(test/fuzzing) + + # The FMT_FUZZ macro is used to prevent resource exhaustion in fuzzing + # mode and make fuzzing practically possible. It is similar to + # FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION but uses a different name to + # avoid interfering with fuzzing of projects that use {fmt}. + # See also https://llvm.org/docs/LibFuzzer.html#fuzzer-friendly-build-mode. + target_compile_definitions(fmt PUBLIC FMT_FUZZ) +endif () + +set(gitignore ${PROJECT_SOURCE_DIR}/.gitignore) +if (FMT_MASTER_PROJECT AND EXISTS ${gitignore}) + # Get the list of ignored files from .gitignore. + file (STRINGS ${gitignore} lines) + list(REMOVE_ITEM lines /doc/html) + foreach (line ${lines}) + string(REPLACE "." 
"[.]" line "${line}") + string(REPLACE "*" ".*" line "${line}") + set(ignored_files ${ignored_files} "${line}$" "${line}/") + endforeach () + set(ignored_files ${ignored_files} + /.git /breathe /format-benchmark sphinx/ .buildinfo .doctrees) + + set(CPACK_SOURCE_GENERATOR ZIP) + set(CPACK_SOURCE_IGNORE_FILES ${ignored_files}) + set(CPACK_SOURCE_PACKAGE_FILE_NAME fmt-${FMT_VERSION}) + set(CPACK_PACKAGE_NAME fmt) + set(CPACK_RESOURCE_FILE_README ${PROJECT_SOURCE_DIR}/README.rst) + include(CPack) +endif () diff --git a/contrib/fmt-8.0.1/LICENSE.rst b/contrib/fmt-8.0.1/LICENSE.rst new file mode 100644 index 0000000000..f0ec3db4d2 --- /dev/null +++ b/contrib/fmt-8.0.1/LICENSE.rst @@ -0,0 +1,27 @@ +Copyright (c) 2012 - present, Victor Zverovich + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +--- Optional exception to the license --- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into a machine-executable object form of such +source code, you may redistribute such embedded portions in such object form +without including the above copyright and permission notices. diff --git a/contrib/fmt-8.0.1/include/fmt/args.h b/contrib/fmt-8.0.1/include/fmt/args.h new file mode 100644 index 0000000000..562e8ab111 --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/args.h @@ -0,0 +1,232 @@ +// Formatting library for C++ - dynamic format arguments +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_ARGS_H_ +#define FMT_ARGS_H_ + +#include // std::reference_wrapper +#include // std::unique_ptr +#include + +#include "core.h" + +FMT_BEGIN_NAMESPACE + +namespace detail { + +template struct is_reference_wrapper : std::false_type {}; +template +struct is_reference_wrapper> : std::true_type {}; + +template const T& unwrap(const T& v) { return v; } +template const T& unwrap(const std::reference_wrapper& v) { + return static_cast(v); +} + +class dynamic_arg_list { + // Workaround for clang's -Wweak-vtables. Unlike for regular classes, for + // templates it doesn't complain about inability to deduce single translation + // unit for placing vtable. So storage_node_base is made a fake template. 
+ template struct node { + virtual ~node() = default; + std::unique_ptr> next; + }; + + template struct typed_node : node<> { + T value; + + template + FMT_CONSTEXPR typed_node(const Arg& arg) : value(arg) {} + + template + FMT_CONSTEXPR typed_node(const basic_string_view& arg) + : value(arg.data(), arg.size()) {} + }; + + std::unique_ptr> head_; + + public: + template const T& push(const Arg& arg) { + auto new_node = std::unique_ptr>(new typed_node(arg)); + auto& value = new_node->value; + new_node->next = std::move(head_); + head_ = std::move(new_node); + return value; + } +}; +} // namespace detail + +/** + \rst + A dynamic version of `fmt::format_arg_store`. + It's equipped with a storage to potentially temporary objects which lifetimes + could be shorter than the format arguments object. + + It can be implicitly converted into `~fmt::basic_format_args` for passing + into type-erased formatting functions such as `~fmt::vformat`. + \endrst + */ +template +class dynamic_format_arg_store +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + // Workaround a GCC template argument substitution bug. + : public basic_format_args +#endif +{ + private: + using char_type = typename Context::char_type; + + template struct need_copy { + static constexpr detail::type mapped_type = + detail::mapped_type_constant::value; + + enum { + value = !(detail::is_reference_wrapper::value || + std::is_same>::value || + std::is_same>::value || + (mapped_type != detail::type::cstring_type && + mapped_type != detail::type::string_type && + mapped_type != detail::type::custom_type)) + }; + }; + + template + using stored_type = conditional_t::value && + !has_formatter::value && + !detail::is_reference_wrapper::value, + std::basic_string, T>; + + // Storage of basic_format_arg must be contiguous. + std::vector> data_; + std::vector> named_info_; + + // Storage of arguments not fitting into basic_format_arg must grow + // without relocation because items in data_ refer to it. 
+ detail::dynamic_arg_list dynamic_args_; + + friend class basic_format_args; + + unsigned long long get_types() const { + return detail::is_unpacked_bit | data_.size() | + (named_info_.empty() + ? 0ULL + : static_cast(detail::has_named_args_bit)); + } + + const basic_format_arg* data() const { + return named_info_.empty() ? data_.data() : data_.data() + 1; + } + + template void emplace_arg(const T& arg) { + data_.emplace_back(detail::make_arg(arg)); + } + + template + void emplace_arg(const detail::named_arg& arg) { + if (named_info_.empty()) { + constexpr const detail::named_arg_info* zero_ptr{nullptr}; + data_.insert(data_.begin(), {zero_ptr, 0}); + } + data_.emplace_back(detail::make_arg(detail::unwrap(arg.value))); + auto pop_one = [](std::vector>* data) { + data->pop_back(); + }; + std::unique_ptr>, decltype(pop_one)> + guard{&data_, pop_one}; + named_info_.push_back({arg.name, static_cast(data_.size() - 2u)}); + data_[0].value_.named_args = {named_info_.data(), named_info_.size()}; + guard.release(); + } + + public: + /** + \rst + Adds an argument into the dynamic store for later passing to a formatting + function. + + Note that custom types and string types (but not string views) are copied + into the store dynamically allocating memory if necessary. + + **Example**:: + + fmt::dynamic_format_arg_store store; + store.push_back(42); + store.push_back("abc"); + store.push_back(1.5f); + std::string result = fmt::vformat("{} and {} and {}", store); + \endrst + */ + template void push_back(const T& arg) { + if (detail::const_check(need_copy::value)) + emplace_arg(dynamic_args_.push>(arg)); + else + emplace_arg(detail::unwrap(arg)); + } + + /** + \rst + Adds a reference to the argument into the dynamic store for later passing to + a formatting function. + + **Example**:: + + fmt::dynamic_format_arg_store store; + char band[] = "Rolling Stones"; + store.push_back(std::cref(band)); + band[9] = 'c'; // Changing str affects the output. 
+ std::string result = fmt::vformat("{}", store); + // result == "Rolling Scones" + \endrst + */ + template void push_back(std::reference_wrapper arg) { + static_assert( + need_copy::value, + "objects of built-in types and string views are always copied"); + emplace_arg(arg.get()); + } + + /** + Adds named argument into the dynamic store for later passing to a formatting + function. ``std::reference_wrapper`` is supported to avoid copying of the + argument. The name is always copied into the store. + */ + template + void push_back(const detail::named_arg& arg) { + const char_type* arg_name = + dynamic_args_.push>(arg.name).c_str(); + if (detail::const_check(need_copy::value)) { + emplace_arg( + fmt::arg(arg_name, dynamic_args_.push>(arg.value))); + } else { + emplace_arg(fmt::arg(arg_name, arg.value)); + } + } + + /** Erase all elements from the store */ + void clear() { + data_.clear(); + named_info_.clear(); + dynamic_args_ = detail::dynamic_arg_list(); + } + + /** + \rst + Reserves space to store at least *new_cap* arguments including + *new_cap_named* named arguments. + \endrst + */ + void reserve(size_t new_cap, size_t new_cap_named) { + FMT_ASSERT(new_cap >= new_cap_named, + "Set of arguments includes set of named arguments"); + data_.reserve(new_cap); + named_info_.reserve(new_cap_named); + } +}; + +FMT_END_NAMESPACE + +#endif // FMT_ARGS_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/chrono.h b/contrib/fmt-8.0.1/include/fmt/chrono.h new file mode 100644 index 0000000000..c024fd710c --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/chrono.h @@ -0,0 +1,1308 @@ +// Formatting library for C++ - chrono support +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_CHRONO_H_ +#define FMT_CHRONO_H_ + +#include +#include +#include +#include +#include + +#include "format.h" + +FMT_BEGIN_NAMESPACE + +// Enable safe chrono durations, unless explicitly disabled. 
+#ifndef FMT_SAFE_DURATION_CAST +# define FMT_SAFE_DURATION_CAST 1 +#endif +#if FMT_SAFE_DURATION_CAST + +// For conversion between std::chrono::durations without undefined +// behaviour or erroneous results. +// This is a stripped down version of duration_cast, for inclusion in fmt. +// See https://github.com/pauldreik/safe_duration_cast +// +// Copyright Paul Dreik 2019 +namespace safe_duration_cast { + +template ::value && + std::numeric_limits::is_signed == + std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + // A and B are both signed, or both unsigned. + if (F::digits <= T::digits) { + // From fits in To without any problem. + } else { + // From does not always fit in To, resort to a dynamic check. + if (from < (T::min)() || from > (T::max)()) { + // outside range. + ec = 1; + return {}; + } + } + return static_cast(from); +} + +/** + * converts From to To, without loss. If the dynamic value of from + * can't be converted to To without loss, ec is set. + */ +template ::value && + std::numeric_limits::is_signed != + std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + if (detail::const_check(F::is_signed && !T::is_signed)) { + // From may be negative, not allowed! + if (fmt::detail::is_negative(from)) { + ec = 1; + return {}; + } + // From is positive. Can it always fit in To? 
+ if (F::digits > T::digits && + from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + } + + if (!F::is_signed && T::is_signed && F::digits >= T::digits && + from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + return static_cast(from); // Lossless conversion. +} + +template ::value)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + return from; +} // function + +// clang-format off +/** + * converts From to To if possible, otherwise ec is set. + * + * input | output + * ---------------------------------|--------------- + * NaN | NaN + * Inf | Inf + * normal, fits in output | converted (possibly lossy) + * normal, does not fit in output | ec is set + * subnormal | best effort + * -Inf | -Inf + */ +// clang-format on +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { + ec = 0; + using T = std::numeric_limits; + static_assert(std::is_floating_point::value, "From must be floating"); + static_assert(std::is_floating_point::value, "To must be floating"); + + // catch the only happy case + if (std::isfinite(from)) { + if (from >= T::lowest() && from <= (T::max)()) { + return static_cast(from); + } + // not within range. 
+ ec = 1; + return {}; + } + + // nan and inf will be preserved + return static_cast(from); +} // function + +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { + ec = 0; + static_assert(std::is_floating_point::value, "From must be floating"); + return from; +} + +/** + * safe duration cast between integral durations + */ +template ::value), + FMT_ENABLE_IF(std::is_integral::value)> +To safe_duration_cast(std::chrono::duration from, + int& ec) { + using From = std::chrono::duration; + ec = 0; + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor + : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // safe conversion to IntermediateRep + IntermediateRep count = + lossless_integral_conversion(from.count(), ec); + if (ec) return {}; + // multiply with Factor::num without overflow or underflow + if (detail::const_check(Factor::num != 1)) { + const auto max1 = detail::max_value() / Factor::num; + if (count > max1) { + ec = 1; + return {}; + } + const auto min1 = + (std::numeric_limits::min)() / Factor::num; + if (count < min1) { + ec = 1; + return {}; + } + count *= Factor::num; + } + + if (detail::const_check(Factor::den != 1)) count /= Factor::den; + auto tocount = lossless_integral_conversion(count, ec); + return ec ? 
To() : To(tocount); +} + +/** + * safe duration_cast between floating point durations + */ +template ::value), + FMT_ENABLE_IF(std::is_floating_point::value)> +To safe_duration_cast(std::chrono::duration from, + int& ec) { + using From = std::chrono::duration; + ec = 0; + if (std::isnan(from.count())) { + // nan in, gives nan out. easy. + return To{std::numeric_limits::quiet_NaN()}; + } + // maybe we should also check if from is denormal, and decide what to do about + // it. + + // +-inf should be preserved. + if (std::isinf(from.count())) { + return To{from.count()}; + } + + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor + : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // force conversion of From::rep -> IntermediateRep to be safe, + // even if it will never happen be narrowing in this context. + IntermediateRep count = + safe_float_conversion(from.count(), ec); + if (ec) { + return {}; + } + + // multiply with Factor::num without overflow or underflow + if (Factor::num != 1) { + constexpr auto max1 = detail::max_value() / + static_cast(Factor::num); + if (count > max1) { + ec = 1; + return {}; + } + constexpr auto min1 = std::numeric_limits::lowest() / + static_cast(Factor::num); + if (count < min1) { + ec = 1; + return {}; + } + count *= static_cast(Factor::num); + } + + // this can't go wrong, right? den>0 is checked earlier. 
+ if (Factor::den != 1) { + using common_t = typename std::common_type::type; + count /= static_cast(Factor::den); + } + + // convert to the to type, safely + using ToRep = typename To::rep; + + const ToRep tocount = safe_float_conversion(count, ec); + if (ec) { + return {}; + } + return To{tocount}; +} +} // namespace safe_duration_cast +#endif + +// Prevents expansion of a preceding token as a function-style macro. +// Usage: f FMT_NOMACRO() +#define FMT_NOMACRO + +namespace detail { +template struct null {}; +inline null<> localtime_r FMT_NOMACRO(...) { return null<>(); } +inline null<> localtime_s(...) { return null<>(); } +inline null<> gmtime_r(...) { return null<>(); } +inline null<> gmtime_s(...) { return null<>(); } + +inline auto do_write(const std::tm& time, const std::locale& loc, char format, + char modifier) -> std::string { + auto&& os = std::ostringstream(); + os.imbue(loc); + using iterator = std::ostreambuf_iterator; + const auto& facet = std::use_facet>(loc); + auto end = facet.put(os, os, ' ', &time, format, modifier); + if (end.failed()) FMT_THROW(format_error("failed to format time")); + auto str = os.str(); + if (!detail::is_utf8() || loc == std::locale::classic()) return str; + // char16_t and char32_t codecvts are broken in MSVC (linkage errors) and + // gcc-4. +#if FMT_MSC_VER != 0 || \ + (defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)) + // The _GLIBCXX_USE_DUAL_ABI macro is always defined in libstdc++ from gcc-5 + // and newer. 
+ using code_unit = wchar_t; +#else + using code_unit = char32_t; +#endif + auto& f = std::use_facet>(loc); + auto mb = std::mbstate_t(); + const char* from_next = nullptr; + code_unit* to_next = nullptr; + constexpr size_t buf_size = 32; + code_unit buf[buf_size] = {}; + auto result = f.in(mb, str.data(), str.data() + str.size(), from_next, buf, + buf + buf_size, to_next); + if (result != std::codecvt_base::ok) + FMT_THROW(format_error("failed to format time")); + str.clear(); + for (code_unit* p = buf; p != to_next; ++p) { + uint32_t c = static_cast(*p); + if (sizeof(code_unit) == 2 && c >= 0xd800 && c <= 0xdfff) { + // surrogate pair + ++p; + if (p == to_next || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) { + FMT_THROW(format_error("failed to format time")); + } + c = (c << 10) + static_cast(*p) - 0x35fdc00; + } + if (c < 0x80) { + str.push_back(static_cast(c)); + } else if (c < 0x800) { + str.push_back(static_cast(0xc0 | (c >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { + str.push_back(static_cast(0xe0 | (c >> 12))); + str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else if (c >= 0x10000 && c <= 0x10ffff) { + str.push_back(static_cast(0xf0 | (c >> 18))); + str.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); + str.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + str.push_back(static_cast(0x80 | (c & 0x3f))); + } else { + FMT_THROW(format_error("failed to format time")); + } + } + return str; +} + +template +auto write(OutputIt out, const std::tm& time, const std::locale& loc, + char format, char modifier = 0) -> OutputIt { + auto str = do_write(time, loc, format, modifier); + return std::copy(str.begin(), str.end(), out); +} +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +/** + Converts given time since epoch as ``std::time_t`` value into calendar time, + expressed in local time. 
Unlike ``std::localtime``, this function is + thread-safe on most platforms. + */ +inline std::tm localtime(std::time_t time) { + struct dispatcher { + std::time_t time_; + std::tm tm_; + + dispatcher(std::time_t t) : time_(t) {} + + bool run() { + using namespace fmt::detail; + return handle(localtime_r(&time_, &tm_)); + } + + bool handle(std::tm* tm) { return tm != nullptr; } + + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(localtime_s(&tm_, &time_)); + } + + bool fallback(int res) { return res == 0; } + +#if !FMT_MSC_VER + bool fallback(detail::null<>) { + using namespace fmt::detail; + std::tm* tm = std::localtime(&time_); + if (tm) tm_ = *tm; + return tm != nullptr; + } +#endif + }; + dispatcher lt(time); + // Too big time values may be unsupported. + if (!lt.run()) FMT_THROW(format_error("time_t value out of range")); + return lt.tm_; +} + +inline std::tm localtime( + std::chrono::time_point time_point) { + return localtime(std::chrono::system_clock::to_time_t(time_point)); +} + +/** + Converts given time since epoch as ``std::time_t`` value into calendar time, + expressed in Coordinated Universal Time (UTC). Unlike ``std::gmtime``, this + function is thread-safe on most platforms. + */ +inline std::tm gmtime(std::time_t time) { + struct dispatcher { + std::time_t time_; + std::tm tm_; + + dispatcher(std::time_t t) : time_(t) {} + + bool run() { + using namespace fmt::detail; + return handle(gmtime_r(&time_, &tm_)); + } + + bool handle(std::tm* tm) { return tm != nullptr; } + + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(gmtime_s(&tm_, &time_)); + } + + bool fallback(int res) { return res == 0; } + +#if !FMT_MSC_VER + bool fallback(detail::null<>) { + std::tm* tm = std::gmtime(&time_); + if (tm) tm_ = *tm; + return tm != nullptr; + } +#endif + }; + dispatcher gt(time); + // Too big time values may be unsupported. 
+ if (!gt.run()) FMT_THROW(format_error("time_t value out of range")); + return gt.tm_; +} + +inline std::tm gmtime( + std::chrono::time_point time_point) { + return gmtime(std::chrono::system_clock::to_time_t(time_point)); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +inline size_t strftime(char* str, size_t count, const char* format, + const std::tm* time) { + // Assign to a pointer to suppress GCCs -Wformat-nonliteral + // First assign the nullptr to suppress -Wsuggest-attribute=format + std::size_t (*strftime)(char*, std::size_t, const char*, const std::tm*) = + nullptr; + strftime = std::strftime; + return strftime(str, count, format, time); +} + +inline size_t strftime(wchar_t* str, size_t count, const wchar_t* format, + const std::tm* time) { + // See above + std::size_t (*wcsftime)(wchar_t*, std::size_t, const wchar_t*, + const std::tm*) = nullptr; + wcsftime = std::wcsftime; + return wcsftime(str, count, format, time); +} + +FMT_END_DETAIL_NAMESPACE + +template +struct formatter, + Char> : formatter { + FMT_CONSTEXPR formatter() { + this->specs = {default_specs, sizeof(default_specs) / sizeof(Char)}; + } + + template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + auto it = ctx.begin(); + if (it != ctx.end() && *it == ':') ++it; + auto end = it; + while (end != ctx.end() && *end != '}') ++end; + if (end != it) this->specs = {it, detail::to_unsigned(end - it)}; + return end; + } + + template + auto format(std::chrono::time_point val, + FormatContext& ctx) -> decltype(ctx.out()) { + std::tm time = localtime(val); + return formatter::format(time, ctx); + } + + static constexpr Char default_specs[] = {'%', 'Y', '-', '%', 'm', '-', + '%', 'd', ' ', '%', 'H', ':', + '%', 'M', ':', '%', 'S'}; +}; + +template +constexpr Char + formatter, + Char>::default_specs[]; + +template struct formatter { + template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + auto it = ctx.begin(); + if (it != ctx.end() && *it == ':') ++it; + 
auto end = it; + while (end != ctx.end() && *end != '}') ++end; + specs = {it, detail::to_unsigned(end - it)}; + return end; + } + + template + auto format(const std::tm& tm, FormatContext& ctx) const + -> decltype(ctx.out()) { + basic_memory_buffer tm_format; + tm_format.append(specs.begin(), specs.end()); + // By appending an extra space we can distinguish an empty result that + // indicates insufficient buffer size from a guaranteed non-empty result + // https://github.com/fmtlib/fmt/issues/2238 + tm_format.push_back(' '); + tm_format.push_back('\0'); + basic_memory_buffer buf; + size_t start = buf.size(); + for (;;) { + size_t size = buf.capacity() - start; + size_t count = detail::strftime(&buf[start], size, &tm_format[0], &tm); + if (count != 0) { + buf.resize(start + count); + break; + } + const size_t MIN_GROWTH = 10; + buf.reserve(buf.capacity() + (size > MIN_GROWTH ? size : MIN_GROWTH)); + } + // Remove the extra space. + return std::copy(buf.begin(), buf.end() - 1, ctx.out()); + } + + basic_string_view specs; +}; + +FMT_BEGIN_DETAIL_NAMESPACE + +template FMT_CONSTEXPR inline const char* get_units() { + if (std::is_same::value) return "as"; + if (std::is_same::value) return "fs"; + if (std::is_same::value) return "ps"; + if (std::is_same::value) return "ns"; + if (std::is_same::value) return "µs"; + if (std::is_same::value) return "ms"; + if (std::is_same::value) return "cs"; + if (std::is_same::value) return "ds"; + if (std::is_same>::value) return "s"; + if (std::is_same::value) return "das"; + if (std::is_same::value) return "hs"; + if (std::is_same::value) return "ks"; + if (std::is_same::value) return "Ms"; + if (std::is_same::value) return "Gs"; + if (std::is_same::value) return "Ts"; + if (std::is_same::value) return "Ps"; + if (std::is_same::value) return "Es"; + if (std::is_same>::value) return "m"; + if (std::is_same>::value) return "h"; + return nullptr; +} + +enum class numeric_system { + standard, + // Alternative numeric system, e.g. 
十二 instead of 12 in ja_JP locale. + alternative +}; + +// Parses a put_time-like format string and invokes handler actions. +template +FMT_CONSTEXPR const Char* parse_chrono_format(const Char* begin, + const Char* end, + Handler&& handler) { + auto ptr = begin; + while (ptr != end) { + auto c = *ptr; + if (c == '}') break; + if (c != '%') { + ++ptr; + continue; + } + if (begin != ptr) handler.on_text(begin, ptr); + ++ptr; // consume '%' + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case '%': + handler.on_text(ptr - 1, ptr); + break; + case 'n': { + const Char newline[] = {'\n'}; + handler.on_text(newline, newline + 1); + break; + } + case 't': { + const Char tab[] = {'\t'}; + handler.on_text(tab, tab + 1); + break; + } + // Day of the week: + case 'a': + handler.on_abbr_weekday(); + break; + case 'A': + handler.on_full_weekday(); + break; + case 'w': + handler.on_dec0_weekday(numeric_system::standard); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::standard); + break; + // Month: + case 'b': + handler.on_abbr_month(); + break; + case 'B': + handler.on_full_month(); + break; + // Hour, minute, second: + case 'H': + handler.on_24_hour(numeric_system::standard); + break; + case 'I': + handler.on_12_hour(numeric_system::standard); + break; + case 'M': + handler.on_minute(numeric_system::standard); + break; + case 'S': + handler.on_second(numeric_system::standard); + break; + // Other: + case 'c': + handler.on_datetime(numeric_system::standard); + break; + case 'x': + handler.on_loc_date(numeric_system::standard); + break; + case 'X': + handler.on_loc_time(numeric_system::standard); + break; + case 'D': + handler.on_us_date(); + break; + case 'F': + handler.on_iso_date(); + break; + case 'r': + handler.on_12_hour_time(); + break; + case 'R': + handler.on_24_hour_time(); + break; + case 'T': + handler.on_iso_time(); + break; + case 'p': + handler.on_am_pm(); + break; + case 'Q': + handler.on_duration_value(); 
+ break; + case 'q': + handler.on_duration_unit(); + break; + case 'z': + handler.on_utc_offset(); + break; + case 'Z': + handler.on_tz_name(); + break; + // Alternative representation: + case 'E': { + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'c': + handler.on_datetime(numeric_system::alternative); + break; + case 'x': + handler.on_loc_date(numeric_system::alternative); + break; + case 'X': + handler.on_loc_time(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + } + case 'O': + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'w': + handler.on_dec0_weekday(numeric_system::alternative); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::alternative); + break; + case 'H': + handler.on_24_hour(numeric_system::alternative); + break; + case 'I': + handler.on_12_hour(numeric_system::alternative); + break; + case 'M': + handler.on_minute(numeric_system::alternative); + break; + case 'S': + handler.on_second(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + default: + FMT_THROW(format_error("invalid format")); + } + begin = ptr; + } + if (begin != ptr) handler.on_text(begin, ptr); + return ptr; +} + +template struct null_chrono_spec_handler { + FMT_CONSTEXPR void unsupported() { + static_cast(this)->unsupported(); + } + FMT_CONSTEXPR void on_abbr_weekday() { unsupported(); } + FMT_CONSTEXPR void on_full_weekday() { unsupported(); } + FMT_CONSTEXPR void on_dec0_weekday(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_dec1_weekday(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_abbr_month() { unsupported(); } + FMT_CONSTEXPR void on_full_month() { unsupported(); } + FMT_CONSTEXPR void on_24_hour(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_12_hour(numeric_system) { unsupported(); } + FMT_CONSTEXPR void 
on_minute(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_second(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_datetime(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_loc_date(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_loc_time(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_us_date() { unsupported(); } + FMT_CONSTEXPR void on_iso_date() { unsupported(); } + FMT_CONSTEXPR void on_12_hour_time() { unsupported(); } + FMT_CONSTEXPR void on_24_hour_time() { unsupported(); } + FMT_CONSTEXPR void on_iso_time() { unsupported(); } + FMT_CONSTEXPR void on_am_pm() { unsupported(); } + FMT_CONSTEXPR void on_duration_value() { unsupported(); } + FMT_CONSTEXPR void on_duration_unit() { unsupported(); } + FMT_CONSTEXPR void on_utc_offset() { unsupported(); } + FMT_CONSTEXPR void on_tz_name() { unsupported(); } +}; + +struct chrono_format_checker : null_chrono_spec_handler { + FMT_NORETURN void unsupported() { FMT_THROW(format_error("no date")); } + + template + FMT_CONSTEXPR void on_text(const Char*, const Char*) {} + FMT_CONSTEXPR void on_24_hour(numeric_system) {} + FMT_CONSTEXPR void on_12_hour(numeric_system) {} + FMT_CONSTEXPR void on_minute(numeric_system) {} + FMT_CONSTEXPR void on_second(numeric_system) {} + FMT_CONSTEXPR void on_12_hour_time() {} + FMT_CONSTEXPR void on_24_hour_time() {} + FMT_CONSTEXPR void on_iso_time() {} + FMT_CONSTEXPR void on_am_pm() {} + FMT_CONSTEXPR void on_duration_value() {} + FMT_CONSTEXPR void on_duration_unit() {} +}; + +template ::value)> +inline bool isnan(T) { + return false; +} +template ::value)> +inline bool isnan(T value) { + return std::isnan(value); +} + +template ::value)> +inline bool isfinite(T) { + return true; +} +template ::value)> +inline bool isfinite(T value) { + return std::isfinite(value); +} + +// Converts value to int and checks that it's in the range [0, upper). 
+template ::value)> +inline int to_nonnegative_int(T value, int upper) { + FMT_ASSERT(value >= 0 && to_unsigned(value) <= to_unsigned(upper), + "invalid value"); + (void)upper; + return static_cast(value); +} +template ::value)> +inline int to_nonnegative_int(T value, int upper) { + FMT_ASSERT( + std::isnan(value) || (value >= 0 && value <= static_cast(upper)), + "invalid value"); + (void)upper; + return static_cast(value); +} + +template ::value)> +inline T mod(T x, int y) { + return x % static_cast(y); +} +template ::value)> +inline T mod(T x, int y) { + return std::fmod(x, static_cast(y)); +} + +// If T is an integral type, maps T to its unsigned counterpart, otherwise +// leaves it unchanged (unlike std::make_unsigned). +template ::value> +struct make_unsigned_or_unchanged { + using type = T; +}; + +template struct make_unsigned_or_unchanged { + using type = typename std::make_unsigned::type; +}; + +#if FMT_SAFE_DURATION_CAST +// throwing version of safe_duration_cast +template +To fmt_safe_duration_cast(std::chrono::duration from) { + int ec; + To to = safe_duration_cast::safe_duration_cast(from, ec); + if (ec) FMT_THROW(format_error("cannot format duration")); + return to; +} +#endif + +template ::value)> +inline std::chrono::duration get_milliseconds( + std::chrono::duration d) { + // this may overflow and/or the result may not fit in the + // target type. 
+#if FMT_SAFE_DURATION_CAST + using CommonSecondsType = + typename std::common_type::type; + const auto d_as_common = fmt_safe_duration_cast(d); + const auto d_as_whole_seconds = + fmt_safe_duration_cast(d_as_common); + // this conversion should be nonproblematic + const auto diff = d_as_common - d_as_whole_seconds; + const auto ms = + fmt_safe_duration_cast>(diff); + return ms; +#else + auto s = std::chrono::duration_cast(d); + return std::chrono::duration_cast(d - s); +#endif +} + +template ::value)> +inline std::chrono::duration get_milliseconds( + std::chrono::duration d) { + using common_type = typename std::common_type::type; + auto ms = mod(d.count() * static_cast(Period::num) / + static_cast(Period::den) * 1000, + 1000); + return std::chrono::duration(static_cast(ms)); +} + +template ::value)> +OutputIt format_duration_value(OutputIt out, Rep val, int) { + return write(out, val); +} + +template ::value)> +OutputIt format_duration_value(OutputIt out, Rep val, int precision) { + auto specs = basic_format_specs(); + specs.precision = precision; + specs.type = precision > 0 ? 'f' : 'g'; + return write(out, val, specs); +} + +template +OutputIt copy_unit(string_view unit, OutputIt out, Char) { + return std::copy(unit.begin(), unit.end(), out); +} + +template +OutputIt copy_unit(string_view unit, OutputIt out, wchar_t) { + // This works when wchar_t is UTF-32 because units only contain characters + // that have the same representation in UTF-16 and UTF-32. 
+ utf8_to_utf16 u(unit); + return std::copy(u.c_str(), u.c_str() + u.size(), out); +} + +template +OutputIt format_duration_unit(OutputIt out) { + if (const char* unit = get_units()) + return copy_unit(string_view(unit), out, Char()); + *out++ = '['; + out = write(out, Period::num); + if (const_check(Period::den != 1)) { + *out++ = '/'; + out = write(out, Period::den); + } + *out++ = ']'; + *out++ = 's'; + return out; +} + +template +struct chrono_formatter { + FormatContext& context; + OutputIt out; + int precision; + bool localized = false; + // rep is unsigned to avoid overflow. + using rep = + conditional_t::value && sizeof(Rep) < sizeof(int), + unsigned, typename make_unsigned_or_unchanged::type>; + rep val; + using seconds = std::chrono::duration; + seconds s; + using milliseconds = std::chrono::duration; + bool negative; + + using char_type = typename FormatContext::char_type; + + explicit chrono_formatter(FormatContext& ctx, OutputIt o, + std::chrono::duration d) + : context(ctx), + out(o), + val(static_cast(d.count())), + negative(false) { + if (d.count() < 0) { + val = 0 - val; + negative = true; + } + + // this may overflow and/or the result may not fit in the + // target type. +#if FMT_SAFE_DURATION_CAST + // might need checked conversion (rep!=Rep) + auto tmpval = std::chrono::duration(val); + s = fmt_safe_duration_cast(tmpval); +#else + s = std::chrono::duration_cast( + std::chrono::duration(val)); +#endif + } + + // returns true if nan or inf, writes to out. + bool handle_nan_inf() { + if (isfinite(val)) { + return false; + } + if (isnan(val)) { + write_nan(); + return true; + } + // must be +-inf + if (val > 0) { + write_pinf(); + } else { + write_ninf(); + } + return true; + } + + Rep hour() const { return static_cast(mod((s.count() / 3600), 24)); } + + Rep hour12() const { + Rep hour = static_cast(mod((s.count() / 3600), 12)); + return hour <= 0 ? 
12 : hour; + } + + Rep minute() const { return static_cast(mod((s.count() / 60), 60)); } + Rep second() const { return static_cast(mod(s.count(), 60)); } + + std::tm time() const { + auto time = std::tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + time.tm_min = to_nonnegative_int(minute(), 60); + time.tm_sec = to_nonnegative_int(second(), 60); + return time; + } + + void write_sign() { + if (negative) { + *out++ = '-'; + negative = false; + } + } + + void write(Rep value, int width) { + write_sign(); + if (isnan(value)) return write_nan(); + uint32_or_64_or_128_t n = + to_unsigned(to_nonnegative_int(value, max_value())); + int num_digits = detail::count_digits(n); + if (width > num_digits) out = std::fill_n(out, width - num_digits, '0'); + out = format_decimal(out, n, num_digits).end; + } + + void write_nan() { std::copy_n("nan", 3, out); } + void write_pinf() { std::copy_n("inf", 3, out); } + void write_ninf() { std::copy_n("-inf", 4, out); } + + void format_localized(const tm& time, char format, char modifier = 0) { + if (isnan(val)) return write_nan(); + const auto& loc = localized ? context.locale().template get() + : std::locale::classic(); + out = detail::write(out, time, loc, format, modifier); + } + + void on_text(const char_type* begin, const char_type* end) { + std::copy(begin, end, out); + } + + // These are not implemented because durations don't have date information. 
+ void on_abbr_weekday() {} + void on_full_weekday() {} + void on_dec0_weekday(numeric_system) {} + void on_dec1_weekday(numeric_system) {} + void on_abbr_month() {} + void on_full_month() {} + void on_datetime(numeric_system) {} + void on_loc_date(numeric_system) {} + void on_loc_time(numeric_system) {} + void on_us_date() {} + void on_iso_date() {} + void on_utc_offset() {} + void on_tz_name() {} + + void on_24_hour(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(hour(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + format_localized(time, 'H', 'O'); + } + + void on_12_hour(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(hour12(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour12(), 12); + format_localized(time, 'I', 'O'); + } + + void on_minute(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(minute(), 2); + auto time = tm(); + time.tm_min = to_nonnegative_int(minute(), 60); + format_localized(time, 'M', 'O'); + } + + void on_second(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) { + write(second(), 2); +#if FMT_SAFE_DURATION_CAST + // convert rep->Rep + using duration_rep = std::chrono::duration; + using duration_Rep = std::chrono::duration; + auto tmpval = fmt_safe_duration_cast(duration_rep{val}); +#else + auto tmpval = std::chrono::duration(val); +#endif + auto ms = get_milliseconds(tmpval); + if (ms != std::chrono::milliseconds(0)) { + *out++ = '.'; + write(ms.count(), 3); + } + return; + } + auto time = tm(); + time.tm_sec = to_nonnegative_int(second(), 60); + format_localized(time, 'S', 'O'); + } + + void on_12_hour_time() { + if (handle_nan_inf()) return; + format_localized(time(), 'r'); + } + + void on_24_hour_time() { + if (handle_nan_inf()) { + *out++ = ':'; + handle_nan_inf(); + return; + } + 
+ write(hour(), 2); + *out++ = ':'; + write(minute(), 2); + } + + void on_iso_time() { + on_24_hour_time(); + *out++ = ':'; + if (handle_nan_inf()) return; + write(second(), 2); + } + + void on_am_pm() { + if (handle_nan_inf()) return; + format_localized(time(), 'p'); + } + + void on_duration_value() { + if (handle_nan_inf()) return; + write_sign(); + out = format_duration_value(out, val, precision); + } + + void on_duration_unit() { + out = format_duration_unit(out); + } +}; + +FMT_END_DETAIL_NAMESPACE + +#if defined(__cpp_lib_chrono) && __cpp_lib_chrono >= 201907 +using weekday = std::chrono::weekday; +#else +// A fallback version of weekday. +class weekday { + private: + unsigned char value; + + public: + weekday() = default; + explicit constexpr weekday(unsigned wd) noexcept + : value(static_cast(wd != 7 ? wd : 0)) {} + constexpr unsigned c_encoding() const noexcept { return value; } +}; +#endif + +// A rudimentary weekday formatter. +template <> struct formatter { + private: + bool localized = false; + + public: + FMT_CONSTEXPR auto parse(format_parse_context& ctx) -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + return begin; + } + + auto format(weekday wd, format_context& ctx) -> decltype(ctx.out()) { + auto time = std::tm(); + time.tm_wday = static_cast(wd.c_encoding()); + const auto& loc = localized ? 
ctx.locale().template get() + : std::locale::classic(); + return detail::write(ctx.out(), time, loc, 'a'); + } +}; + +template +struct formatter, Char> { + private: + basic_format_specs specs; + int precision = -1; + using arg_ref_type = detail::arg_ref; + arg_ref_type width_ref; + arg_ref_type precision_ref; + bool localized = false; + basic_string_view format_str; + using duration = std::chrono::duration; + + struct spec_handler { + formatter& f; + basic_format_parse_context& context; + basic_string_view format_str; + + template FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(detail::auto_id) { + return arg_ref_type(context.next_arg_id()); + } + + void on_error(const char* msg) { FMT_THROW(format_error(msg)); } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + f.specs.fill = fill; + } + FMT_CONSTEXPR void on_align(align_t align) { f.specs.align = align; } + FMT_CONSTEXPR void on_width(int width) { f.specs.width = width; } + FMT_CONSTEXPR void on_precision(int _precision) { + f.precision = _precision; + } + FMT_CONSTEXPR void end_precision() {} + + template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + f.width_ref = make_arg_ref(arg_id); + } + + template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + f.precision_ref = make_arg_ref(arg_id); + } + }; + + using iterator = typename basic_format_parse_context::iterator; + struct parse_range { + iterator begin; + iterator end; + }; + + FMT_CONSTEXPR parse_range do_parse(basic_format_parse_context& ctx) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end || *begin == '}') return {begin, begin}; + spec_handler handler{*this, ctx, format_str}; + begin = detail::parse_align(begin, end, handler); + if (begin == end) return {begin, begin}; + 
begin = detail::parse_width(begin, end, handler); + if (begin == end) return {begin, begin}; + if (*begin == '.') { + if (std::is_floating_point::value) + begin = detail::parse_precision(begin, end, handler); + else + handler.on_error("precision not allowed for this argument type"); + } + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + end = parse_chrono_format(begin, end, detail::chrono_format_checker()); + return {begin, end}; + } + + public: + FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) + -> decltype(ctx.begin()) { + auto range = do_parse(ctx); + format_str = basic_string_view( + &*range.begin, detail::to_unsigned(range.end - range.begin)); + return range.end; + } + + template + auto format(const duration& d, FormatContext& ctx) const + -> decltype(ctx.out()) { + auto specs_copy = specs; + auto precision_copy = precision; + auto begin = format_str.begin(), end = format_str.end(); + // As a possible future optimization, we could avoid extra copying if width + // is not specified. 
+ basic_memory_buffer buf; + auto out = std::back_inserter(buf); + detail::handle_dynamic_spec(specs_copy.width, + width_ref, ctx); + detail::handle_dynamic_spec(precision_copy, + precision_ref, ctx); + if (begin == end || *begin == '}') { + out = detail::format_duration_value(out, d.count(), precision_copy); + detail::format_duration_unit(out); + } else { + detail::chrono_formatter f( + ctx, out, d); + f.precision = precision_copy; + f.localized = localized; + detail::parse_chrono_format(begin, end, f); + } + return detail::write( + ctx.out(), basic_string_view(buf.data(), buf.size()), specs_copy); + } +}; + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_CHRONO_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/color.h b/contrib/fmt-8.0.1/include/fmt/color.h new file mode 100644 index 0000000000..3d5490e87f --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/color.h @@ -0,0 +1,627 @@ +// Formatting library for C++ - color support +// +// Copyright (c) 2018 - present, Victor Zverovich and fmt contributors +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_COLOR_H_ +#define FMT_COLOR_H_ + +#include "format.h" + +// __declspec(deprecated) is broken in some MSVC versions. 
+#if FMT_MSC_VER +# define FMT_DEPRECATED_NONMSVC +#else +# define FMT_DEPRECATED_NONMSVC FMT_DEPRECATED +#endif + +FMT_BEGIN_NAMESPACE +FMT_MODULE_EXPORT_BEGIN + +enum class color : uint32_t { + alice_blue = 0xF0F8FF, // rgb(240,248,255) + antique_white = 0xFAEBD7, // rgb(250,235,215) + aqua = 0x00FFFF, // rgb(0,255,255) + aquamarine = 0x7FFFD4, // rgb(127,255,212) + azure = 0xF0FFFF, // rgb(240,255,255) + beige = 0xF5F5DC, // rgb(245,245,220) + bisque = 0xFFE4C4, // rgb(255,228,196) + black = 0x000000, // rgb(0,0,0) + blanched_almond = 0xFFEBCD, // rgb(255,235,205) + blue = 0x0000FF, // rgb(0,0,255) + blue_violet = 0x8A2BE2, // rgb(138,43,226) + brown = 0xA52A2A, // rgb(165,42,42) + burly_wood = 0xDEB887, // rgb(222,184,135) + cadet_blue = 0x5F9EA0, // rgb(95,158,160) + chartreuse = 0x7FFF00, // rgb(127,255,0) + chocolate = 0xD2691E, // rgb(210,105,30) + coral = 0xFF7F50, // rgb(255,127,80) + cornflower_blue = 0x6495ED, // rgb(100,149,237) + cornsilk = 0xFFF8DC, // rgb(255,248,220) + crimson = 0xDC143C, // rgb(220,20,60) + cyan = 0x00FFFF, // rgb(0,255,255) + dark_blue = 0x00008B, // rgb(0,0,139) + dark_cyan = 0x008B8B, // rgb(0,139,139) + dark_golden_rod = 0xB8860B, // rgb(184,134,11) + dark_gray = 0xA9A9A9, // rgb(169,169,169) + dark_green = 0x006400, // rgb(0,100,0) + dark_khaki = 0xBDB76B, // rgb(189,183,107) + dark_magenta = 0x8B008B, // rgb(139,0,139) + dark_olive_green = 0x556B2F, // rgb(85,107,47) + dark_orange = 0xFF8C00, // rgb(255,140,0) + dark_orchid = 0x9932CC, // rgb(153,50,204) + dark_red = 0x8B0000, // rgb(139,0,0) + dark_salmon = 0xE9967A, // rgb(233,150,122) + dark_sea_green = 0x8FBC8F, // rgb(143,188,143) + dark_slate_blue = 0x483D8B, // rgb(72,61,139) + dark_slate_gray = 0x2F4F4F, // rgb(47,79,79) + dark_turquoise = 0x00CED1, // rgb(0,206,209) + dark_violet = 0x9400D3, // rgb(148,0,211) + deep_pink = 0xFF1493, // rgb(255,20,147) + deep_sky_blue = 0x00BFFF, // rgb(0,191,255) + dim_gray = 0x696969, // rgb(105,105,105) + dodger_blue = 0x1E90FF, 
// rgb(30,144,255) + fire_brick = 0xB22222, // rgb(178,34,34) + floral_white = 0xFFFAF0, // rgb(255,250,240) + forest_green = 0x228B22, // rgb(34,139,34) + fuchsia = 0xFF00FF, // rgb(255,0,255) + gainsboro = 0xDCDCDC, // rgb(220,220,220) + ghost_white = 0xF8F8FF, // rgb(248,248,255) + gold = 0xFFD700, // rgb(255,215,0) + golden_rod = 0xDAA520, // rgb(218,165,32) + gray = 0x808080, // rgb(128,128,128) + green = 0x008000, // rgb(0,128,0) + green_yellow = 0xADFF2F, // rgb(173,255,47) + honey_dew = 0xF0FFF0, // rgb(240,255,240) + hot_pink = 0xFF69B4, // rgb(255,105,180) + indian_red = 0xCD5C5C, // rgb(205,92,92) + indigo = 0x4B0082, // rgb(75,0,130) + ivory = 0xFFFFF0, // rgb(255,255,240) + khaki = 0xF0E68C, // rgb(240,230,140) + lavender = 0xE6E6FA, // rgb(230,230,250) + lavender_blush = 0xFFF0F5, // rgb(255,240,245) + lawn_green = 0x7CFC00, // rgb(124,252,0) + lemon_chiffon = 0xFFFACD, // rgb(255,250,205) + light_blue = 0xADD8E6, // rgb(173,216,230) + light_coral = 0xF08080, // rgb(240,128,128) + light_cyan = 0xE0FFFF, // rgb(224,255,255) + light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210) + light_gray = 0xD3D3D3, // rgb(211,211,211) + light_green = 0x90EE90, // rgb(144,238,144) + light_pink = 0xFFB6C1, // rgb(255,182,193) + light_salmon = 0xFFA07A, // rgb(255,160,122) + light_sea_green = 0x20B2AA, // rgb(32,178,170) + light_sky_blue = 0x87CEFA, // rgb(135,206,250) + light_slate_gray = 0x778899, // rgb(119,136,153) + light_steel_blue = 0xB0C4DE, // rgb(176,196,222) + light_yellow = 0xFFFFE0, // rgb(255,255,224) + lime = 0x00FF00, // rgb(0,255,0) + lime_green = 0x32CD32, // rgb(50,205,50) + linen = 0xFAF0E6, // rgb(250,240,230) + magenta = 0xFF00FF, // rgb(255,0,255) + maroon = 0x800000, // rgb(128,0,0) + medium_aquamarine = 0x66CDAA, // rgb(102,205,170) + medium_blue = 0x0000CD, // rgb(0,0,205) + medium_orchid = 0xBA55D3, // rgb(186,85,211) + medium_purple = 0x9370DB, // rgb(147,112,219) + medium_sea_green = 0x3CB371, // rgb(60,179,113) + medium_slate_blue = 
0x7B68EE, // rgb(123,104,238) + medium_spring_green = 0x00FA9A, // rgb(0,250,154) + medium_turquoise = 0x48D1CC, // rgb(72,209,204) + medium_violet_red = 0xC71585, // rgb(199,21,133) + midnight_blue = 0x191970, // rgb(25,25,112) + mint_cream = 0xF5FFFA, // rgb(245,255,250) + misty_rose = 0xFFE4E1, // rgb(255,228,225) + moccasin = 0xFFE4B5, // rgb(255,228,181) + navajo_white = 0xFFDEAD, // rgb(255,222,173) + navy = 0x000080, // rgb(0,0,128) + old_lace = 0xFDF5E6, // rgb(253,245,230) + olive = 0x808000, // rgb(128,128,0) + olive_drab = 0x6B8E23, // rgb(107,142,35) + orange = 0xFFA500, // rgb(255,165,0) + orange_red = 0xFF4500, // rgb(255,69,0) + orchid = 0xDA70D6, // rgb(218,112,214) + pale_golden_rod = 0xEEE8AA, // rgb(238,232,170) + pale_green = 0x98FB98, // rgb(152,251,152) + pale_turquoise = 0xAFEEEE, // rgb(175,238,238) + pale_violet_red = 0xDB7093, // rgb(219,112,147) + papaya_whip = 0xFFEFD5, // rgb(255,239,213) + peach_puff = 0xFFDAB9, // rgb(255,218,185) + peru = 0xCD853F, // rgb(205,133,63) + pink = 0xFFC0CB, // rgb(255,192,203) + plum = 0xDDA0DD, // rgb(221,160,221) + powder_blue = 0xB0E0E6, // rgb(176,224,230) + purple = 0x800080, // rgb(128,0,128) + rebecca_purple = 0x663399, // rgb(102,51,153) + red = 0xFF0000, // rgb(255,0,0) + rosy_brown = 0xBC8F8F, // rgb(188,143,143) + royal_blue = 0x4169E1, // rgb(65,105,225) + saddle_brown = 0x8B4513, // rgb(139,69,19) + salmon = 0xFA8072, // rgb(250,128,114) + sandy_brown = 0xF4A460, // rgb(244,164,96) + sea_green = 0x2E8B57, // rgb(46,139,87) + sea_shell = 0xFFF5EE, // rgb(255,245,238) + sienna = 0xA0522D, // rgb(160,82,45) + silver = 0xC0C0C0, // rgb(192,192,192) + sky_blue = 0x87CEEB, // rgb(135,206,235) + slate_blue = 0x6A5ACD, // rgb(106,90,205) + slate_gray = 0x708090, // rgb(112,128,144) + snow = 0xFFFAFA, // rgb(255,250,250) + spring_green = 0x00FF7F, // rgb(0,255,127) + steel_blue = 0x4682B4, // rgb(70,130,180) + tan = 0xD2B48C, // rgb(210,180,140) + teal = 0x008080, // rgb(0,128,128) + thistle = 
0xD8BFD8, // rgb(216,191,216) + tomato = 0xFF6347, // rgb(255,99,71) + turquoise = 0x40E0D0, // rgb(64,224,208) + violet = 0xEE82EE, // rgb(238,130,238) + wheat = 0xF5DEB3, // rgb(245,222,179) + white = 0xFFFFFF, // rgb(255,255,255) + white_smoke = 0xF5F5F5, // rgb(245,245,245) + yellow = 0xFFFF00, // rgb(255,255,0) + yellow_green = 0x9ACD32 // rgb(154,205,50) +}; // enum class color + +enum class terminal_color : uint8_t { + black = 30, + red, + green, + yellow, + blue, + magenta, + cyan, + white, + bright_black = 90, + bright_red, + bright_green, + bright_yellow, + bright_blue, + bright_magenta, + bright_cyan, + bright_white +}; + +enum class emphasis : uint8_t { + bold = 1, + italic = 1 << 1, + underline = 1 << 2, + strikethrough = 1 << 3 +}; + +// rgb is a struct for red, green and blue colors. +// Using the name "rgb" makes some editors show the color in a tooltip. +struct rgb { + FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {} + FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {} + FMT_CONSTEXPR rgb(uint32_t hex) + : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {} + FMT_CONSTEXPR rgb(color hex) + : r((uint32_t(hex) >> 16) & 0xFF), + g((uint32_t(hex) >> 8) & 0xFF), + b(uint32_t(hex) & 0xFF) {} + uint8_t r; + uint8_t g; + uint8_t b; +}; + +FMT_BEGIN_DETAIL_NAMESPACE + +// color is a struct of either a rgb color or a terminal color. 
+struct color_type { + FMT_CONSTEXPR color_type() FMT_NOEXCEPT : is_rgb(), value{} {} + FMT_CONSTEXPR color_type(color rgb_color) FMT_NOEXCEPT : is_rgb(true), + value{} { + value.rgb_color = static_cast(rgb_color); + } + FMT_CONSTEXPR color_type(rgb rgb_color) FMT_NOEXCEPT : is_rgb(true), value{} { + value.rgb_color = (static_cast(rgb_color.r) << 16) | + (static_cast(rgb_color.g) << 8) | rgb_color.b; + } + FMT_CONSTEXPR color_type(terminal_color term_color) FMT_NOEXCEPT : is_rgb(), + value{} { + value.term_color = static_cast(term_color); + } + bool is_rgb; + union color_union { + uint8_t term_color; + uint32_t rgb_color; + } value; +}; + +FMT_END_DETAIL_NAMESPACE + +/** A text style consisting of foreground and background colors and emphasis. */ +class text_style { + public: + FMT_CONSTEXPR text_style(emphasis em = emphasis()) FMT_NOEXCEPT + : set_foreground_color(), + set_background_color(), + ems(em) {} + + FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + background_color.value.rgb_color |= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) | + static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR text_style operator|(text_style lhs, + const text_style& rhs) { + return lhs |= rhs; + } + + FMT_DEPRECATED_NONMSVC FMT_CONSTEXPR text_style& operator&=( + const 
text_style& rhs) { + return and_assign(rhs); + } + + FMT_DEPRECATED_NONMSVC friend FMT_CONSTEXPR text_style + operator&(text_style lhs, const text_style& rhs) { + return lhs.and_assign(rhs); + } + + FMT_CONSTEXPR bool has_foreground() const FMT_NOEXCEPT { + return set_foreground_color; + } + FMT_CONSTEXPR bool has_background() const FMT_NOEXCEPT { + return set_background_color; + } + FMT_CONSTEXPR bool has_emphasis() const FMT_NOEXCEPT { + return static_cast(ems) != 0; + } + FMT_CONSTEXPR detail::color_type get_foreground() const FMT_NOEXCEPT { + FMT_ASSERT(has_foreground(), "no foreground specified for this style"); + return foreground_color; + } + FMT_CONSTEXPR detail::color_type get_background() const FMT_NOEXCEPT { + FMT_ASSERT(has_background(), "no background specified for this style"); + return background_color; + } + FMT_CONSTEXPR emphasis get_emphasis() const FMT_NOEXCEPT { + FMT_ASSERT(has_emphasis(), "no emphasis specified for this style"); + return ems; + } + + private: + FMT_CONSTEXPR text_style(bool is_foreground, + detail::color_type text_color) FMT_NOEXCEPT + : set_foreground_color(), + set_background_color(), + ems() { + if (is_foreground) { + foreground_color = text_color; + set_foreground_color = true; + } else { + background_color = text_color; + set_background_color = true; + } + } + + // DEPRECATED! 
+ FMT_CONSTEXPR text_style& and_assign(const text_style& rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + background_color.value.rgb_color &= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) & + static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR_DECL text_style fg(detail::color_type foreground) + FMT_NOEXCEPT; + + friend FMT_CONSTEXPR_DECL text_style bg(detail::color_type background) + FMT_NOEXCEPT; + + detail::color_type foreground_color; + detail::color_type background_color; + bool set_foreground_color; + bool set_background_color; + emphasis ems; +}; + +/** Creates a text style from the foreground (text) color. */ +FMT_CONSTEXPR inline text_style fg(detail::color_type foreground) FMT_NOEXCEPT { + return text_style(true, foreground); +} + +/** Creates a text style from the background color. */ +FMT_CONSTEXPR inline text_style bg(detail::color_type background) FMT_NOEXCEPT { + return text_style(false, background); +} + +FMT_CONSTEXPR inline text_style operator|(emphasis lhs, + emphasis rhs) FMT_NOEXCEPT { + return text_style(lhs) | rhs; +} + +FMT_BEGIN_DETAIL_NAMESPACE + +template struct ansi_color_escape { + FMT_CONSTEXPR ansi_color_escape(detail::color_type text_color, + const char* esc) FMT_NOEXCEPT { + // If we have a terminal color, we need to output another escape code + // sequence. 
+ if (!text_color.is_rgb) { + bool is_background = esc == string_view("\x1b[48;2;"); + uint32_t value = text_color.value.term_color; + // Background ASCII codes are the same as the foreground ones but with + // 10 more. + if (is_background) value += 10u; + + size_t index = 0; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + + if (value >= 100u) { + buffer[index++] = static_cast('1'); + value %= 100u; + } + buffer[index++] = static_cast('0' + value / 10u); + buffer[index++] = static_cast('0' + value % 10u); + + buffer[index++] = static_cast('m'); + buffer[index++] = static_cast('\0'); + return; + } + + for (int i = 0; i < 7; i++) { + buffer[i] = static_cast(esc[i]); + } + rgb color(text_color.value.rgb_color); + to_esc(color.r, buffer + 7, ';'); + to_esc(color.g, buffer + 11, ';'); + to_esc(color.b, buffer + 15, 'm'); + buffer[19] = static_cast(0); + } + FMT_CONSTEXPR ansi_color_escape(emphasis em) FMT_NOEXCEPT { + uint8_t em_codes[4] = {}; + uint8_t em_bits = static_cast(em); + if (em_bits & static_cast(emphasis::bold)) em_codes[0] = 1; + if (em_bits & static_cast(emphasis::italic)) em_codes[1] = 3; + if (em_bits & static_cast(emphasis::underline)) em_codes[2] = 4; + if (em_bits & static_cast(emphasis::strikethrough)) + em_codes[3] = 9; + + size_t index = 0; + for (int i = 0; i < 4; ++i) { + if (!em_codes[i]) continue; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + buffer[index++] = static_cast('0' + em_codes[i]); + buffer[index++] = static_cast('m'); + } + buffer[index++] = static_cast(0); + } + FMT_CONSTEXPR operator const Char*() const FMT_NOEXCEPT { return buffer; } + + FMT_CONSTEXPR const Char* begin() const FMT_NOEXCEPT { return buffer; } + FMT_CONSTEXPR_CHAR_TRAITS const Char* end() const FMT_NOEXCEPT { + return buffer + std::char_traits::length(buffer); + } + + private: + Char buffer[7u + 3u * 4u + 1u]; + + static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out, + char delimiter) FMT_NOEXCEPT 
{ + out[0] = static_cast('0' + c / 100); + out[1] = static_cast('0' + c / 10 % 10); + out[2] = static_cast('0' + c % 10); + out[3] = static_cast(delimiter); + } +}; + +template +FMT_CONSTEXPR ansi_color_escape make_foreground_color( + detail::color_type foreground) FMT_NOEXCEPT { + return ansi_color_escape(foreground, "\x1b[38;2;"); +} + +template +FMT_CONSTEXPR ansi_color_escape make_background_color( + detail::color_type background) FMT_NOEXCEPT { + return ansi_color_escape(background, "\x1b[48;2;"); +} + +template +FMT_CONSTEXPR ansi_color_escape make_emphasis(emphasis em) FMT_NOEXCEPT { + return ansi_color_escape(em); +} + +template +inline void fputs(const Char* chars, FILE* stream) FMT_NOEXCEPT { + std::fputs(chars, stream); +} + +template <> +inline void fputs(const wchar_t* chars, FILE* stream) FMT_NOEXCEPT { + std::fputws(chars, stream); +} + +template inline void reset_color(FILE* stream) FMT_NOEXCEPT { + fputs("\x1b[0m", stream); +} + +template <> inline void reset_color(FILE* stream) FMT_NOEXCEPT { + fputs(L"\x1b[0m", stream); +} + +template +inline void reset_color(buffer& buffer) FMT_NOEXCEPT { + auto reset_color = string_view("\x1b[0m"); + buffer.append(reset_color.begin(), reset_color.end()); +} + +template +void vformat_to(buffer& buf, const text_style& ts, + basic_string_view format_str, + basic_format_args>> args) { + bool has_style = false; + if (ts.has_emphasis()) { + has_style = true; + auto emphasis = detail::make_emphasis(ts.get_emphasis()); + buf.append(emphasis.begin(), emphasis.end()); + } + if (ts.has_foreground()) { + has_style = true; + auto foreground = detail::make_foreground_color(ts.get_foreground()); + buf.append(foreground.begin(), foreground.end()); + } + if (ts.has_background()) { + has_style = true; + auto background = detail::make_background_color(ts.get_background()); + buf.append(background.begin(), background.end()); + } + detail::vformat_to(buf, format_str, args, {}); + if (has_style) detail::reset_color(buf); +} + 
+FMT_END_DETAIL_NAMESPACE + +template > +void vprint(std::FILE* f, const text_style& ts, const S& format, + basic_format_args>> args) { + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format), args); + buf.push_back(Char(0)); + detail::fputs(buf.data(), f); +} + +/** + \rst + Formats a string and prints it to the specified file stream using ANSI + escape sequences to specify text formatting. + + **Example**:: + + fmt::print(fmt::emphasis::bold | fg(fmt::color::red), + "Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template ::value)> +void print(std::FILE* f, const text_style& ts, const S& format_str, + const Args&... args) { + vprint(f, ts, format_str, + fmt::make_args_checked(format_str, args...)); +} + +/** + \rst + Formats a string and prints it to stdout using ANSI escape sequences to + specify text formatting. + + **Example**:: + + fmt::print(fmt::emphasis::bold | fg(fmt::color::red), + "Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template ::value)> +void print(const text_style& ts, const S& format_str, const Args&... args) { + return print(stdout, ts, format_str, args...); +} + +template > +inline std::basic_string vformat( + const text_style& ts, const S& format_str, + basic_format_args>> args) { + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format_str), args); + return fmt::to_string(buf); +} + +/** + \rst + Formats arguments and returns the result as a string using ANSI + escape sequences to specify text formatting. + + **Example**:: + + #include + std::string message = fmt::format(fmt::emphasis::bold | fg(fmt::color::red), + "The answer is {}", 42); + \endrst +*/ +template > +inline std::basic_string format(const text_style& ts, const S& format_str, + const Args&... args) { + return fmt::vformat(ts, to_string_view(format_str), + fmt::make_args_checked(format_str, args...)); +} + +/** + Formats a string with the given text_style and writes the output to ``out``. 
+ */ +template ::value)> +OutputIt vformat_to( + OutputIt out, const text_style& ts, basic_string_view format_str, + basic_format_args>> args) { + auto&& buf = detail::get_buffer(out); + detail::vformat_to(buf, ts, format_str, args); + return detail::get_iterator(buf); +} + +/** + \rst + Formats arguments with the given text_style, writes the result to the output + iterator ``out`` and returns the iterator past the end of the output range. + + **Example**:: + + std::vector out; + fmt::format_to(std::back_inserter(out), + fmt::emphasis::bold | fg(fmt::color::red), "{}", 42); + \endrst +*/ +template >::value&& + detail::is_string::value> +inline auto format_to(OutputIt out, const text_style& ts, const S& format_str, + Args&&... args) -> + typename std::enable_if::type { + return vformat_to(out, ts, to_string_view(format_str), + fmt::make_args_checked(format_str, args...)); +} + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_COLOR_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/compile.h b/contrib/fmt-8.0.1/include/fmt/compile.h new file mode 100644 index 0000000000..00000c92e3 --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/compile.h @@ -0,0 +1,639 @@ +// Formatting library for C++ - experimental format string compilation +// +// Copyright (c) 2012 - present, Victor Zverovich and fmt contributors +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_COMPILE_H_ +#define FMT_COMPILE_H_ + +#include "format.h" + +FMT_BEGIN_NAMESPACE +namespace detail { + +// An output iterator that counts the number of objects written to it and +// discards them. +class counting_iterator { + private: + size_t count_; + + public: + using iterator_category = std::output_iterator_tag; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + using _Unchecked_type = counting_iterator; // Mark iterator as checked. 
+ + struct value_type { + template void operator=(const T&) {} + }; + + counting_iterator() : count_(0) {} + + size_t count() const { return count_; } + + counting_iterator& operator++() { + ++count_; + return *this; + } + counting_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } + + friend counting_iterator operator+(counting_iterator it, difference_type n) { + it.count_ += static_cast(n); + return it; + } + + value_type operator*() const { return {}; } +}; + +template +inline counting_iterator copy_str(InputIt begin, InputIt end, + counting_iterator it) { + return it + (end - begin); +} + +template class truncating_iterator_base { + protected: + OutputIt out_; + size_t limit_; + size_t count_ = 0; + + truncating_iterator_base() : out_(), limit_(0) {} + + truncating_iterator_base(OutputIt out, size_t limit) + : out_(out), limit_(limit) {} + + public: + using iterator_category = std::output_iterator_tag; + using value_type = typename std::iterator_traits::value_type; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + using _Unchecked_type = + truncating_iterator_base; // Mark iterator as checked. + + OutputIt base() const { return out_; } + size_t count() const { return count_; } +}; + +// An output iterator that truncates the output and counts the number of objects +// written to it. 
+template ::value_type>::type> +class truncating_iterator; + +template +class truncating_iterator + : public truncating_iterator_base { + mutable typename truncating_iterator_base::value_type blackhole_; + + public: + using value_type = typename truncating_iterator_base::value_type; + + truncating_iterator() = default; + + truncating_iterator(OutputIt out, size_t limit) + : truncating_iterator_base(out, limit) {} + + truncating_iterator& operator++() { + if (this->count_++ < this->limit_) ++this->out_; + return *this; + } + + truncating_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } + + value_type& operator*() const { + return this->count_ < this->limit_ ? *this->out_ : blackhole_; + } +}; + +template +class truncating_iterator + : public truncating_iterator_base { + public: + truncating_iterator() = default; + + truncating_iterator(OutputIt out, size_t limit) + : truncating_iterator_base(out, limit) {} + + template truncating_iterator& operator=(T val) { + if (this->count_++ < this->limit_) *this->out_++ = val; + return *this; + } + + truncating_iterator& operator++() { return *this; } + truncating_iterator& operator++(int) { return *this; } + truncating_iterator& operator*() { return *this; } +}; + +// A compile-time string which is compiled into fast formatting code. +class compiled_string {}; + +template +struct is_compiled_string : std::is_base_of {}; + +/** + \rst + Converts a string literal *s* into a format string that will be parsed at + compile time and converted into efficient formatting code. Requires C++17 + ``constexpr if`` compiler support. + + **Example**:: + + // Converts 42 into std::string using the most efficient method and no + // runtime format string processing. 
+ std::string s = fmt::format(FMT_COMPILE("{}"), 42); + \endrst + */ +#ifdef __cpp_if_constexpr +# define FMT_COMPILE(s) \ + FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit) +#else +# define FMT_COMPILE(s) FMT_STRING(s) +#endif + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template Str> +struct udl_compiled_string : compiled_string { + using char_type = Char; + constexpr operator basic_string_view() const { + return {Str.data, N - 1}; + } +}; +#endif + +template +const T& first(const T& value, const Tail&...) { + return value; +} + +#ifdef __cpp_if_constexpr +template struct type_list {}; + +// Returns a reference to the argument at index N from [first, rest...]. +template +constexpr const auto& get([[maybe_unused]] const T& first, + [[maybe_unused]] const Args&... rest) { + static_assert(N < 1 + sizeof...(Args), "index is out of bounds"); + if constexpr (N == 0) + return first; + else + return get(rest...); +} + +template +constexpr int get_arg_index_by_name(basic_string_view name, + type_list) { + return get_arg_index_by_name(name); +} + +template struct get_type_impl; + +template struct get_type_impl> { + using type = remove_cvref_t(std::declval()...))>; +}; + +template +using get_type = typename get_type_impl::type; + +template struct is_compiled_format : std::false_type {}; + +template struct text { + basic_string_view data; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&...) const { + return write(out, data); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template +constexpr text make_text(basic_string_view s, size_t pos, + size_t size) { + return {{&s[pos], size}}; +} + +template struct code_unit { + Char value; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&...) const { + return write(out, value); + } +}; + +// This ensures that the argument type is convertible to `const T&`. +template +constexpr const T& get_arg_checked(const Args&... 
args) { + const auto& arg = get(args...); + if constexpr (detail::is_named_arg>()) { + return arg.value; + } else { + return arg; + } +} + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument N. +template struct field { + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&... args) const { + return write(out, get_arg_checked(args...)); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument with name. +template struct runtime_named_field { + using char_type = Char; + basic_string_view name; + + template + constexpr static bool try_format_argument( + OutputIt& out, + // [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9 + [[maybe_unused]] basic_string_view arg_name, const T& arg) { + if constexpr (is_named_arg::type>::value) { + if (arg_name == arg.name) { + out = write(out, arg.value); + return true; + } + } + return false; + } + + template + constexpr OutputIt format(OutputIt out, const Args&... args) const { + bool found = (try_format_argument(out, name, args) || ...); + if (!found) { + throw format_error("argument with specified name is not found"); + } + return out; + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument N and has format specifiers. +template struct spec_field { + using char_type = Char; + formatter fmt; + + template + constexpr FMT_INLINE OutputIt format(OutputIt out, + const Args&... args) const { + const auto& vargs = + fmt::make_format_args>(args...); + basic_format_context ctx(out, vargs); + return fmt.format(get_arg_checked(args...), ctx); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template struct concat { + L lhs; + R rhs; + using char_type = typename L::char_type; + + template + constexpr OutputIt format(OutputIt out, const Args&... 
args) const { + out = lhs.format(out, args...); + return rhs.format(out, args...); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template +constexpr concat make_concat(L lhs, R rhs) { + return {lhs, rhs}; +} + +struct unknown_format {}; + +template +constexpr size_t parse_text(basic_string_view str, size_t pos) { + for (size_t size = str.size(); pos != size; ++pos) { + if (str[pos] == '{' || str[pos] == '}') break; + } + return pos; +} + +template +constexpr auto compile_format_string(S format_str); + +template +constexpr auto parse_tail(T head, S format_str) { + if constexpr (POS != + basic_string_view(format_str).size()) { + constexpr auto tail = compile_format_string(format_str); + if constexpr (std::is_same, + unknown_format>()) + return tail; + else + return make_concat(head, tail); + } else { + return head; + } +} + +template struct parse_specs_result { + formatter fmt; + size_t end; + int next_arg_id; +}; + +constexpr int manual_indexing_id = -1; + +template +constexpr parse_specs_result parse_specs(basic_string_view str, + size_t pos, int next_arg_id) { + str.remove_prefix(pos); + auto ctx = basic_format_parse_context(str, {}, next_arg_id); + auto f = formatter(); + auto end = f.parse(ctx); + return {f, pos + fmt::detail::to_unsigned(end - str.data()) + 1, + next_arg_id == 0 ? 
manual_indexing_id : ctx.next_arg_id()}; +} + +template struct arg_id_handler { + arg_ref arg_id; + + constexpr int operator()() { + FMT_ASSERT(false, "handler cannot be used with automatic indexing"); + return 0; + } + constexpr int operator()(int id) { + arg_id = arg_ref(id); + return 0; + } + constexpr int operator()(basic_string_view id) { + arg_id = arg_ref(id); + return 0; + } + + constexpr void on_error(const char* message) { throw format_error(message); } +}; + +template struct parse_arg_id_result { + arg_ref arg_id; + const Char* arg_id_end; +}; + +template +constexpr auto parse_arg_id(const Char* begin, const Char* end) { + auto handler = arg_id_handler{arg_ref{}}; + auto arg_id_end = parse_arg_id(begin, end, handler); + return parse_arg_id_result{handler.arg_id, arg_id_end}; +} + +template struct field_type { + using type = remove_cvref_t; +}; + +template +struct field_type::value>> { + using type = remove_cvref_t; +}; + +template +constexpr auto parse_replacement_field_then_tail(S format_str) { + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type(); + if constexpr (c == '}') { + return parse_tail( + field::type, ARG_INDEX>(), + format_str); + } else if constexpr (c == ':') { + constexpr auto result = parse_specs::type>( + str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID); + return parse_tail( + spec_field::type, ARG_INDEX>{ + result.fmt}, + format_str); + } +} + +// Compiles a non-empty format string and returns the compiled representation +// or unknown_format() on unrecognized input. 
+template +constexpr auto compile_format_string(S format_str) { + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + if constexpr (str[POS] == '{') { + if constexpr (POS + 1 == str.size()) + throw format_error("unmatched '{' in format string"); + if constexpr (str[POS + 1] == '{') { + return parse_tail(make_text(str, POS, 1), format_str); + } else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') { + static_assert(ID != manual_indexing_id, + "cannot switch from manual to automatic argument indexing"); + constexpr auto next_id = + ID != manual_indexing_id ? ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail, Args, + POS + 1, ID, next_id>( + format_str); + } else { + constexpr auto arg_id_result = + parse_arg_id(str.data() + POS + 1, str.data() + str.size()); + constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data(); + constexpr char_type c = + arg_id_end_pos != str.size() ? str[arg_id_end_pos] : char_type(); + static_assert(c == '}' || c == ':', "missing '}' in format string"); + if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) { + static_assert( + ID == manual_indexing_id || ID == 0, + "cannot switch from automatic to manual argument indexing"); + constexpr auto arg_index = arg_id_result.arg_id.val.index; + return parse_replacement_field_then_tail, + Args, arg_id_end_pos, + arg_index, manual_indexing_id>( + format_str); + } else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) { + constexpr auto arg_index = + get_arg_index_by_name(arg_id_result.arg_id.val.name, Args{}); + if constexpr (arg_index != invalid_arg_index) { + constexpr auto next_id = + ID != manual_indexing_id ? 
ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail< + decltype(get_type::value), Args, arg_id_end_pos, + arg_index, next_id>(format_str); + } else { + if constexpr (c == '}') { + return parse_tail( + runtime_named_field{arg_id_result.arg_id.val.name}, + format_str); + } else if constexpr (c == ':') { + return unknown_format(); // no type info for specs parsing + } + } + } + } + } else if constexpr (str[POS] == '}') { + if constexpr (POS + 1 == str.size()) + throw format_error("unmatched '}' in format string"); + return parse_tail(make_text(str, POS, 1), format_str); + } else { + constexpr auto end = parse_text(str, POS + 1); + if constexpr (end - POS > 1) { + return parse_tail(make_text(str, POS, end - POS), + format_str); + } else { + return parse_tail(code_unit{str[POS]}, + format_str); + } + } +} + +template ::value)> +constexpr auto compile(S format_str) { + constexpr auto str = basic_string_view(format_str); + if constexpr (str.size() == 0) { + return detail::make_text(str, 0, 0); + } else { + constexpr auto result = + detail::compile_format_string, 0, 0>( + format_str); + return result; + } +} +#endif // __cpp_if_constexpr +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +#ifdef __cpp_if_constexpr + +template ::value)> +FMT_INLINE std::basic_string format(const CompiledFormat& cf, + const Args&... args) { + auto s = std::basic_string(); + cf.format(std::back_inserter(s), args...); + return s; +} + +template ::value)> +constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat& cf, + const Args&... args) { + return cf.format(out, args...); +} + +template ::value)> +FMT_INLINE std::basic_string format(const S&, + Args&&... 
args) { + if constexpr (std::is_same::value) { + constexpr auto str = basic_string_view(S()); + if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') { + const auto& first = detail::first(args...); + if constexpr (detail::is_named_arg< + remove_cvref_t>::value) { + return fmt::to_string(first.value); + } else { + return fmt::to_string(first); + } + } + } + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, + detail::unknown_format>()) { + return format(static_cast>(S()), + std::forward(args)...); + } else { + return format(compiled, std::forward(args)...); + } +} + +template ::value)> +FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S&, Args&&... args) { + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, + detail::unknown_format>()) { + return format_to(out, + static_cast>(S()), + std::forward(args)...); + } else { + return format_to(out, compiled, std::forward(args)...); + } +} +#endif + +template ::value)> +format_to_n_result format_to_n(OutputIt out, size_t n, + const S& format_str, Args&&... args) { + auto it = format_to(detail::truncating_iterator(out, n), format_str, + std::forward(args)...); + return {it.base(), it.count()}; +} + +template ::value)> +size_t formatted_size(const S& format_str, const Args&... args) { + return format_to(detail::counting_iterator(), format_str, args...).count(); +} + +template ::value)> +void print(std::FILE* f, const S& format_str, const Args&... args) { + memory_buffer buffer; + format_to(std::back_inserter(buffer), format_str, args...); + detail::print(f, {buffer.data(), buffer.size()}); +} + +template ::value)> +void print(const S& format_str, const Args&... 
args) { + print(stdout, format_str, args...); +} + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +inline namespace literals { +template +constexpr detail::udl_compiled_string< + remove_cvref_t, + sizeof(Str.data) / sizeof(decltype(Str.data[0])), Str> +operator""_cf() { + return {}; +} +} // namespace literals +#endif + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_COMPILE_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/core.h b/contrib/fmt-8.0.1/include/fmt/core.h new file mode 100644 index 0000000000..d058398ac9 --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/core.h @@ -0,0 +1,3002 @@ +// Formatting library for C++ - the core API for char/UTF-8 +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_CORE_H_ +#define FMT_CORE_H_ + +#include // std::FILE +#include +#include +#include +#include +#include + +// The fmt library version in the form major * 10000 + minor * 100 + patch. +#define FMT_VERSION 80001 + +#ifdef __clang__ +# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) +#else +# define FMT_CLANG_VERSION 0 +#endif + +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) +# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# define FMT_GCC_PRAGMA(arg) _Pragma(arg) +#else +# define FMT_GCC_VERSION 0 +# define FMT_GCC_PRAGMA(arg) +#endif + +#if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) +# define FMT_HAS_GXX_CXX11 FMT_GCC_VERSION +#else +# define FMT_HAS_GXX_CXX11 0 +#endif + +#if defined(__INTEL_COMPILER) +# define FMT_ICC_VERSION __INTEL_COMPILER +#else +# define FMT_ICC_VERSION 0 +#endif + +#ifdef __NVCC__ +# define FMT_NVCC __NVCC__ +#else +# define FMT_NVCC 0 +#endif + +#ifdef _MSC_VER +# define FMT_MSC_VER _MSC_VER +# define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) +#else +# define FMT_MSC_VER 0 +# define FMT_MSC_WARNING(...) 
+#endif + +#ifdef __has_feature +# define FMT_HAS_FEATURE(x) __has_feature(x) +#else +# define FMT_HAS_FEATURE(x) 0 +#endif + +#if defined(__has_include) && \ + (!defined(__INTELLISENSE__) || FMT_MSC_VER > 1900) && \ + (!FMT_ICC_VERSION || FMT_ICC_VERSION >= 1600) +# define FMT_HAS_INCLUDE(x) __has_include(x) +#else +# define FMT_HAS_INCLUDE(x) 0 +#endif + +#ifdef __has_cpp_attribute +# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define FMT_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +#define FMT_HAS_CPP14_ATTRIBUTE(attribute) \ + (__cplusplus >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) + +#define FMT_HAS_CPP17_ATTRIBUTE(attribute) \ + (__cplusplus >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) + +// Check if relaxed C++14 constexpr is supported. +// GCC doesn't allow throw in constexpr until version 6 (bug 67371). +#ifndef FMT_USE_CONSTEXPR +# define FMT_USE_CONSTEXPR \ + (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1910 || \ + (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L)) && \ + !FMT_NVCC && !FMT_ICC_VERSION +#endif +#if FMT_USE_CONSTEXPR +# define FMT_CONSTEXPR constexpr +# define FMT_CONSTEXPR_DECL constexpr +#else +# define FMT_CONSTEXPR +# define FMT_CONSTEXPR_DECL +#endif + +// Check if constexpr std::char_traits<>::compare,length is supported. +#if defined(__GLIBCXX__) +# if __cplusplus >= 201703L && defined(_GLIBCXX_RELEASE) && \ + _GLIBCXX_RELEASE >= 7 // GCC 7+ libstdc++ has _GLIBCXX_RELEASE. 
+# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +# endif +#elif defined(_LIBCPP_VERSION) && __cplusplus >= 201703L && \ + _LIBCPP_VERSION >= 4000 +# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#elif FMT_MSC_VER >= 1914 && _MSVC_LANG >= 201703L +# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#endif +#ifndef FMT_CONSTEXPR_CHAR_TRAITS +# define FMT_CONSTEXPR_CHAR_TRAITS +#endif + +#ifndef FMT_OVERRIDE +# if FMT_HAS_FEATURE(cxx_override_control) || \ + (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 +# define FMT_OVERRIDE override +# else +# define FMT_OVERRIDE +# endif +#endif + +// Check if exceptions are disabled. +#ifndef FMT_EXCEPTIONS +# if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \ + FMT_MSC_VER && !_HAS_EXCEPTIONS +# define FMT_EXCEPTIONS 0 +# else +# define FMT_EXCEPTIONS 1 +# endif +#endif + +// Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature). +#ifndef FMT_USE_NOEXCEPT +# define FMT_USE_NOEXCEPT 0 +#endif + +#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \ + (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 +# define FMT_DETECTED_NOEXCEPT noexcept +# define FMT_HAS_CXX11_NOEXCEPT 1 +#else +# define FMT_DETECTED_NOEXCEPT throw() +# define FMT_HAS_CXX11_NOEXCEPT 0 +#endif + +#ifndef FMT_NOEXCEPT +# if FMT_EXCEPTIONS || FMT_HAS_CXX11_NOEXCEPT +# define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT +# else +# define FMT_NOEXCEPT +# endif +#endif + +// [[noreturn]] is disabled on MSVC and NVCC because of bogus unreachable code +// warnings. 
+#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER && \ + !FMT_NVCC +# define FMT_NORETURN [[noreturn]] +#else +# define FMT_NORETURN +#endif + +#ifndef FMT_MAYBE_UNUSED +# if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) +# define FMT_MAYBE_UNUSED [[maybe_unused]] +# else +# define FMT_MAYBE_UNUSED +# endif +#endif + +#if __cplusplus == 201103L || __cplusplus == 201402L +# if defined(__INTEL_COMPILER) || defined(__PGI) +# define FMT_FALLTHROUGH +# elif defined(__clang__) +# define FMT_FALLTHROUGH [[clang::fallthrough]] +# elif FMT_GCC_VERSION >= 700 && \ + (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) +# define FMT_FALLTHROUGH [[gnu::fallthrough]] +# else +# define FMT_FALLTHROUGH +# endif +#elif FMT_HAS_CPP17_ATTRIBUTE(fallthrough) || \ + (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define FMT_FALLTHROUGH [[fallthrough]] +#else +# define FMT_FALLTHROUGH +#endif + +#ifndef FMT_USE_FLOAT +# define FMT_USE_FLOAT 1 +#endif +#ifndef FMT_USE_DOUBLE +# define FMT_USE_DOUBLE 1 +#endif +#ifndef FMT_USE_LONG_DOUBLE +# define FMT_USE_LONG_DOUBLE 1 +#endif + +#ifndef FMT_INLINE +# if FMT_GCC_VERSION || FMT_CLANG_VERSION +# define FMT_INLINE inline __attribute__((always_inline)) +# else +# define FMT_INLINE inline +# endif +#endif + +#ifndef FMT_USE_INLINE_NAMESPACES +# if FMT_HAS_FEATURE(cxx_inline_namespaces) || FMT_GCC_VERSION >= 404 || \ + (FMT_MSC_VER >= 1900 && (!defined(_MANAGED) || !_MANAGED)) +# define FMT_USE_INLINE_NAMESPACES 1 +# else +# define FMT_USE_INLINE_NAMESPACES 0 +# endif +#endif + +#ifndef FMT_BEGIN_NAMESPACE +# if FMT_USE_INLINE_NAMESPACES +# define FMT_INLINE_NAMESPACE inline namespace +# define FMT_END_NAMESPACE \ + } \ + } +# else +# define FMT_INLINE_NAMESPACE namespace +# define FMT_END_NAMESPACE \ + } \ + using namespace v8; \ + } +# endif +# define FMT_BEGIN_NAMESPACE \ + namespace fmt { \ + FMT_INLINE_NAMESPACE v8 { +#endif + +#ifndef FMT_MODULE_EXPORT +# define FMT_MODULE_EXPORT +# define FMT_MODULE_EXPORT_BEGIN +# 
define FMT_MODULE_EXPORT_END +# define FMT_BEGIN_DETAIL_NAMESPACE namespace detail { +# define FMT_END_DETAIL_NAMESPACE } +#endif + +#if !defined(FMT_HEADER_ONLY) && defined(_WIN32) +# define FMT_CLASS_API FMT_MSC_WARNING(suppress : 4275) +# ifdef FMT_EXPORT +# define FMT_API __declspec(dllexport) +# elif defined(FMT_SHARED) +# define FMT_API __declspec(dllimport) +# endif +#else +# define FMT_CLASS_API +# if defined(FMT_EXPORT) || defined(FMT_SHARED) +# if defined(__GNUC__) || defined(__clang__) +# define FMT_API __attribute__((visibility("default"))) +# endif +# endif +#endif +#ifndef FMT_API +# define FMT_API +#endif + +#if FMT_GCC_VERSION +# define FMT_GCC_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#else +# define FMT_GCC_VISIBILITY_HIDDEN +#endif + +// libc++ supports string_view in pre-c++17. +#if (FMT_HAS_INCLUDE() && \ + (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \ + (defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910) +# include +# define FMT_USE_STRING_VIEW +#elif FMT_HAS_INCLUDE("experimental/string_view") && __cplusplus >= 201402L +# include +# define FMT_USE_EXPERIMENTAL_STRING_VIEW +#endif + +#ifndef FMT_UNICODE +# define FMT_UNICODE !FMT_MSC_VER +#endif + +#ifndef FMT_CONSTEVAL +# if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && \ + __cplusplus > 201703L) || \ + (defined(__cpp_consteval) && \ + !FMT_MSC_VER) // consteval is broken in MSVC. +# define FMT_CONSTEVAL consteval +# define FMT_HAS_CONSTEVAL +# else +# define FMT_CONSTEVAL +# endif +#endif + +#ifndef FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +# if defined(__cpp_nontype_template_args) && \ + ((FMT_GCC_VERSION >= 903 && __cplusplus >= 201709L) || \ + __cpp_nontype_template_args >= 201911L) +# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 1 +# else +# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 0 +# endif +#endif + +// Enable minimal optimizations for more compact code in debug mode. 
+FMT_GCC_PRAGMA("GCC push_options") +#ifndef __OPTIMIZE__ +FMT_GCC_PRAGMA("GCC optimize(\"Og\")") +#endif + +FMT_BEGIN_NAMESPACE +FMT_MODULE_EXPORT_BEGIN + +// Implementations of enable_if_t and other metafunctions for older systems. +template +using enable_if_t = typename std::enable_if::type; +template +using conditional_t = typename std::conditional::type; +template using bool_constant = std::integral_constant; +template +using remove_reference_t = typename std::remove_reference::type; +template +using remove_cvref_t = typename std::remove_cv>::type; +template struct type_identity { using type = T; }; +template using type_identity_t = typename type_identity::type; + +struct monostate { + constexpr monostate() {} +}; + +// Suppress "unused variable" warnings with the method described in +// https://herbsutter.com/2009/10/18/mailbag-shutting-up-compiler-warnings/. +// (void)var does not work on many Intel compilers. +template FMT_CONSTEXPR void ignore_unused(const T&...) {} + +// An enable_if helper to be used in template parameters which results in much +// shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed +// to workaround a bug in MSVC 2019 (see #1140 and #1186). +#ifdef FMT_DOC +# define FMT_ENABLE_IF(...) +#else +# define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 +#endif + +FMT_BEGIN_DETAIL_NAMESPACE + +constexpr FMT_INLINE auto is_constant_evaluated() FMT_NOEXCEPT -> bool { +#ifdef __cpp_lib_is_constant_evaluated + return std::is_constant_evaluated(); +#else + return false; +#endif +} + +// A function to suppress "conditional expression is constant" warnings. +template constexpr auto const_check(T value) -> T { return value; } + +FMT_NORETURN FMT_API void assert_fail(const char* file, int line, + const char* message); + +#ifndef FMT_ASSERT +# ifdef NDEBUG +// FMT_ASSERT is not empty to avoid -Werror=empty-body. 
+# define FMT_ASSERT(condition, message) \ + ::fmt::ignore_unused((condition), (message)) +# else +# define FMT_ASSERT(condition, message) \ + ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ + ? (void)0 \ + : ::fmt::detail::assert_fail(__FILE__, __LINE__, (message))) +# endif +#endif + +#if defined(FMT_USE_STRING_VIEW) +template using std_string_view = std::basic_string_view; +#elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW) +template +using std_string_view = std::experimental::basic_string_view; +#else +template struct std_string_view {}; +#endif + +#ifdef FMT_USE_INT128 +// Do nothing. +#elif defined(__SIZEOF_INT128__) && !FMT_NVCC && \ + !(FMT_CLANG_VERSION && FMT_MSC_VER) +# define FMT_USE_INT128 1 +using int128_t = __int128_t; +using uint128_t = __uint128_t; +template inline auto convert_for_visit(T value) -> T { + return value; +} +#else +# define FMT_USE_INT128 0 +#endif +#if !FMT_USE_INT128 +enum class int128_t {}; +enum class uint128_t {}; +// Reduce template instantiations. +template inline auto convert_for_visit(T) -> monostate { + return {}; +} +#endif + +// Casts a nonnegative integer to unsigned. +template +FMT_CONSTEXPR auto to_unsigned(Int value) -> + typename std::make_unsigned::type { + FMT_ASSERT(value >= 0, "negative value"); + return static_cast::type>(value); +} + +FMT_MSC_WARNING(suppress : 4566) constexpr unsigned char micro[] = "\u00B5"; + +constexpr auto is_utf8() -> bool { + // Avoid buggy sign extensions in MSVC's constant evaluation mode. + // https://developercommunity.visualstudio.com/t/C-difference-in-behavior-for-unsigned/1233612 + using uchar = unsigned char; + return FMT_UNICODE || (sizeof(micro) == 3 && uchar(micro[0]) == 0xC2 && + uchar(micro[1]) == 0xB5); +} +FMT_END_DETAIL_NAMESPACE + +/** + An implementation of ``std::basic_string_view`` for pre-C++17. It provides a + subset of the API. 
``fmt::basic_string_view`` is used for format strings even + if ``std::string_view`` is available to prevent issues when a library is + compiled with a different ``-std`` option than the client code (which is not + recommended). + */ +template class basic_string_view { + private: + const Char* data_; + size_t size_; + + public: + using value_type = Char; + using iterator = const Char*; + + constexpr basic_string_view() FMT_NOEXCEPT : data_(nullptr), size_(0) {} + + /** Constructs a string reference object from a C string and a size. */ + constexpr basic_string_view(const Char* s, size_t count) FMT_NOEXCEPT + : data_(s), + size_(count) {} + + /** + \rst + Constructs a string reference object from a C string computing + the size with ``std::char_traits::length``. + \endrst + */ + FMT_CONSTEXPR_CHAR_TRAITS + FMT_INLINE + basic_string_view(const Char* s) : data_(s) { + if (detail::const_check(std::is_same::value && + !detail::is_constant_evaluated())) + size_ = std::strlen(reinterpret_cast(s)); + else + size_ = std::char_traits::length(s); + } + + /** Constructs a string reference from a ``std::basic_string`` object. */ + template + FMT_CONSTEXPR basic_string_view( + const std::basic_string& s) FMT_NOEXCEPT + : data_(s.data()), + size_(s.size()) {} + + template >::value)> + FMT_CONSTEXPR basic_string_view(S s) FMT_NOEXCEPT : data_(s.data()), + size_(s.size()) {} + + /** Returns a pointer to the string data. */ + constexpr auto data() const -> const Char* { return data_; } + + /** Returns the string size. */ + constexpr auto size() const -> size_t { return size_; } + + constexpr auto begin() const -> iterator { return data_; } + constexpr auto end() const -> iterator { return data_ + size_; } + + constexpr auto operator[](size_t pos) const -> const Char& { + return data_[pos]; + } + + FMT_CONSTEXPR void remove_prefix(size_t n) { + data_ += n; + size_ -= n; + } + + // Lexicographically compare this string reference to other. 
+ FMT_CONSTEXPR_CHAR_TRAITS auto compare(basic_string_view other) const -> int { + size_t str_size = size_ < other.size_ ? size_ : other.size_; + int result = std::char_traits::compare(data_, other.data_, str_size); + if (result == 0) + result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1); + return result; + } + + FMT_CONSTEXPR_CHAR_TRAITS friend auto operator==(basic_string_view lhs, + basic_string_view rhs) + -> bool { + return lhs.compare(rhs) == 0; + } + friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) != 0; + } + friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) < 0; + } + friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) <= 0; + } + friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) > 0; + } + friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) >= 0; + } +}; + +using string_view = basic_string_view; + +/** Specifies if ``T`` is a character type. Can be specialized by users. */ +template struct is_char : std::false_type {}; +template <> struct is_char : std::true_type {}; + +// Returns a string view of `s`. +template ::value)> +FMT_INLINE auto to_string_view(const Char* s) -> basic_string_view { + return s; +} +template +inline auto to_string_view(const std::basic_string& s) + -> basic_string_view { + return s; +} +template +constexpr auto to_string_view(basic_string_view s) + -> basic_string_view { + return s; +} +template >::value)> +inline auto to_string_view(detail::std_string_view s) + -> basic_string_view { + return s; +} + +// A base class for compile-time strings. It is defined in the fmt namespace to +// make formatting functions visible via ADL, e.g. format(FMT_STRING("{}"), 42). 
+struct compile_string {}; + +template +struct is_compile_string : std::is_base_of {}; + +template ::value)> +constexpr auto to_string_view(const S& s) + -> basic_string_view { + return basic_string_view(s); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +void to_string_view(...); +using fmt::v8::to_string_view; + +// Specifies whether S is a string type convertible to fmt::basic_string_view. +// It should be a constexpr function but MSVC 2017 fails to compile it in +// enable_if and MSVC 2015 fails to compile it as an alias template. +template +struct is_string : std::is_class()))> { +}; + +template struct char_t_impl {}; +template struct char_t_impl::value>> { + using result = decltype(to_string_view(std::declval())); + using type = typename result::value_type; +}; + +// Reports a compile-time error if S is not a valid format string. +template ::value)> +FMT_INLINE void check_format_string(const S&) { +#ifdef FMT_ENFORCE_COMPILE_STRING + static_assert(is_compile_string::value, + "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " + "FMT_STRING."); +#endif +} +template ::value)> +void check_format_string(S); + +struct error_handler { + constexpr error_handler() = default; + constexpr error_handler(const error_handler&) = default; + + // This function is intentionally not constexpr to give a compile-time error. + FMT_NORETURN FMT_API void on_error(const char* message); +}; +FMT_END_DETAIL_NAMESPACE + +/** String's character type. */ +template using char_t = typename detail::char_t_impl::type; + +/** + \rst + Parsing context consisting of a format string range being parsed and an + argument counter for automatic indexing. + You can use the ``format_parse_context`` type alias for ``char`` instead. 
+ \endrst + */ +template +class basic_format_parse_context : private ErrorHandler { + private: + basic_string_view format_str_; + int next_arg_id_; + + public: + using char_type = Char; + using iterator = typename basic_string_view::iterator; + + explicit constexpr basic_format_parse_context( + basic_string_view format_str, ErrorHandler eh = {}, + int next_arg_id = 0) + : ErrorHandler(eh), format_str_(format_str), next_arg_id_(next_arg_id) {} + + /** + Returns an iterator to the beginning of the format string range being + parsed. + */ + constexpr auto begin() const FMT_NOEXCEPT -> iterator { + return format_str_.begin(); + } + + /** + Returns an iterator past the end of the format string range being parsed. + */ + constexpr auto end() const FMT_NOEXCEPT -> iterator { + return format_str_.end(); + } + + /** Advances the begin iterator to ``it``. */ + FMT_CONSTEXPR void advance_to(iterator it) { + format_str_.remove_prefix(detail::to_unsigned(it - begin())); + } + + /** + Reports an error if using the manual argument indexing; otherwise returns + the next argument index and switches to the automatic indexing. + */ + FMT_CONSTEXPR auto next_arg_id() -> int { + // Don't check if the argument id is valid to avoid overhead and because it + // will be checked during formatting anyway. + if (next_arg_id_ >= 0) return next_arg_id_++; + on_error("cannot switch from manual to automatic argument indexing"); + return 0; + } + + /** + Reports an error if using the automatic argument indexing; otherwise + switches to the manual indexing. 
+ */ + FMT_CONSTEXPR void check_arg_id(int) { + if (next_arg_id_ > 0) + on_error("cannot switch from automatic to manual argument indexing"); + else + next_arg_id_ = -1; + } + + FMT_CONSTEXPR void check_arg_id(basic_string_view) {} + + FMT_CONSTEXPR void on_error(const char* message) { + ErrorHandler::on_error(message); + } + + constexpr auto error_handler() const -> ErrorHandler { return *this; } +}; + +using format_parse_context = basic_format_parse_context; + +template class basic_format_arg; +template class basic_format_args; +template class dynamic_format_arg_store; + +// A formatter for objects of type T. +template +struct formatter { + // A deleted default constructor indicates a disabled formatter. + formatter() = delete; +}; + +// Specifies if T has an enabled formatter specialization. A type can be +// formattable even if it doesn't have a formatter e.g. via a conversion. +template +using has_formatter = + std::is_constructible>; + +// Checks whether T is a container with contiguous storage. +template struct is_contiguous : std::false_type {}; +template +struct is_contiguous> : std::true_type {}; + +class appender; + +FMT_BEGIN_DETAIL_NAMESPACE + +// Extracts a reference to the container from back_insert_iterator. 
+template +inline auto get_container(std::back_insert_iterator it) + -> Container& { + using bi_iterator = std::back_insert_iterator; + struct accessor : bi_iterator { + accessor(bi_iterator iter) : bi_iterator(iter) {} + using bi_iterator::container; + }; + return *accessor(it).container; +} + +template +FMT_CONSTEXPR auto copy_str(InputIt begin, InputIt end, OutputIt out) + -> OutputIt { + while (begin != end) *out++ = static_cast(*begin++); + return out; +} + +template ::value)> +FMT_CONSTEXPR auto copy_str(const Char* begin, const Char* end, Char* out) + -> Char* { + if (is_constant_evaluated()) + return copy_str(begin, end, out); + auto size = to_unsigned(end - begin); + memcpy(out, begin, size); + return out + size; +} + +/** + \rst + A contiguous memory buffer with an optional growing ability. It is an internal + class and shouldn't be used directly, only via `~fmt::basic_memory_buffer`. + \endrst + */ +template class buffer { + private: + T* ptr_; + size_t size_; + size_t capacity_; + + protected: + // Don't initialize ptr_ since it is not accessed to save a few cycles. + FMT_MSC_WARNING(suppress : 26495) + buffer(size_t sz) FMT_NOEXCEPT : size_(sz), capacity_(sz) {} + + buffer(T* p = nullptr, size_t sz = 0, size_t cap = 0) FMT_NOEXCEPT + : ptr_(p), + size_(sz), + capacity_(cap) {} + + ~buffer() = default; + buffer(buffer&&) = default; + + /** Sets the buffer data and capacity. */ + void set(T* buf_data, size_t buf_capacity) FMT_NOEXCEPT { + ptr_ = buf_data; + capacity_ = buf_capacity; + } + + /** Increases the buffer capacity to hold at least *capacity* elements. 
*/ + virtual void grow(size_t capacity) = 0; + + public: + using value_type = T; + using const_reference = const T&; + + buffer(const buffer&) = delete; + void operator=(const buffer&) = delete; + + auto begin() FMT_NOEXCEPT -> T* { return ptr_; } + auto end() FMT_NOEXCEPT -> T* { return ptr_ + size_; } + + auto begin() const FMT_NOEXCEPT -> const T* { return ptr_; } + auto end() const FMT_NOEXCEPT -> const T* { return ptr_ + size_; } + + /** Returns the size of this buffer. */ + auto size() const FMT_NOEXCEPT -> size_t { return size_; } + + /** Returns the capacity of this buffer. */ + auto capacity() const FMT_NOEXCEPT -> size_t { return capacity_; } + + /** Returns a pointer to the buffer data. */ + auto data() FMT_NOEXCEPT -> T* { return ptr_; } + + /** Returns a pointer to the buffer data. */ + auto data() const FMT_NOEXCEPT -> const T* { return ptr_; } + + /** Clears this buffer. */ + void clear() { size_ = 0; } + + // Tries resizing the buffer to contain *count* elements. If T is a POD type + // the new elements may not be initialized. + void try_resize(size_t count) { + try_reserve(count); + size_ = count <= capacity_ ? count : capacity_; + } + + // Tries increasing the buffer capacity to *new_capacity*. It can increase the + // capacity by a smaller amount than requested but guarantees there is space + // for at least one additional element either by increasing the capacity or by + // flushing the buffer if it is full. + void try_reserve(size_t new_capacity) { + if (new_capacity > capacity_) grow(new_capacity); + } + + void push_back(const T& value) { + try_reserve(size_ + 1); + ptr_[size_++] = value; + } + + /** Appends data to the end of the buffer. 
*/ + template void append(const U* begin, const U* end); + + template auto operator[](I index) -> T& { return ptr_[index]; } + template auto operator[](I index) const -> const T& { + return ptr_[index]; + } +}; + +struct buffer_traits { + explicit buffer_traits(size_t) {} + auto count() const -> size_t { return 0; } + auto limit(size_t size) -> size_t { return size; } +}; + +class fixed_buffer_traits { + private: + size_t count_ = 0; + size_t limit_; + + public: + explicit fixed_buffer_traits(size_t limit) : limit_(limit) {} + auto count() const -> size_t { return count_; } + auto limit(size_t size) -> size_t { + size_t n = limit_ > count_ ? limit_ - count_ : 0; + count_ += size; + return size < n ? size : n; + } +}; + +// A buffer that writes to an output iterator when flushed. +template +class iterator_buffer final : public Traits, public buffer { + private: + OutputIt out_; + enum { buffer_size = 256 }; + T data_[buffer_size]; + + protected: + void grow(size_t) final FMT_OVERRIDE { + if (this->size() == buffer_size) flush(); + } + + void flush() { + auto size = this->size(); + this->clear(); + out_ = copy_str(data_, data_ + this->limit(size), out_); + } + + public: + explicit iterator_buffer(OutputIt out, size_t n = buffer_size) + : Traits(n), buffer(data_, 0, buffer_size), out_(out) {} + iterator_buffer(iterator_buffer&& other) + : Traits(other), buffer(data_, 0, buffer_size), out_(other.out_) {} + ~iterator_buffer() { flush(); } + + auto out() -> OutputIt { + flush(); + return out_; + } + auto count() const -> size_t { return Traits::count() + this->size(); } +}; + +template class iterator_buffer final : public buffer { + protected: + void grow(size_t) final FMT_OVERRIDE {} + + public: + explicit iterator_buffer(T* out, size_t = 0) : buffer(out, 0, ~size_t()) {} + + auto out() -> T* { return &*this->end(); } +}; + +// A buffer that writes to a container with the contiguous storage. 
+template +class iterator_buffer, + enable_if_t::value, + typename Container::value_type>> + final : public buffer { + private: + Container& container_; + + protected: + void grow(size_t capacity) final FMT_OVERRIDE { + container_.resize(capacity); + this->set(&container_[0], capacity); + } + + public: + explicit iterator_buffer(Container& c) + : buffer(c.size()), container_(c) {} + explicit iterator_buffer(std::back_insert_iterator out, size_t = 0) + : iterator_buffer(get_container(out)) {} + auto out() -> std::back_insert_iterator { + return std::back_inserter(container_); + } +}; + +// A buffer that counts the number of code units written discarding the output. +template class counting_buffer final : public buffer { + private: + enum { buffer_size = 256 }; + T data_[buffer_size]; + size_t count_ = 0; + + protected: + void grow(size_t) final FMT_OVERRIDE { + if (this->size() != buffer_size) return; + count_ += this->size(); + this->clear(); + } + + public: + counting_buffer() : buffer(data_, 0, buffer_size) {} + + auto count() -> size_t { return count_ + this->size(); } +}; + +template +using buffer_appender = conditional_t::value, appender, + std::back_insert_iterator>>; + +// Maps an output iterator to a buffer. +template +auto get_buffer(OutputIt out) -> iterator_buffer { + return iterator_buffer(out); +} + +template +auto get_iterator(Buffer& buf) -> decltype(buf.out()) { + return buf.out(); +} +template auto get_iterator(buffer& buf) -> buffer_appender { + return buffer_appender(buf); +} + +template +struct fallback_formatter { + fallback_formatter() = delete; +}; + +// Specifies if T has an enabled fallback_formatter specialization. 
+template +using has_fallback_formatter = + std::is_constructible>; + +struct view {}; + +template struct named_arg : view { + const Char* name; + const T& value; + named_arg(const Char* n, const T& v) : name(n), value(v) {} +}; + +template struct named_arg_info { + const Char* name; + int id; +}; + +template +struct arg_data { + // args_[0].named_args points to named_args_ to avoid bloating format_args. + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. + T args_[1 + (NUM_ARGS != 0 ? NUM_ARGS : +1)]; + named_arg_info named_args_[NUM_NAMED_ARGS]; + + template + arg_data(const U&... init) : args_{T(named_args_, NUM_NAMED_ARGS), init...} {} + arg_data(const arg_data& other) = delete; + auto args() const -> const T* { return args_ + 1; } + auto named_args() -> named_arg_info* { return named_args_; } +}; + +template +struct arg_data { + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. + T args_[NUM_ARGS != 0 ? NUM_ARGS : +1]; + + template + FMT_CONSTEXPR FMT_INLINE arg_data(const U&... init) : args_{init...} {} + FMT_CONSTEXPR FMT_INLINE auto args() const -> const T* { return args_; } + FMT_CONSTEXPR FMT_INLINE auto named_args() -> std::nullptr_t { + return nullptr; + } +}; + +template +inline void init_named_args(named_arg_info*, int, int) {} + +template struct is_named_arg : std::false_type {}; +template struct is_statically_named_arg : std::false_type {}; + +template +struct is_named_arg> : std::true_type {}; + +template ::value)> +void init_named_args(named_arg_info* named_args, int arg_count, + int named_arg_count, const T&, const Tail&... args) { + init_named_args(named_args, arg_count + 1, named_arg_count, args...); +} + +template ::value)> +void init_named_args(named_arg_info* named_args, int arg_count, + int named_arg_count, const T& arg, const Tail&... 
args) { + named_args[named_arg_count++] = {arg.name, arg_count}; + init_named_args(named_args, arg_count + 1, named_arg_count, args...); +} + +template +FMT_CONSTEXPR FMT_INLINE void init_named_args(std::nullptr_t, int, int, + const Args&...) {} + +template constexpr auto count() -> size_t { return B ? 1 : 0; } +template constexpr auto count() -> size_t { + return (B1 ? 1 : 0) + count(); +} + +template constexpr auto count_named_args() -> size_t { + return count::value...>(); +} + +enum class type { + none_type, + // Integer types should go first, + int_type, + uint_type, + long_long_type, + ulong_long_type, + int128_type, + uint128_type, + bool_type, + char_type, + last_integer_type = char_type, + // followed by floating-point types. + float_type, + double_type, + long_double_type, + last_numeric_type = long_double_type, + cstring_type, + string_type, + pointer_type, + custom_type +}; + +// Maps core type T to the corresponding type enum constant. +template +struct type_constant : std::integral_constant {}; + +#define FMT_TYPE_CONSTANT(Type, constant) \ + template \ + struct type_constant \ + : std::integral_constant {} + +FMT_TYPE_CONSTANT(int, int_type); +FMT_TYPE_CONSTANT(unsigned, uint_type); +FMT_TYPE_CONSTANT(long long, long_long_type); +FMT_TYPE_CONSTANT(unsigned long long, ulong_long_type); +FMT_TYPE_CONSTANT(int128_t, int128_type); +FMT_TYPE_CONSTANT(uint128_t, uint128_type); +FMT_TYPE_CONSTANT(bool, bool_type); +FMT_TYPE_CONSTANT(Char, char_type); +FMT_TYPE_CONSTANT(float, float_type); +FMT_TYPE_CONSTANT(double, double_type); +FMT_TYPE_CONSTANT(long double, long_double_type); +FMT_TYPE_CONSTANT(const Char*, cstring_type); +FMT_TYPE_CONSTANT(basic_string_view, string_type); +FMT_TYPE_CONSTANT(const void*, pointer_type); + +constexpr bool is_integral_type(type t) { + return t > type::none_type && t <= type::last_integer_type; +} + +constexpr bool is_arithmetic_type(type t) { + return t > type::none_type && t <= type::last_numeric_type; +} + +template 
struct string_value { + const Char* data; + size_t size; +}; + +template struct named_arg_value { + const named_arg_info* data; + size_t size; +}; + +template struct custom_value { + using parse_context = typename Context::parse_context_type; + const void* value; + void (*format)(const void* arg, parse_context& parse_ctx, Context& ctx); +}; + +// A formatting argument value. +template class value { + public: + using char_type = typename Context::char_type; + + union { + monostate no_value; + int int_value; + unsigned uint_value; + long long long_long_value; + unsigned long long ulong_long_value; + int128_t int128_value; + uint128_t uint128_value; + bool bool_value; + char_type char_value; + float float_value; + double double_value; + long double long_double_value; + const void* pointer; + string_value string; + custom_value custom; + named_arg_value named_args; + }; + + constexpr FMT_INLINE value() : no_value() {} + constexpr FMT_INLINE value(int val) : int_value(val) {} + constexpr FMT_INLINE value(unsigned val) : uint_value(val) {} + constexpr FMT_INLINE value(long long val) : long_long_value(val) {} + constexpr FMT_INLINE value(unsigned long long val) : ulong_long_value(val) {} + FMT_INLINE value(int128_t val) : int128_value(val) {} + FMT_INLINE value(uint128_t val) : uint128_value(val) {} + FMT_INLINE value(float val) : float_value(val) {} + FMT_INLINE value(double val) : double_value(val) {} + FMT_INLINE value(long double val) : long_double_value(val) {} + constexpr FMT_INLINE value(bool val) : bool_value(val) {} + constexpr FMT_INLINE value(char_type val) : char_value(val) {} + FMT_CONSTEXPR FMT_INLINE value(const char_type* val) { + string.data = val; + if (is_constant_evaluated()) string.size = {}; + } + FMT_CONSTEXPR FMT_INLINE value(basic_string_view val) { + string.data = val.data(); + string.size = val.size(); + } + FMT_INLINE value(const void* val) : pointer(val) {} + FMT_INLINE value(const named_arg_info* args, size_t size) + : named_args{args, size} 
{} + + template FMT_CONSTEXPR FMT_INLINE value(const T& val) { + custom.value = &val; + // Get the formatter type through the context to allow different contexts + // have different extension points, e.g. `formatter` for `format` and + // `printf_formatter` for `printf`. + custom.format = format_custom_arg< + T, conditional_t::value, + typename Context::template formatter_type, + fallback_formatter>>; + } + + private: + // Formats an argument of a custom type, such as a user-defined class. + template + static void format_custom_arg(const void* arg, + typename Context::parse_context_type& parse_ctx, + Context& ctx) { + Formatter f; + parse_ctx.advance_to(f.parse(parse_ctx)); + ctx.advance_to(f.format(*static_cast(arg), ctx)); + } +}; + +template +FMT_CONSTEXPR auto make_arg(const T& value) -> basic_format_arg; + +// To minimize the number of types we need to deal with, long is translated +// either to int or to long long depending on its size. +enum { long_short = sizeof(long) == sizeof(int) }; +using long_type = conditional_t; +using ulong_type = conditional_t; + +struct unformattable {}; + +// Maps formatting arguments to core types. 
+template struct arg_mapper { + using char_type = typename Context::char_type; + + FMT_CONSTEXPR FMT_INLINE auto map(signed char val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned char val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(short val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned short val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned val) -> unsigned { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(long val) -> long_type { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long val) -> ulong_type { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(long long val) -> long long { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long long val) + -> unsigned long long { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int128_t val) -> int128_t { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(uint128_t val) -> uint128_t { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(bool val) -> bool { return val; } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(T val) -> char_type { + static_assert( + std::is_same::value || std::is_same::value, + "mixing character types is disallowed"); + return val; + } + + FMT_CONSTEXPR FMT_INLINE auto map(float val) -> float { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(double val) -> double { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(long double val) -> long double { + return val; + } + + FMT_CONSTEXPR FMT_INLINE auto map(char_type* val) -> const char_type* { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(const char_type* val) -> const char_type* { + return val; + } + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + static_assert(std::is_same>::value, + "mixing character types is disallowed"); + return to_string_view(val); + } + template , T>::value && + 
!is_string::value && !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + return basic_string_view(val); + } + template < + typename T, + FMT_ENABLE_IF( + std::is_constructible, T>::value && + !std::is_constructible, T>::value && + !is_string::value && !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + return std_string_view(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(const signed char* val) -> const char* { + static_assert(std::is_same::value, "invalid string type"); + return reinterpret_cast(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(const unsigned char* val) -> const char* { + static_assert(std::is_same::value, "invalid string type"); + return reinterpret_cast(val); + } + FMT_CONSTEXPR FMT_INLINE auto map(signed char* val) -> const char* { + const auto* const_val = val; + return map(const_val); + } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned char* val) -> const char* { + const auto* const_val = val; + return map(const_val); + } + + FMT_CONSTEXPR FMT_INLINE auto map(void* val) -> const void* { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(const void* val) -> const void* { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(std::nullptr_t val) -> const void* { + return val; + } + + // We use SFINAE instead of a const T* parameter to avoid conflicting with + // the C array overload. + template + FMT_CONSTEXPR auto map(T) -> enable_if_t::value, int> { + // Formatting of arbitrary pointers is disallowed. If you want to output + // a pointer cast it to "void *" or "const void *". In particular, this + // forbids formatting of "[const] volatile char *" which is printed as bool + // by iostreams. 
+ static_assert(!sizeof(T), "formatting of non-void pointers is disallowed"); + return 0; + } + + template + FMT_CONSTEXPR FMT_INLINE auto map(const T (&values)[N]) -> const T (&)[N] { + return values; + } + + template ::value && + !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> decltype(std::declval().map( + static_cast::type>(val))) { + return map(static_cast::type>(val)); + } + template ::value && !is_char::value && + (has_formatter::value || + has_fallback_formatter::value))> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> const T& { + return val; + } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& named_arg) + -> decltype(std::declval().map(named_arg.value)) { + return map(named_arg.value); + } + + auto map(...) -> unformattable { return {}; } +}; + +// A type constant after applying arg_mapper. +template +using mapped_type_constant = + type_constant().map(std::declval())), + typename Context::char_type>; + +enum { packed_arg_bits = 4 }; +// Maximum number of arguments with packed types. +enum { max_packed_args = 62 / packed_arg_bits }; +enum : unsigned long long { is_unpacked_bit = 1ULL << 63 }; +enum : unsigned long long { has_named_args_bit = 1ULL << 62 }; + +FMT_END_DETAIL_NAMESPACE + +// An output iterator that appends to a buffer. +// It is used to reduce symbol sizes for the common case. +class appender : public std::back_insert_iterator> { + using base = std::back_insert_iterator>; + + template + friend auto get_buffer(appender out) -> detail::buffer& { + return detail::get_container(out); + } + + public: + using std::back_insert_iterator>::back_insert_iterator; + appender(base it) : base(it) {} + using _Unchecked_type = appender; // Mark iterator as checked. + + auto operator++() -> appender& { + base::operator++(); + return *this; + } + + auto operator++(int) -> appender { + auto tmp = *this; + ++*this; + return tmp; + } +}; + +// A formatting argument. 
It is a trivially copyable/constructible type to +// allow storage in basic_memory_buffer. +template class basic_format_arg { + private: + detail::value value_; + detail::type type_; + + template + friend FMT_CONSTEXPR auto detail::make_arg(const T& value) + -> basic_format_arg; + + template + friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, + const basic_format_arg& arg) + -> decltype(vis(0)); + + friend class basic_format_args; + friend class dynamic_format_arg_store; + + using char_type = typename Context::char_type; + + template + friend struct detail::arg_data; + + basic_format_arg(const detail::named_arg_info* args, size_t size) + : value_(args, size) {} + + public: + class handle { + public: + explicit handle(detail::custom_value custom) : custom_(custom) {} + + void format(typename Context::parse_context_type& parse_ctx, + Context& ctx) const { + custom_.format(custom_.value, parse_ctx, ctx); + } + + private: + detail::custom_value custom_; + }; + + constexpr basic_format_arg() : type_(detail::type::none_type) {} + + constexpr explicit operator bool() const FMT_NOEXCEPT { + return type_ != detail::type::none_type; + } + + auto type() const -> detail::type { return type_; } + + auto is_integral() const -> bool { return detail::is_integral_type(type_); } + auto is_arithmetic() const -> bool { + return detail::is_arithmetic_type(type_); + } +}; + +/** + \rst + Visits an argument dispatching to the appropriate visit method based on + the argument type. For example, if the argument type is ``double`` then + ``vis(value)`` will be called with the value of type ``double``. 
+ \endrst + */ +template +FMT_CONSTEXPR FMT_INLINE auto visit_format_arg( + Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)) { + switch (arg.type_) { + case detail::type::none_type: + break; + case detail::type::int_type: + return vis(arg.value_.int_value); + case detail::type::uint_type: + return vis(arg.value_.uint_value); + case detail::type::long_long_type: + return vis(arg.value_.long_long_value); + case detail::type::ulong_long_type: + return vis(arg.value_.ulong_long_value); + case detail::type::int128_type: + return vis(detail::convert_for_visit(arg.value_.int128_value)); + case detail::type::uint128_type: + return vis(detail::convert_for_visit(arg.value_.uint128_value)); + case detail::type::bool_type: + return vis(arg.value_.bool_value); + case detail::type::char_type: + return vis(arg.value_.char_value); + case detail::type::float_type: + return vis(arg.value_.float_value); + case detail::type::double_type: + return vis(arg.value_.double_value); + case detail::type::long_double_type: + return vis(arg.value_.long_double_value); + case detail::type::cstring_type: + return vis(arg.value_.string.data); + case detail::type::string_type: + using sv = basic_string_view; + return vis(sv(arg.value_.string.data, arg.value_.string.size)); + case detail::type::pointer_type: + return vis(arg.value_.pointer); + case detail::type::custom_type: + return vis(typename basic_format_arg::handle(arg.value_.custom)); + } + return vis(monostate()); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +template +auto copy_str(InputIt begin, InputIt end, appender out) -> appender { + get_container(out).append(begin, end); + return out; +} + +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 +// A workaround for gcc 4.8 to make void_t work in a SFINAE context. 
+template struct void_t_impl { using type = void; }; +template +using void_t = typename detail::void_t_impl::type; +#else +template using void_t = void; +#endif + +template +struct is_output_iterator : std::false_type {}; + +template +struct is_output_iterator< + It, T, + void_t::iterator_category, + decltype(*std::declval() = std::declval())>> + : std::true_type {}; + +template +struct is_back_insert_iterator : std::false_type {}; +template +struct is_back_insert_iterator> + : std::true_type {}; + +template +struct is_contiguous_back_insert_iterator : std::false_type {}; +template +struct is_contiguous_back_insert_iterator> + : is_contiguous {}; +template <> +struct is_contiguous_back_insert_iterator : std::true_type {}; + +// A type-erased reference to an std::locale to avoid heavy include. +class locale_ref { + private: + const void* locale_; // A type-erased pointer to std::locale. + + public: + constexpr locale_ref() : locale_(nullptr) {} + template explicit locale_ref(const Locale& loc); + + explicit operator bool() const FMT_NOEXCEPT { return locale_ != nullptr; } + + template auto get() const -> Locale; +}; + +template constexpr auto encode_types() -> unsigned long long { + return 0; +} + +template +constexpr auto encode_types() -> unsigned long long { + return static_cast(mapped_type_constant::value) | + (encode_types() << packed_arg_bits); +} + +template +FMT_CONSTEXPR auto make_arg(const T& value) -> basic_format_arg { + basic_format_arg arg; + arg.type_ = mapped_type_constant::value; + arg.value_ = arg_mapper().map(value); + return arg; +} + +// The type template parameter is there to avoid an ODR violation when using +// a fallback formatter in one translation unit and an implicit conversion in +// another (not recommended). +template +FMT_CONSTEXPR FMT_INLINE auto make_arg(const T& val) -> value { + const auto& arg = arg_mapper().map(val); + static_assert( + !std::is_same::value, + "Cannot format an argument. 
To make type T formattable provide a " + "formatter specialization: https://fmt.dev/latest/api.html#udt"); + return {arg}; +} + +template +inline auto make_arg(const T& value) -> basic_format_arg { + return make_arg(value); +} +FMT_END_DETAIL_NAMESPACE + +// Formatting context. +template class basic_format_context { + public: + /** The character type for the output. */ + using char_type = Char; + + private: + OutputIt out_; + basic_format_args args_; + detail::locale_ref loc_; + + public: + using iterator = OutputIt; + using format_arg = basic_format_arg; + using parse_context_type = basic_format_parse_context; + template using formatter_type = formatter; + + basic_format_context(basic_format_context&&) = default; + basic_format_context(const basic_format_context&) = delete; + void operator=(const basic_format_context&) = delete; + /** + Constructs a ``basic_format_context`` object. References to the arguments are + stored in the object so make sure they have appropriate lifetimes. + */ + constexpr basic_format_context( + OutputIt out, basic_format_args ctx_args, + detail::locale_ref loc = detail::locale_ref()) + : out_(out), args_(ctx_args), loc_(loc) {} + + constexpr auto arg(int id) const -> format_arg { return args_.get(id); } + FMT_CONSTEXPR auto arg(basic_string_view name) -> format_arg { + return args_.get(name); + } + FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { + return args_.get_id(name); + } + auto args() const -> const basic_format_args& { + return args_; + } + + FMT_CONSTEXPR auto error_handler() -> detail::error_handler { return {}; } + void on_error(const char* message) { error_handler().on_error(message); } + + // Returns an iterator to the beginning of the output range. + FMT_CONSTEXPR auto out() -> iterator { return out_; } + + // Advances the begin iterator to ``it``. 
+ void advance_to(iterator it) { + if (!detail::is_back_insert_iterator()) out_ = it; + } + + FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } +}; + +template +using buffer_context = + basic_format_context, Char>; +using format_context = buffer_context; + +// Workaround an alias issue: https://stackoverflow.com/q/62767544/471164. +#define FMT_BUFFER_CONTEXT(Char) \ + basic_format_context, Char> + +template +using is_formattable = bool_constant< + !std::is_same>().map( + std::declval())), + detail::unformattable>::value && + !detail::has_fallback_formatter::value>; + +/** + \rst + An array of references to arguments. It can be implicitly converted into + `~fmt::basic_format_args` for passing into type-erased formatting functions + such as `~fmt::vformat`. + \endrst + */ +template +class format_arg_store +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + // Workaround a GCC template argument substitution bug. + : public basic_format_args +#endif +{ + private: + static const size_t num_args = sizeof...(Args); + static const size_t num_named_args = detail::count_named_args(); + static const bool is_packed = num_args <= detail::max_packed_args; + + using value_type = conditional_t, + basic_format_arg>; + + detail::arg_data + data_; + + friend class basic_format_args; + + static constexpr unsigned long long desc = + (is_packed ? detail::encode_types() + : detail::is_unpacked_bit | num_args) | + (num_named_args != 0 + ? static_cast(detail::has_named_args_bit) + : 0); + + public: + FMT_CONSTEXPR FMT_INLINE format_arg_store(const Args&... args) + : +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + basic_format_args(*this), +#endif + data_{detail::make_arg< + is_packed, Context, + detail::mapped_type_constant::value>(args)...} { + detail::init_named_args(data_.named_args(), 0, 0, args...); + } +}; + +/** + \rst + Constructs a `~fmt::format_arg_store` object that contains references to + arguments and can be implicitly converted to `~fmt::format_args`. 
`Context` + can be omitted in which case it defaults to `~fmt::context`. + See `~fmt::arg` for lifetime considerations. + \endrst + */ +template +constexpr auto make_format_args(const Args&... args) + -> format_arg_store { + return {args...}; +} + +/** + \rst + Returns a named argument to be used in a formatting function. + It should only be used in a call to a formatting function or + `dynamic_format_arg_store::push_back`. + + **Example**:: + + fmt::print("Elapsed time: {s:.2f} seconds", fmt::arg("s", 1.23)); + \endrst + */ +template +inline auto arg(const Char* name, const T& arg) -> detail::named_arg { + static_assert(!detail::is_named_arg(), "nested named arguments"); + return {name, arg}; +} + +/** + \rst + A view of a collection of formatting arguments. To avoid lifetime issues it + should only be used as a parameter type in type-erased functions such as + ``vformat``:: + + void vlog(string_view format_str, format_args args); // OK + format_args args = make_format_args(42); // Error: dangling reference + \endrst + */ +template class basic_format_args { + public: + using size_type = int; + using format_arg = basic_format_arg; + + private: + // A descriptor that contains information about formatting arguments. + // If the number of arguments is less or equal to max_packed_args then + // argument types are passed in the descriptor. This reduces binary code size + // per formatting function call. + unsigned long long desc_; + union { + // If is_packed() returns true then argument values are stored in values_; + // otherwise they are stored in args_. This is done to improve cache + // locality and reduce compiled code size since storing larger objects + // may require more code (at least on x86-64) even if the same amount of + // data is actually copied to stack. It saves ~10% on the bloat test. 
+ const detail::value* values_; + const format_arg* args_; + }; + + constexpr auto is_packed() const -> bool { + return (desc_ & detail::is_unpacked_bit) == 0; + } + auto has_named_args() const -> bool { + return (desc_ & detail::has_named_args_bit) != 0; + } + + FMT_CONSTEXPR auto type(int index) const -> detail::type { + int shift = index * detail::packed_arg_bits; + unsigned int mask = (1 << detail::packed_arg_bits) - 1; + return static_cast((desc_ >> shift) & mask); + } + + constexpr FMT_INLINE basic_format_args(unsigned long long desc, + const detail::value* values) + : desc_(desc), values_(values) {} + constexpr basic_format_args(unsigned long long desc, const format_arg* args) + : desc_(desc), args_(args) {} + + public: + constexpr basic_format_args() : desc_(0), args_(nullptr) {} + + /** + \rst + Constructs a `basic_format_args` object from `~fmt::format_arg_store`. + \endrst + */ + template + constexpr FMT_INLINE basic_format_args( + const format_arg_store& store) + : basic_format_args(format_arg_store::desc, + store.data_.args()) {} + + /** + \rst + Constructs a `basic_format_args` object from + `~fmt::dynamic_format_arg_store`. + \endrst + */ + constexpr FMT_INLINE basic_format_args( + const dynamic_format_arg_store& store) + : basic_format_args(store.get_types(), store.data()) {} + + /** + \rst + Constructs a `basic_format_args` object from a dynamic set of arguments. + \endrst + */ + constexpr basic_format_args(const format_arg* args, int count) + : basic_format_args(detail::is_unpacked_bit | detail::to_unsigned(count), + args) {} + + /** Returns the argument with the specified id. 
*/ + FMT_CONSTEXPR auto get(int id) const -> format_arg { + format_arg arg; + if (!is_packed()) { + if (id < max_size()) arg = args_[id]; + return arg; + } + if (id >= detail::max_packed_args) return arg; + arg.type_ = type(id); + if (arg.type_ == detail::type::none_type) return arg; + arg.value_ = values_[id]; + return arg; + } + + template + auto get(basic_string_view name) const -> format_arg { + int id = get_id(name); + return id >= 0 ? get(id) : format_arg(); + } + + template + auto get_id(basic_string_view name) const -> int { + if (!has_named_args()) return -1; + const auto& named_args = + (is_packed() ? values_[-1] : args_[-1].value_).named_args; + for (size_t i = 0; i < named_args.size; ++i) { + if (named_args.data[i].name == name) return named_args.data[i].id; + } + return -1; + } + + auto max_size() const -> int { + unsigned long long max_packed = detail::max_packed_args; + return static_cast(is_packed() ? max_packed + : desc_ & ~detail::is_unpacked_bit); + } +}; + +/** An alias to ``basic_format_args``. */ +// A separate type would result in shorter symbols but break ABI compatibility +// between clang and gcc on ARM (#1919). +using format_args = basic_format_args; + +// We cannot use enum classes as bit fields because of a gcc bug +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414. +namespace align { +enum type { none, left, right, center, numeric }; +} +using align_t = align::type; +namespace sign { +enum type { none, minus, plus, space }; +} +using sign_t = sign::type; + +FMT_BEGIN_DETAIL_NAMESPACE + +void throw_format_error(const char* message); + +// Workaround an array initialization issue in gcc 4.8. 
+template struct fill_t { + private: + enum { max_size = 4 }; + Char data_[max_size] = {Char(' '), Char(0), Char(0), Char(0)}; + unsigned char size_ = 1; + + public: + FMT_CONSTEXPR void operator=(basic_string_view s) { + auto size = s.size(); + if (size > max_size) return throw_format_error("invalid fill"); + for (size_t i = 0; i < size; ++i) data_[i] = s[i]; + size_ = static_cast(size); + } + + constexpr auto size() const -> size_t { return size_; } + constexpr auto data() const -> const Char* { return data_; } + + FMT_CONSTEXPR auto operator[](size_t index) -> Char& { return data_[index]; } + FMT_CONSTEXPR auto operator[](size_t index) const -> const Char& { + return data_[index]; + } +}; +FMT_END_DETAIL_NAMESPACE + +// Format specifiers for built-in and string types. +template struct basic_format_specs { + int width; + int precision; + char type; + align_t align : 4; + sign_t sign : 3; + bool alt : 1; // Alternate form ('#'). + bool localized : 1; + detail::fill_t fill; + + constexpr basic_format_specs() + : width(0), + precision(-1), + type(0), + align(align::none), + sign(sign::none), + alt(false), + localized(false) {} +}; + +using format_specs = basic_format_specs; + +FMT_BEGIN_DETAIL_NAMESPACE + +enum class arg_id_kind { none, index, name }; + +// An argument reference. 
+template struct arg_ref { + FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {} + + FMT_CONSTEXPR explicit arg_ref(int index) + : kind(arg_id_kind::index), val(index) {} + FMT_CONSTEXPR explicit arg_ref(basic_string_view name) + : kind(arg_id_kind::name), val(name) {} + + FMT_CONSTEXPR auto operator=(int idx) -> arg_ref& { + kind = arg_id_kind::index; + val.index = idx; + return *this; + } + + arg_id_kind kind; + union value { + FMT_CONSTEXPR value(int id = 0) : index{id} {} + FMT_CONSTEXPR value(basic_string_view n) : name(n) {} + + int index; + basic_string_view name; + } val; +}; + +// Format specifiers with width and precision resolved at formatting rather +// than parsing time to allow re-using the same parsed specifiers with +// different sets of arguments (precompilation of format strings). +template +struct dynamic_format_specs : basic_format_specs { + arg_ref width_ref; + arg_ref precision_ref; +}; + +struct auto_id {}; + +// A format specifier handler that sets fields in basic_format_specs. 
+template class specs_setter { + protected: + basic_format_specs& specs_; + + public: + explicit FMT_CONSTEXPR specs_setter(basic_format_specs& specs) + : specs_(specs) {} + + FMT_CONSTEXPR specs_setter(const specs_setter& other) + : specs_(other.specs_) {} + + FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + specs_.fill = fill; + } + FMT_CONSTEXPR void on_sign(sign_t s) { specs_.sign = s; } + FMT_CONSTEXPR void on_hash() { specs_.alt = true; } + FMT_CONSTEXPR void on_localized() { specs_.localized = true; } + + FMT_CONSTEXPR void on_zero() { + if (specs_.align == align::none) specs_.align = align::numeric; + specs_.fill[0] = Char('0'); + } + + FMT_CONSTEXPR void on_width(int width) { specs_.width = width; } + FMT_CONSTEXPR void on_precision(int precision) { + specs_.precision = precision; + } + FMT_CONSTEXPR void end_precision() {} + + FMT_CONSTEXPR void on_type(Char type) { + specs_.type = static_cast(type); + } +}; + +// Format spec handler that saves references to arguments representing dynamic +// width and precision to be resolved at formatting time. 
+template +class dynamic_specs_handler + : public specs_setter { + public: + using char_type = typename ParseContext::char_type; + + FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs& specs, + ParseContext& ctx) + : specs_setter(specs), specs_(specs), context_(ctx) {} + + FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler& other) + : specs_setter(other), + specs_(other.specs_), + context_(other.context_) {} + + template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + specs_.width_ref = make_arg_ref(arg_id); + } + + template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + specs_.precision_ref = make_arg_ref(arg_id); + } + + FMT_CONSTEXPR void on_error(const char* message) { + context_.on_error(message); + } + + private: + dynamic_format_specs& specs_; + ParseContext& context_; + + using arg_ref_type = arg_ref; + + FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type { + context_.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type { + return arg_ref_type(context_.next_arg_id()); + } + + FMT_CONSTEXPR auto make_arg_ref(basic_string_view arg_id) + -> arg_ref_type { + context_.check_arg_id(arg_id); + basic_string_view format_str( + context_.begin(), to_unsigned(context_.end() - context_.begin())); + return arg_ref_type(arg_id); + } +}; + +template constexpr bool is_ascii_letter(Char c) { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); +} + +// Converts a character to ASCII. Returns a number > 127 on conversion failure. 
+template ::value)> +constexpr auto to_ascii(Char value) -> Char { + return value; +} +template ::value)> +constexpr auto to_ascii(Char value) -> + typename std::underlying_type::type { + return value; +} + +template +FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int { + if (const_check(sizeof(Char) != 1)) return 1; + constexpr char lengths[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 4, 0}; + int len = lengths[static_cast(*begin) >> 3]; + + // Compute the pointer to the next character early so that the next + // iteration can start working on the next character. Neither Clang + // nor GCC figure out this reordering on their own. + return len + !len; +} + +// Return the result via the out param to workaround gcc bug 77539. +template +FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr& out) -> bool { + for (out = first; out != last; ++out) { + if (*out == value) return true; + } + return false; +} + +template <> +inline auto find(const char* first, const char* last, char value, + const char*& out) -> bool { + out = static_cast( + std::memchr(first, value, to_unsigned(last - first))); + return out != nullptr; +} + +// Parses the range [begin, end) as an unsigned integer. This function assumes +// that the range is non-empty and the first character is a digit. +template +FMT_CONSTEXPR auto parse_nonnegative_int(const Char*& begin, const Char* end, + int error_value) noexcept -> int { + FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); + unsigned value = 0, prev = 0; + auto p = begin; + do { + prev = value; + value = value * 10 + unsigned(*p - '0'); + ++p; + } while (p != end && '0' <= *p && *p <= '9'); + auto num_digits = p - begin; + begin = p; + if (num_digits <= std::numeric_limits::digits10) + return static_cast(value); + // Check for overflow. 
+ const unsigned max = to_unsigned((std::numeric_limits::max)()); + return num_digits == std::numeric_limits::digits10 + 1 && + prev * 10ull + unsigned(p[-1] - '0') <= max + ? static_cast(value) + : error_value; +} + +// Parses fill and alignment. +template +FMT_CONSTEXPR auto parse_align(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + FMT_ASSERT(begin != end, ""); + auto align = align::none; + auto p = begin + code_point_length(begin); + if (p >= end) p = begin; + for (;;) { + switch (to_ascii(*p)) { + case '<': + align = align::left; + break; + case '>': + align = align::right; + break; + case '^': + align = align::center; + break; + default: + break; + } + if (align != align::none) { + if (p != begin) { + auto c = *begin; + if (c == '{') + return handler.on_error("invalid fill character '{'"), begin; + handler.on_fill(basic_string_view(begin, to_unsigned(p - begin))); + begin = p + 1; + } else + ++begin; + handler.on_align(align); + break; + } else if (p == begin) { + break; + } + p = begin; + } + return begin; +} + +template FMT_CONSTEXPR bool is_name_start(Char c) { + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; +} + +template +FMT_CONSTEXPR auto do_parse_arg_id(const Char* begin, const Char* end, + IDHandler&& handler) -> const Char* { + FMT_ASSERT(begin != end, ""); + Char c = *begin; + if (c >= '0' && c <= '9') { + int index = 0; + if (c != '0') + index = + parse_nonnegative_int(begin, end, (std::numeric_limits::max)()); + else + ++begin; + if (begin == end || (*begin != '}' && *begin != ':')) + handler.on_error("invalid format string"); + else + handler(index); + return begin; + } + if (!is_name_start(c)) { + handler.on_error("invalid format string"); + return begin; + } + auto it = begin; + do { + ++it; + } while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9'))); + handler(basic_string_view(begin, to_unsigned(it - begin))); + return it; +} + +template +FMT_CONSTEXPR FMT_INLINE auto 
parse_arg_id(const Char* begin, const Char* end, + IDHandler&& handler) -> const Char* { + Char c = *begin; + if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler); + handler(); + return begin; +} + +template +FMT_CONSTEXPR auto parse_width(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + using detail::auto_id; + struct width_adapter { + Handler& handler; + + FMT_CONSTEXPR void operator()() { handler.on_dynamic_width(auto_id()); } + FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_width(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + handler.on_dynamic_width(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if (message) handler.on_error(message); + } + }; + + FMT_ASSERT(begin != end, ""); + if ('0' <= *begin && *begin <= '9') { + int width = parse_nonnegative_int(begin, end, -1); + if (width != -1) + handler.on_width(width); + else + handler.on_error("number is too big"); + } else if (*begin == '{') { + ++begin; + if (begin != end) begin = parse_arg_id(begin, end, width_adapter{handler}); + if (begin == end || *begin != '}') + return handler.on_error("invalid format string"), begin; + ++begin; + } + return begin; +} + +template +FMT_CONSTEXPR auto parse_precision(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + using detail::auto_id; + struct precision_adapter { + Handler& handler; + + FMT_CONSTEXPR void operator()() { handler.on_dynamic_precision(auto_id()); } + FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_precision(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + handler.on_dynamic_precision(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if (message) handler.on_error(message); + } + }; + + ++begin; + auto c = begin != end ? 
*begin : Char(); + if ('0' <= c && c <= '9') { + auto precision = parse_nonnegative_int(begin, end, -1); + if (precision != -1) + handler.on_precision(precision); + else + handler.on_error("number is too big"); + } else if (c == '{') { + ++begin; + if (begin != end) + begin = parse_arg_id(begin, end, precision_adapter{handler}); + if (begin == end || *begin++ != '}') + return handler.on_error("invalid format string"), begin; + } else { + return handler.on_error("missing precision specifier"), begin; + } + handler.end_precision(); + return begin; +} + +// Parses standard format specifiers and sends notifications about parsed +// components to handler. +template +FMT_CONSTEXPR FMT_INLINE auto parse_format_specs(const Char* begin, + const Char* end, + SpecHandler&& handler) + -> const Char* { + if (begin + 1 < end && begin[1] == '}' && is_ascii_letter(*begin) && + *begin != 'L') { + handler.on_type(*begin++); + return begin; + } + + if (begin == end) return begin; + + begin = parse_align(begin, end, handler); + if (begin == end) return begin; + + // Parse sign. + switch (to_ascii(*begin)) { + case '+': + handler.on_sign(sign::plus); + ++begin; + break; + case '-': + handler.on_sign(sign::minus); + ++begin; + break; + case ' ': + handler.on_sign(sign::space); + ++begin; + break; + default: + break; + } + if (begin == end) return begin; + + if (*begin == '#') { + handler.on_hash(); + if (++begin == end) return begin; + } + + // Parse zero flag. + if (*begin == '0') { + handler.on_zero(); + if (++begin == end) return begin; + } + + begin = parse_width(begin, end, handler); + if (begin == end) return begin; + + // Parse precision. + if (*begin == '.') { + begin = parse_precision(begin, end, handler); + if (begin == end) return begin; + } + + if (*begin == 'L') { + handler.on_localized(); + ++begin; + } + + // Parse type. 
+ if (begin != end && *begin != '}') handler.on_type(*begin++); + return begin; +} + +template +FMT_CONSTEXPR auto parse_replacement_field(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + struct id_adapter { + Handler& handler; + int arg_id; + + FMT_CONSTEXPR void operator()() { arg_id = handler.on_arg_id(); } + FMT_CONSTEXPR void operator()(int id) { arg_id = handler.on_arg_id(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + arg_id = handler.on_arg_id(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if (message) handler.on_error(message); + } + }; + + ++begin; + if (begin == end) return handler.on_error("invalid format string"), end; + if (*begin == '}') { + handler.on_replacement_field(handler.on_arg_id(), begin); + } else if (*begin == '{') { + handler.on_text(begin, begin + 1); + } else { + auto adapter = id_adapter{handler, 0}; + begin = parse_arg_id(begin, end, adapter); + Char c = begin != end ? *begin : Char(); + if (c == '}') { + handler.on_replacement_field(adapter.arg_id, begin); + } else if (c == ':') { + begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); + if (begin == end || *begin != '}') + return handler.on_error("unknown format specifier"), end; + } else { + return handler.on_error("missing '}' in format string"), end; + } + } + return begin + 1; +} + +template +FMT_CONSTEXPR FMT_INLINE void parse_format_string( + basic_string_view format_str, Handler&& handler) { + // this is most likely a name-lookup defect in msvc's modules implementation + using detail::find; + + auto begin = format_str.data(); + auto end = begin + format_str.size(); + if (end - begin < 32) { + // Use a simple loop instead of memchr for small strings. 
+ const Char* p = begin; + while (p != end) { + auto c = *p++; + if (c == '{') { + handler.on_text(begin, p - 1); + begin = p = parse_replacement_field(p - 1, end, handler); + } else if (c == '}') { + if (p == end || *p != '}') + return handler.on_error("unmatched '}' in format string"); + handler.on_text(begin, p); + begin = ++p; + } + } + handler.on_text(begin, end); + return; + } + struct writer { + FMT_CONSTEXPR void operator()(const Char* pbegin, const Char* pend) { + if (pbegin == pend) return; + for (;;) { + const Char* p = nullptr; + if (!find(pbegin, pend, Char('}'), p)) + return handler_.on_text(pbegin, pend); + ++p; + if (p == pend || *p != '}') + return handler_.on_error("unmatched '}' in format string"); + handler_.on_text(pbegin, p); + pbegin = p + 1; + } + } + Handler& handler_; + } write{handler}; + while (begin != end) { + // Doing two passes with memchr (one for '{' and another for '}') is up to + // 2.5x faster than the naive one-pass implementation on big format strings. + const Char* p = begin; + if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) + return write(begin, end); + write(begin, p); + begin = parse_replacement_field(p, end, handler); + } +} + +template +FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) + -> decltype(ctx.begin()) { + using char_type = typename ParseContext::char_type; + using context = buffer_context; + using mapped_type = conditional_t< + mapped_type_constant::value != type::custom_type, + decltype(arg_mapper().map(std::declval())), T>; + auto f = conditional_t::value, + formatter, + fallback_formatter>(); + return f.parse(ctx); +} + +// A parse context with extra argument id checks. It is only used at compile +// time because adding checks at runtime would introduce substantial overhead +// and would be redundant since argument ids are checked when arguments are +// retrieved anyway. 
+template +class compile_parse_context + : public basic_format_parse_context { + private: + int num_args_; + using base = basic_format_parse_context; + + public: + explicit FMT_CONSTEXPR compile_parse_context( + basic_string_view format_str, + int num_args = (std::numeric_limits::max)(), ErrorHandler eh = {}) + : base(format_str, eh), num_args_(num_args) {} + + FMT_CONSTEXPR auto next_arg_id() -> int { + int id = base::next_arg_id(); + if (id >= num_args_) this->on_error("argument not found"); + return id; + } + + FMT_CONSTEXPR void check_arg_id(int id) { + base::check_arg_id(id); + if (id >= num_args_) this->on_error("argument not found"); + } + using base::check_arg_id; +}; + +template +FMT_CONSTEXPR void check_int_type_spec(char spec, ErrorHandler&& eh) { + switch (spec) { + case 0: + case 'd': + case 'x': + case 'X': + case 'b': + case 'B': + case 'o': + case 'c': + break; + default: + eh.on_error("invalid type specifier"); + break; + } +} + +// Checks char specs and returns true if the type spec is char (and not int). +template +FMT_CONSTEXPR auto check_char_specs(const basic_format_specs& specs, + ErrorHandler&& eh = {}) -> bool { + if (specs.type && specs.type != 'c') { + check_int_type_spec(specs.type, eh); + return false; + } + if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) + eh.on_error("invalid format specifier for char"); + return true; +} + +// A floating-point presentation format. +enum class float_format : unsigned char { + general, // General: exponent notation or fixed point based on magnitude. + exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. + fixed, // Fixed point with the default precision of 6, e.g. 0.0012. 
+ hex +}; + +struct float_specs { + int precision; + float_format format : 8; + sign_t sign : 8; + bool upper : 1; + bool locale : 1; + bool binary32 : 1; + bool use_grisu : 1; + bool showpoint : 1; +}; + +template +FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs& specs, + ErrorHandler&& eh = {}) + -> float_specs { + auto result = float_specs(); + result.showpoint = specs.alt; + result.locale = specs.localized; + switch (specs.type) { + case 0: + result.format = float_format::general; + break; + case 'G': + result.upper = true; + FMT_FALLTHROUGH; + case 'g': + result.format = float_format::general; + break; + case 'E': + result.upper = true; + FMT_FALLTHROUGH; + case 'e': + result.format = float_format::exp; + result.showpoint |= specs.precision != 0; + break; + case 'F': + result.upper = true; + FMT_FALLTHROUGH; + case 'f': + result.format = float_format::fixed; + result.showpoint |= specs.precision != 0; + break; + case 'A': + result.upper = true; + FMT_FALLTHROUGH; + case 'a': + result.format = float_format::hex; + break; + default: + eh.on_error("invalid type specifier"); + break; + } + return result; +} + +template +FMT_CONSTEXPR auto check_cstring_type_spec(Char spec, ErrorHandler&& eh = {}) + -> bool { + if (spec == 0 || spec == 's') return true; + if (spec != 'p') eh.on_error("invalid type specifier"); + return false; +} + +template +FMT_CONSTEXPR void check_string_type_spec(Char spec, ErrorHandler&& eh = {}) { + if (spec != 0 && spec != 's') eh.on_error("invalid type specifier"); +} + +template +FMT_CONSTEXPR void check_pointer_type_spec(Char spec, ErrorHandler&& eh) { + if (spec != 0 && spec != 'p') eh.on_error("invalid type specifier"); +} + +// A parse_format_specs handler that checks if specifiers are consistent with +// the argument type. 
+template class specs_checker : public Handler { + private: + detail::type arg_type_; + + FMT_CONSTEXPR void require_numeric_argument() { + if (!is_arithmetic_type(arg_type_)) + this->on_error("format specifier requires numeric argument"); + } + + public: + FMT_CONSTEXPR specs_checker(const Handler& handler, detail::type arg_type) + : Handler(handler), arg_type_(arg_type) {} + + FMT_CONSTEXPR void on_align(align_t align) { + if (align == align::numeric) require_numeric_argument(); + Handler::on_align(align); + } + + FMT_CONSTEXPR void on_sign(sign_t s) { + require_numeric_argument(); + if (is_integral_type(arg_type_) && arg_type_ != type::int_type && + arg_type_ != type::long_long_type && arg_type_ != type::char_type) { + this->on_error("format specifier requires signed argument"); + } + Handler::on_sign(s); + } + + FMT_CONSTEXPR void on_hash() { + require_numeric_argument(); + Handler::on_hash(); + } + + FMT_CONSTEXPR void on_localized() { + require_numeric_argument(); + Handler::on_localized(); + } + + FMT_CONSTEXPR void on_zero() { + require_numeric_argument(); + Handler::on_zero(); + } + + FMT_CONSTEXPR void end_precision() { + if (is_integral_type(arg_type_) || arg_type_ == type::pointer_type) + this->on_error("precision not allowed for this argument type"); + } +}; + +constexpr int invalid_arg_index = -1; + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template +constexpr auto get_arg_index_by_name(basic_string_view name) -> int { + if constexpr (detail::is_statically_named_arg()) { + if (name == T::name) return N; + } + if constexpr (sizeof...(Args) > 0) { + return get_arg_index_by_name(name); + } else { + (void)name; // Workaround an MSVC bug about "unused" parameter. 
+ return invalid_arg_index; + } +} +#endif + +template +FMT_CONSTEXPR auto get_arg_index_by_name(basic_string_view name) -> int { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS + if constexpr (sizeof...(Args) > 0) { + return get_arg_index_by_name<0, Args...>(name); + } else { + (void)name; + return invalid_arg_index; + } +#else + (void)name; + return invalid_arg_index; +#endif +} + +template +class format_string_checker { + private: + using parse_context_type = compile_parse_context; + enum { num_args = sizeof...(Args) }; + + // Format specifier parsing function. + using parse_func = const Char* (*)(parse_context_type&); + + parse_context_type context_; + parse_func parse_funcs_[num_args > 0 ? num_args : 1]; + + public: + explicit FMT_CONSTEXPR format_string_checker( + basic_string_view format_str, ErrorHandler eh) + : context_(format_str, num_args, eh), + parse_funcs_{&parse_format_specs...} {} + + FMT_CONSTEXPR void on_text(const Char*, const Char*) {} + + FMT_CONSTEXPR auto on_arg_id() -> int { return context_.next_arg_id(); } + FMT_CONSTEXPR auto on_arg_id(int id) -> int { + return context_.check_arg_id(id), id; + } + FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS + auto index = get_arg_index_by_name(id); + if (index == invalid_arg_index) on_error("named argument is not found"); + return context_.check_arg_id(index), index; +#else + (void)id; + on_error("compile-time checks for named arguments require C++20 support"); + return 0; +#endif + } + + FMT_CONSTEXPR void on_replacement_field(int, const Char*) {} + + FMT_CONSTEXPR auto on_format_specs(int id, const Char* begin, const Char*) + -> const Char* { + context_.advance_to(context_.begin() + (begin - &*context_.begin())); + // id >= 0 check is a workaround for gcc 10 bug (#2065). + return id >= 0 && id < num_args ? 
parse_funcs_[id](context_) : begin; + } + + FMT_CONSTEXPR void on_error(const char* message) { + context_.on_error(message); + } +}; + +template ::value), int>> +void check_format_string(S format_str) { + FMT_CONSTEXPR auto s = to_string_view(format_str); + using checker = format_string_checker...>; + FMT_CONSTEXPR bool invalid_format = + (parse_format_string(s, checker(s, {})), true); + ignore_unused(invalid_format); +} + +template +void vformat_to( + buffer& buf, basic_string_view fmt, + basic_format_args)> args, + locale_ref loc = {}); + +FMT_API void vprint_mojibake(std::FILE*, string_view, format_args); +#ifndef _WIN32 +inline void vprint_mojibake(std::FILE*, string_view, format_args) {} +#endif +FMT_END_DETAIL_NAMESPACE + +// A formatter specialization for the core types corresponding to detail::type +// constants. +template +struct formatter::value != + detail::type::custom_type>> { + private: + detail::dynamic_format_specs specs_; + + public: + // Parses format specifiers stopping either at the end of the range or at the + // terminating '}'. 
+ template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end) return begin; + using handler_type = detail::dynamic_specs_handler; + auto type = detail::type_constant::value; + auto checker = + detail::specs_checker(handler_type(specs_, ctx), type); + auto it = detail::parse_format_specs(begin, end, checker); + auto eh = ctx.error_handler(); + switch (type) { + case detail::type::none_type: + FMT_ASSERT(false, "invalid argument type"); + break; + case detail::type::bool_type: + if (!specs_.type || specs_.type == 's') break; + FMT_FALLTHROUGH; + case detail::type::int_type: + case detail::type::uint_type: + case detail::type::long_long_type: + case detail::type::ulong_long_type: + case detail::type::int128_type: + case detail::type::uint128_type: + detail::check_int_type_spec(specs_.type, eh); + break; + case detail::type::char_type: + detail::check_char_specs(specs_, eh); + break; + case detail::type::float_type: + if (detail::const_check(FMT_USE_FLOAT)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "float support disabled"); + break; + case detail::type::double_type: + if (detail::const_check(FMT_USE_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "double support disabled"); + break; + case detail::type::long_double_type: + if (detail::const_check(FMT_USE_LONG_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "long double support disabled"); + break; + case detail::type::cstring_type: + detail::check_cstring_type_spec(specs_.type, eh); + break; + case detail::type::string_type: + detail::check_string_type_spec(specs_.type, eh); + break; + case detail::type::pointer_type: + detail::check_pointer_type_spec(specs_.type, eh); + break; + case detail::type::custom_type: + // Custom format specifiers are checked in parse functions of + // formatter specializations. 
+ break; + } + return it; + } + + template + FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const + -> decltype(ctx.out()); +}; + +template struct basic_runtime { basic_string_view str; }; + +template class basic_format_string { + private: + basic_string_view str_; + + public: + template >::value)> + FMT_CONSTEVAL basic_format_string(const S& s) : str_(s) { + static_assert( + detail::count< + (std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); +#ifdef FMT_HAS_CONSTEVAL + if constexpr (detail::count_named_args() == 0) { + using checker = detail::format_string_checker...>; + detail::parse_format_string(str_, checker(s, {})); + } +#else + detail::check_format_string(s); +#endif + } + basic_format_string(basic_runtime r) : str_(r.str) {} + + FMT_INLINE operator basic_string_view() const { return str_; } +}; + +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 +// Workaround broken conversion on older gcc. +template using format_string = string_view; +template auto runtime(const S& s) -> basic_string_view> { + return s; +} +#else +template +using format_string = basic_format_string...>; +// Creates a runtime format string. +template auto runtime(const S& s) -> basic_runtime> { + return {{s}}; +} +#endif + +FMT_API auto vformat(string_view fmt, format_args args) -> std::string; + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and returns the result + as a string. + + **Example**:: + + #include + std::string message = fmt::format("The answer is {}", 42); + \endrst +*/ +template +FMT_INLINE auto format(format_string fmt, T&&... args) -> std::string { + return vformat(fmt, fmt::make_format_args(args...)); +} + +/** Formats a string and writes the output to ``out``. 
*/ +template ::value)> +auto vformat_to(OutputIt out, string_view fmt, format_args args) -> OutputIt { + using detail::get_buffer; + auto&& buf = get_buffer(out); + detail::vformat_to(buf, string_view(fmt), args, {}); + return detail::get_iterator(buf); +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt``, writes the result to + the output iterator ``out`` and returns the iterator past the end of the output + range. + + **Example**:: + + auto out = std::vector(); + fmt::format_to(std::back_inserter(out), "{}", 42); + \endrst + */ +template ::value)> +FMT_INLINE auto format_to(OutputIt out, format_string fmt, T&&... args) + -> OutputIt { + return vformat_to(out, fmt, fmt::make_format_args(args...)); +} + +template struct format_to_n_result { + /** Iterator past the end of the output range. */ + OutputIt out; + /** Total (not truncated) output size. */ + size_t size; +}; + +template ::value)> +auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) + -> format_to_n_result { + using buffer = + detail::iterator_buffer; + auto buf = buffer(out, n); + detail::vformat_to(buf, fmt, args, {}); + return {buf.out(), buf.count()}; +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt``, writes up to ``n`` + characters of the result to the output iterator ``out`` and returns the total + (not truncated) output size and the iterator past the end of the output range. + \endrst + */ +template ::value)> +FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt, + const T&... args) -> format_to_n_result { + return vformat_to_n(out, n, fmt, fmt::make_format_args(args...)); +} + +/** Returns the number of chars in the output of ``format(fmt, args...)``. */ +template +FMT_INLINE auto formatted_size(format_string fmt, T&&... 
args) -> size_t { + auto buf = detail::counting_buffer<>(); + detail::vformat_to(buf, string_view(fmt), fmt::make_format_args(args...), {}); + return buf.count(); +} + +FMT_API void vprint(string_view fmt, format_args args); +FMT_API void vprint(std::FILE* f, string_view fmt, format_args args); + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and writes the output + to ``stdout``. + + **Example**:: + + fmt::print("Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template +FMT_INLINE void print(format_string fmt, T&&... args) { + const auto& vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? vprint(fmt, vargs) + : detail::vprint_mojibake(stdout, fmt, vargs); +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and writes the + output to the file ``f``. + + **Example**:: + + fmt::print(stderr, "Don't {}!", "panic"); + \endrst + */ +template +FMT_INLINE void print(std::FILE* f, format_string fmt, T&&... args) { + const auto& vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? vprint(f, fmt, vargs) + : detail::vprint_mojibake(f, fmt, vargs); +} + +FMT_MODULE_EXPORT_END +FMT_GCC_PRAGMA("GCC pop_options") +FMT_END_NAMESPACE + +#ifdef FMT_HEADER_ONLY +# include "format.h" +#endif +#endif // FMT_CORE_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/format-inl.h b/contrib/fmt-8.0.1/include/fmt/format-inl.h new file mode 100644 index 0000000000..94a36d1bc4 --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/format-inl.h @@ -0,0 +1,2620 @@ +// Formatting library for C++ - implementation +// +// Copyright (c) 2012 - 2016, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. 
+ +#ifndef FMT_FORMAT_INL_H_ +#define FMT_FORMAT_INL_H_ + +#include +#include +#include // errno +#include +#include +#include +#include // std::memmove +#include +#include + +#ifndef FMT_STATIC_THOUSANDS_SEPARATOR +# include +#endif + +#ifdef _WIN32 +# include // _isatty +#endif + +#include "format.h" + +FMT_BEGIN_NAMESPACE +namespace detail { + +FMT_FUNC void assert_fail(const char* file, int line, const char* message) { + // Use unchecked std::fprintf to avoid triggering another assertion when + // writing to stderr fails + std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message); + // Chosen instead of std::abort to satisfy Clang in CUDA mode during device + // code pass. + std::terminate(); +} + +#ifndef _MSC_VER +# define FMT_SNPRINTF snprintf +#else // _MSC_VER +inline int fmt_snprintf(char* buffer, size_t size, const char* format, ...) { + va_list args; + va_start(args, format); + int result = vsnprintf_s(buffer, size, _TRUNCATE, format, args); + va_end(args); + return result; +} +# define FMT_SNPRINTF fmt_snprintf +#endif // _MSC_VER + +FMT_FUNC void format_error_code(detail::buffer& out, int error_code, + string_view message) FMT_NOEXCEPT { + // Report error code making sure that the output fits into + // inline_buffer_size to avoid dynamic memory allocation and potential + // bad_alloc. + out.try_resize(0); + static const char SEP[] = ": "; + static const char ERROR_STR[] = "error "; + // Subtract 2 to account for terminating null characters in SEP and ERROR_STR. 
+ size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2; + auto abs_value = static_cast>(error_code); + if (detail::is_negative(error_code)) { + abs_value = 0 - abs_value; + ++error_code_size; + } + error_code_size += detail::to_unsigned(detail::count_digits(abs_value)); + auto it = buffer_appender(out); + if (message.size() <= inline_buffer_size - error_code_size) + format_to(it, FMT_STRING("{}{}"), message, SEP); + format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code); + FMT_ASSERT(out.size() <= inline_buffer_size, ""); +} + +FMT_FUNC void report_error(format_func func, int error_code, + const char* message) FMT_NOEXCEPT { + memory_buffer full_message; + func(full_message, error_code, message); + // Don't use fwrite_fully because the latter may throw. + if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0) + std::fputc('\n', stderr); +} + +// A wrapper around fwrite that throws on error. +inline void fwrite_fully(const void* ptr, size_t size, size_t count, + FILE* stream) { + size_t written = std::fwrite(ptr, size, count, stream); + if (written < count) FMT_THROW(system_error(errno, "cannot write to file")); +} + +#ifndef FMT_STATIC_THOUSANDS_SEPARATOR +template +locale_ref::locale_ref(const Locale& loc) : locale_(&loc) { + static_assert(std::is_same::value, ""); +} + +template Locale locale_ref::get() const { + static_assert(std::is_same::value, ""); + return locale_ ? *static_cast(locale_) : std::locale(); +} + +template +FMT_FUNC auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result { + auto& facet = std::use_facet>(loc.get()); + auto grouping = facet.grouping(); + auto thousands_sep = grouping.empty() ? 
Char() : facet.thousands_sep(); + return {std::move(grouping), thousands_sep}; +} +template FMT_FUNC Char decimal_point_impl(locale_ref loc) { + return std::use_facet>(loc.get()) + .decimal_point(); +} +#else +template +FMT_FUNC auto thousands_sep_impl(locale_ref) -> thousands_sep_result { + return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR}; +} +template FMT_FUNC Char decimal_point_impl(locale_ref) { + return '.'; +} +#endif +} // namespace detail + +#if !FMT_MSC_VER +FMT_API FMT_FUNC format_error::~format_error() FMT_NOEXCEPT = default; +#endif + +FMT_FUNC std::system_error vsystem_error(int error_code, string_view format_str, + format_args args) { + auto ec = std::error_code(error_code, std::generic_category()); + return std::system_error(ec, vformat(format_str, args)); +} + +namespace detail { + +template <> FMT_FUNC int count_digits<4>(detail::fallback_uintptr n) { + // fallback_uintptr is always stored in little endian. + int i = static_cast(sizeof(void*)) - 1; + while (i > 0 && n.value[i] == 0) --i; + auto char_digits = std::numeric_limits::digits / 4; + return i >= 0 ? i * char_digits + count_digits<4, unsigned>(n.value[i]) : 1; +} + +#if __cplusplus < 201703L +template constexpr const char basic_data::digits[][2]; +template constexpr const char basic_data::hex_digits[]; +template constexpr const char basic_data::signs[]; +template constexpr const unsigned basic_data::prefixes[]; +template constexpr const char basic_data::left_padding_shifts[]; +template +constexpr const char basic_data::right_padding_shifts[]; +#endif + +template struct bits { + static FMT_CONSTEXPR_DECL const int value = + static_cast(sizeof(T) * std::numeric_limits::digits); +}; + +class fp; +template fp normalize(fp value); + +// Lower (upper) boundary is a value half way between a floating-point value +// and its predecessor (successor). Boundaries have the same exponent as the +// value so only significands are stored. 
+struct boundaries { + uint64_t lower; + uint64_t upper; +}; + +// A handmade floating-point number f * pow(2, e). +class fp { + private: + using significand_type = uint64_t; + + template + using is_supported_float = bool_constant; + + public: + significand_type f; + int e; + + // All sizes are in bits. + // Subtract 1 to account for an implicit most significant bit in the + // normalized form. + static FMT_CONSTEXPR_DECL const int double_significand_size = + std::numeric_limits::digits - 1; + static FMT_CONSTEXPR_DECL const uint64_t implicit_bit = + 1ULL << double_significand_size; + static FMT_CONSTEXPR_DECL const int significand_size = + bits::value; + + fp() : f(0), e(0) {} + fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {} + + // Constructs fp from an IEEE754 double. It is a template to prevent compile + // errors on platforms where double is not IEEE754. + template explicit fp(Double d) { assign(d); } + + // Assigns d to this and return true iff predecessor is closer than successor. + template ::value)> + bool assign(Float d) { + // Assume float is in the format [sign][exponent][significand]. + using limits = std::numeric_limits; + const int float_significand_size = limits::digits - 1; + const int exponent_size = + bits::value - float_significand_size - 1; // -1 for sign + const uint64_t float_implicit_bit = 1ULL << float_significand_size; + const uint64_t significand_mask = float_implicit_bit - 1; + const uint64_t exponent_mask = (~0ULL >> 1) & ~significand_mask; + const int exponent_bias = (1 << exponent_size) - limits::max_exponent - 1; + constexpr bool is_double = sizeof(Float) == sizeof(uint64_t); + auto u = bit_cast>(d); + f = u & significand_mask; + int biased_e = + static_cast((u & exponent_mask) >> float_significand_size); + // Predecessor is closer if d is a normalized power of 2 (f == 0) other than + // the smallest normalized number (biased_e > 1). 
+ bool is_predecessor_closer = f == 0 && biased_e > 1; + if (biased_e != 0) + f += float_implicit_bit; + else + biased_e = 1; // Subnormals use biased exponent 1 (min exponent). + e = biased_e - exponent_bias - float_significand_size; + return is_predecessor_closer; + } + + template ::value)> + bool assign(Float) { + *this = fp(); + return false; + } +}; + +// Normalizes the value converted from double and multiplied by (1 << SHIFT). +template fp normalize(fp value) { + // Handle subnormals. + const auto shifted_implicit_bit = fp::implicit_bit << SHIFT; + while ((value.f & shifted_implicit_bit) == 0) { + value.f <<= 1; + --value.e; + } + // Subtract 1 to account for hidden bit. + const auto offset = + fp::significand_size - fp::double_significand_size - SHIFT - 1; + value.f <<= offset; + value.e -= offset; + return value; +} + +inline bool operator==(fp x, fp y) { return x.f == y.f && x.e == y.e; } + +// Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking. +inline uint64_t multiply(uint64_t lhs, uint64_t rhs) { +#if FMT_USE_INT128 + auto product = static_cast<__uint128_t>(lhs) * rhs; + auto f = static_cast(product >> 64); + return (static_cast(product) & (1ULL << 63)) != 0 ? f + 1 : f; +#else + // Multiply 32-bit parts of significands. + uint64_t mask = (1ULL << 32) - 1; + uint64_t a = lhs >> 32, b = lhs & mask; + uint64_t c = rhs >> 32, d = rhs & mask; + uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d; + // Compute mid 64-bit of result and round. + uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31); + return ac + (ad >> 32) + (bc >> 32) + (mid >> 32); +#endif +} + +inline fp operator*(fp x, fp y) { return {multiply(x.f, y.f), x.e + y.e + 64}; } + +// Returns a cached power of 10 `c_k = c_k.f * pow(2, c_k.e)` such that its +// (binary) exponent satisfies `min_exponent <= c_k.e <= min_exponent + 28`. 
+inline fp get_cached_power(int min_exponent, int& pow10_exponent) { + // Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., 340. + // These are generated by support/compute-powers.py. + static constexpr const uint64_t pow10_significands[] = { + 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76, + 0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df, + 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c, + 0x8dd01fad907ffc3c, 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, + 0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57, + 0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7, + 0xa086cfcd97bf97f4, 0xef340a98172aace5, 0xb23867fb2a35b28e, + 0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, + 0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126, + 0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053, + 0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f, + 0xf8a95fcf88747d94, 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b, + 0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06, + 0xaa242499697392d3, 0xfd87b5f28300ca0e, 0xbce5086492111aeb, + 0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000, + 0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984, + 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, 0xd5d238a4abe98068, + 0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, + 0x83c7088e1aab65db, 0xc45d1df942711d9a, 0x924d692ca61be758, + 0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85, + 0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d, + 0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, 0xa59bc234db398c25, + 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2, + 0xcc20ce9bd35c78a5, 0x98165af37b2153df, 0xe2a0b5dc971f303a, + 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410, + 0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129, + 0xe7109bfba19c0c9d, 0xac2820d9623bf429, 0x80444b5e7aa7cf85, + 0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, 
+ 0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b, + }; + + // Binary exponents of pow(10, k), for k = -348, -340, ..., 340, corresponding + // to significands above. + static constexpr const int16_t pow10_exponents[] = { + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954, + -927, -901, -874, -847, -821, -794, -768, -741, -715, -688, -661, + -635, -608, -582, -555, -529, -502, -475, -449, -422, -396, -369, + -343, -316, -289, -263, -236, -210, -183, -157, -130, -103, -77, + -50, -24, 3, 30, 56, 83, 109, 136, 162, 189, 216, + 242, 269, 295, 322, 348, 375, 402, 428, 455, 481, 508, + 534, 561, 588, 614, 641, 667, 694, 720, 747, 774, 800, + 827, 853, 880, 907, 933, 960, 986, 1013, 1039, 1066}; + + const int shift = 32; + const auto significand = static_cast(data::log10_2_significand); + int index = static_cast( + ((min_exponent + fp::significand_size - 1) * (significand >> shift) + + ((int64_t(1) << shift) - 1)) // ceil + >> 32 // arithmetic shift + ); + // Decimal exponent of the first (smallest) cached power of 10. + const int first_dec_exp = -348; + // Difference between 2 consecutive decimal exponents in cached powers of 10. + const int dec_exp_step = 8; + index = (index - first_dec_exp - 1) / dec_exp_step + 1; + pow10_exponent = first_dec_exp + index * dec_exp_step; + return {pow10_significands[index], pow10_exponents[index]}; +} + +// A simple accumulator to hold the sums of terms in bigint::square if uint128_t +// is not available. 
+struct accumulator { + uint64_t lower; + uint64_t upper; + + accumulator() : lower(0), upper(0) {} + explicit operator uint32_t() const { return static_cast(lower); } + + void operator+=(uint64_t n) { + lower += n; + if (lower < n) ++upper; + } + void operator>>=(int shift) { + FMT_ASSERT(shift == 32, ""); + (void)shift; + lower = (upper << 32) | (lower >> 32); + upper >>= 32; + } +}; + +class bigint { + private: + // A bigint is stored as an array of bigits (big digits), with bigit at index + // 0 being the least significant one. + using bigit = uint32_t; + using double_bigit = uint64_t; + enum { bigits_capacity = 32 }; + basic_memory_buffer bigits_; + int exp_; + + bigit operator[](int index) const { return bigits_[to_unsigned(index)]; } + bigit& operator[](int index) { return bigits_[to_unsigned(index)]; } + + static FMT_CONSTEXPR_DECL const int bigit_bits = bits::value; + + friend struct formatter; + + void subtract_bigits(int index, bigit other, bigit& borrow) { + auto result = static_cast((*this)[index]) - other - borrow; + (*this)[index] = static_cast(result); + borrow = static_cast(result >> (bigit_bits * 2 - 1)); + } + + void remove_leading_zeros() { + int num_bigits = static_cast(bigits_.size()) - 1; + while (num_bigits > 0 && (*this)[num_bigits] == 0) --num_bigits; + bigits_.resize(to_unsigned(num_bigits + 1)); + } + + // Computes *this -= other assuming aligned bigints and *this >= other. 
+ void subtract_aligned(const bigint& other) { + FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints"); + FMT_ASSERT(compare(*this, other) >= 0, ""); + bigit borrow = 0; + int i = other.exp_ - exp_; + for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j) + subtract_bigits(i, other.bigits_[j], borrow); + while (borrow > 0) subtract_bigits(i, 0, borrow); + remove_leading_zeros(); + } + + void multiply(uint32_t value) { + const double_bigit wide_value = value; + bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + double_bigit result = bigits_[i] * wide_value + carry; + bigits_[i] = static_cast(result); + carry = static_cast(result >> bigit_bits); + } + if (carry != 0) bigits_.push_back(carry); + } + + void multiply(uint64_t value) { + const bigit mask = ~bigit(0); + const double_bigit lower = value & mask; + const double_bigit upper = value >> bigit_bits; + double_bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + double_bigit result = bigits_[i] * lower + (carry & mask); + carry = + bigits_[i] * upper + (result >> bigit_bits) + (carry >> bigit_bits); + bigits_[i] = static_cast(result); + } + while (carry != 0) { + bigits_.push_back(carry & mask); + carry >>= bigit_bits; + } + } + + public: + bigint() : exp_(0) {} + explicit bigint(uint64_t n) { assign(n); } + ~bigint() { FMT_ASSERT(bigits_.capacity() <= bigits_capacity, ""); } + + bigint(const bigint&) = delete; + void operator=(const bigint&) = delete; + + void assign(const bigint& other) { + auto size = other.bigits_.size(); + bigits_.resize(size); + auto data = other.bigits_.data(); + std::copy(data, data + size, make_checked(bigits_.data(), size)); + exp_ = other.exp_; + } + + void assign(uint64_t n) { + size_t num_bigits = 0; + do { + bigits_[num_bigits++] = n & ~bigit(0); + n >>= bigit_bits; + } while (n != 0); + bigits_.resize(num_bigits); + exp_ = 0; + } + + int num_bigits() const { return static_cast(bigits_.size()) + exp_; } + + FMT_NOINLINE bigint& 
operator<<=(int shift) { + FMT_ASSERT(shift >= 0, ""); + exp_ += shift / bigit_bits; + shift %= bigit_bits; + if (shift == 0) return *this; + bigit carry = 0; + for (size_t i = 0, n = bigits_.size(); i < n; ++i) { + bigit c = bigits_[i] >> (bigit_bits - shift); + bigits_[i] = (bigits_[i] << shift) + carry; + carry = c; + } + if (carry != 0) bigits_.push_back(carry); + return *this; + } + + template bigint& operator*=(Int value) { + FMT_ASSERT(value > 0, ""); + multiply(uint32_or_64_or_128_t(value)); + return *this; + } + + friend int compare(const bigint& lhs, const bigint& rhs) { + int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits(); + if (num_lhs_bigits != num_rhs_bigits) + return num_lhs_bigits > num_rhs_bigits ? 1 : -1; + int i = static_cast(lhs.bigits_.size()) - 1; + int j = static_cast(rhs.bigits_.size()) - 1; + int end = i - j; + if (end < 0) end = 0; + for (; i >= end; --i, --j) { + bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j]; + if (lhs_bigit != rhs_bigit) return lhs_bigit > rhs_bigit ? 1 : -1; + } + if (i != j) return i > j ? 1 : -1; + return 0; + } + + // Returns compare(lhs1 + lhs2, rhs). + friend int add_compare(const bigint& lhs1, const bigint& lhs2, + const bigint& rhs) { + int max_lhs_bigits = (std::max)(lhs1.num_bigits(), lhs2.num_bigits()); + int num_rhs_bigits = rhs.num_bigits(); + if (max_lhs_bigits + 1 < num_rhs_bigits) return -1; + if (max_lhs_bigits > num_rhs_bigits) return 1; + auto get_bigit = [](const bigint& n, int i) -> bigit { + return i >= n.exp_ && i < n.num_bigits() ? 
n[i - n.exp_] : 0; + }; + double_bigit borrow = 0; + int min_exp = (std::min)((std::min)(lhs1.exp_, lhs2.exp_), rhs.exp_); + for (int i = num_rhs_bigits - 1; i >= min_exp; --i) { + double_bigit sum = + static_cast(get_bigit(lhs1, i)) + get_bigit(lhs2, i); + bigit rhs_bigit = get_bigit(rhs, i); + if (sum > rhs_bigit + borrow) return 1; + borrow = rhs_bigit + borrow - sum; + if (borrow > 1) return -1; + borrow <<= bigit_bits; + } + return borrow != 0 ? -1 : 0; + } + + // Assigns pow(10, exp) to this bigint. + void assign_pow10(int exp) { + FMT_ASSERT(exp >= 0, ""); + if (exp == 0) return assign(1); + // Find the top bit. + int bitmask = 1; + while (exp >= bitmask) bitmask <<= 1; + bitmask >>= 1; + // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) by + // repeated squaring and multiplication. + assign(5); + bitmask >>= 1; + while (bitmask != 0) { + square(); + if ((exp & bitmask) != 0) *this *= 5; + bitmask >>= 1; + } + *this <<= exp; // Multiply by pow(2, exp) by shifting. + } + + void square() { + int num_bigits = static_cast(bigits_.size()); + int num_result_bigits = 2 * num_bigits; + basic_memory_buffer n(std::move(bigits_)); + bigits_.resize(to_unsigned(num_result_bigits)); + using accumulator_t = conditional_t; + auto sum = accumulator_t(); + for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) { + // Compute bigit at position bigit_index of the result by adding + // cross-product terms n[i] * n[j] such that i + j == bigit_index. + for (int i = 0, j = bigit_index; j >= 0; ++i, --j) { + // Most terms are multiplied twice which can be optimized in the future. + sum += static_cast(n[i]) * n[j]; + } + (*this)[bigit_index] = static_cast(sum); + sum >>= bits::value; // Compute the carry. + } + // Do the same for the top half. 
+ for (int bigit_index = num_bigits; bigit_index < num_result_bigits; + ++bigit_index) { + for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;) + sum += static_cast(n[i++]) * n[j--]; + (*this)[bigit_index] = static_cast(sum); + sum >>= bits::value; + } + remove_leading_zeros(); + exp_ *= 2; + } + + // If this bigint has a bigger exponent than other, adds trailing zero to make + // exponents equal. This simplifies some operations such as subtraction. + void align(const bigint& other) { + int exp_difference = exp_ - other.exp_; + if (exp_difference <= 0) return; + int num_bigits = static_cast(bigits_.size()); + bigits_.resize(to_unsigned(num_bigits + exp_difference)); + for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j) + bigits_[j] = bigits_[i]; + std::uninitialized_fill_n(bigits_.data(), exp_difference, 0); + exp_ -= exp_difference; + } + + // Divides this bignum by divisor, assigning the remainder to this and + // returning the quotient. + int divmod_assign(const bigint& divisor) { + FMT_ASSERT(this != &divisor, ""); + if (compare(*this, divisor) < 0) return 0; + FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, ""); + align(divisor); + int quotient = 0; + do { + subtract_aligned(divisor); + ++quotient; + } while (compare(*this, divisor) >= 0); + return quotient; + } +}; + +enum class round_direction { unknown, up, down }; + +// Given the divisor (normally a power of 10), the remainder = v % divisor for +// some number v and the error, returns whether v should be rounded up, down, or +// whether the rounding direction can't be determined due to error. +// error should be less than divisor / 2. +inline round_direction get_round_direction(uint64_t divisor, uint64_t remainder, + uint64_t error) { + FMT_ASSERT(remainder < divisor, ""); // divisor - remainder won't overflow. + FMT_ASSERT(error < divisor, ""); // divisor - error won't overflow. + FMT_ASSERT(error < divisor - error, ""); // error * 2 won't overflow. 
+ // Round down if (remainder + error) * 2 <= divisor. + if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2) + return round_direction::down; + // Round up if (remainder - error) * 2 >= divisor. + if (remainder >= error && + remainder - error >= divisor - (remainder - error)) { + return round_direction::up; + } + return round_direction::unknown; +} + +namespace digits { +enum result { + more, // Generate more digits. + done, // Done generating digits. + error // Digit generation cancelled due to an error. +}; +} + +inline uint64_t power_of_10_64(int exp) { + static constexpr const uint64_t data[] = {1, FMT_POWERS_OF_10(1), + FMT_POWERS_OF_10(1000000000ULL), + 10000000000000000000ULL}; + return data[exp]; +} + +// Generates output using the Grisu digit-gen algorithm. +// error: the size of the region (lower, upper) outside of which numbers +// definitely do not round to value (Delta in Grisu3). +template +FMT_INLINE digits::result grisu_gen_digits(fp value, uint64_t error, int& exp, + Handler& handler) { + const fp one(1ULL << -value.e, value.e); + // The integral part of scaled value (p1 in Grisu) = value / one. It cannot be + // zero because it contains a product of two 64-bit numbers with MSB set (due + // to normalization) - 1, shifted right by at most 60 bits. + auto integral = static_cast(value.f >> -one.e); + FMT_ASSERT(integral != 0, ""); + FMT_ASSERT(integral == value.f >> -one.e, ""); + // The fractional part of scaled value (p2 in Grisu) c = value % one. + uint64_t fractional = value.f & (one.f - 1); + exp = count_digits(integral); // kappa in Grisu. + // Divide by 10 to prevent overflow. + auto result = handler.on_start(power_of_10_64(exp - 1) << -one.e, + value.f / 10, error * 10, exp); + if (result != digits::more) return result; + // Generate digits for the integral part. This can produce up to 10 digits. 
+ do { + uint32_t digit = 0; + auto divmod_integral = [&](uint32_t divisor) { + digit = integral / divisor; + integral %= divisor; + }; + // This optimization by Milo Yip reduces the number of integer divisions by + // one per iteration. + switch (exp) { + case 10: + divmod_integral(1000000000); + break; + case 9: + divmod_integral(100000000); + break; + case 8: + divmod_integral(10000000); + break; + case 7: + divmod_integral(1000000); + break; + case 6: + divmod_integral(100000); + break; + case 5: + divmod_integral(10000); + break; + case 4: + divmod_integral(1000); + break; + case 3: + divmod_integral(100); + break; + case 2: + divmod_integral(10); + break; + case 1: + digit = integral; + integral = 0; + break; + default: + FMT_ASSERT(false, "invalid number of digits"); + } + --exp; + auto remainder = (static_cast(integral) << -one.e) + fractional; + result = handler.on_digit(static_cast('0' + digit), + power_of_10_64(exp) << -one.e, remainder, error, + exp, true); + if (result != digits::more) return result; + } while (exp > 0); + // Generate digits for the fractional part. + for (;;) { + fractional *= 10; + error *= 10; + char digit = static_cast('0' + (fractional >> -one.e)); + fractional &= one.f - 1; + --exp; + result = handler.on_digit(digit, one.f, fractional, error, exp, false); + if (result != digits::more) return result; + } +} + +// The fixed precision digit handler. +struct fixed_handler { + char* buf; + int size; + int precision; + int exp10; + bool fixed; + + digits::result on_start(uint64_t divisor, uint64_t remainder, uint64_t error, + int& exp) { + // Non-fixed formats require at least one digit and no precision adjustment. + if (!fixed) return digits::more; + // Adjust fixed precision by exponent because it is relative to decimal + // point. + precision += exp + exp10; + // Check if precision is satisfied just by leading zeros, e.g. + // format("{:.2f}", 0.001) gives "0.00" without generating any digits. 
+ if (precision > 0) return digits::more; + if (precision < 0) return digits::done; + auto dir = get_round_direction(divisor, remainder, error); + if (dir == round_direction::unknown) return digits::error; + buf[size++] = dir == round_direction::up ? '1' : '0'; + return digits::done; + } + + digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder, + uint64_t error, int, bool integral) { + FMT_ASSERT(remainder < divisor, ""); + buf[size++] = digit; + if (!integral && error >= remainder) return digits::error; + if (size < precision) return digits::more; + if (!integral) { + // Check if error * 2 < divisor with overflow prevention. + // The check is not needed for the integral part because error = 1 + // and divisor > (1 << 32) there. + if (error >= divisor || error >= divisor - error) return digits::error; + } else { + FMT_ASSERT(error == 1 && divisor > 2, ""); + } + auto dir = get_round_direction(divisor, remainder, error); + if (dir != round_direction::up) + return dir == round_direction::down ? 
digits::done : digits::error; + ++buf[size - 1]; + for (int i = size - 1; i > 0 && buf[i] > '9'; --i) { + buf[i] = '0'; + ++buf[i - 1]; + } + if (buf[0] > '9') { + buf[0] = '1'; + if (fixed) + buf[size++] = '0'; + else + ++exp10; + } + return digits::done; + } +}; + +// A 128-bit integer type used internally, +struct uint128_wrapper { + uint128_wrapper() = default; + +#if FMT_USE_INT128 + uint128_t internal_; + + constexpr uint128_wrapper(uint64_t high, uint64_t low) FMT_NOEXCEPT + : internal_{static_cast(low) | + (static_cast(high) << 64)} {} + + constexpr uint128_wrapper(uint128_t u) : internal_{u} {} + + constexpr uint64_t high() const FMT_NOEXCEPT { + return uint64_t(internal_ >> 64); + } + constexpr uint64_t low() const FMT_NOEXCEPT { return uint64_t(internal_); } + + uint128_wrapper& operator+=(uint64_t n) FMT_NOEXCEPT { + internal_ += n; + return *this; + } +#else + uint64_t high_; + uint64_t low_; + + constexpr uint128_wrapper(uint64_t high, uint64_t low) FMT_NOEXCEPT + : high_{high}, + low_{low} {} + + constexpr uint64_t high() const FMT_NOEXCEPT { return high_; } + constexpr uint64_t low() const FMT_NOEXCEPT { return low_; } + + uint128_wrapper& operator+=(uint64_t n) FMT_NOEXCEPT { +# if defined(_MSC_VER) && defined(_M_X64) + unsigned char carry = _addcarry_u64(0, low_, n, &low_); + _addcarry_u64(carry, high_, 0, &high_); + return *this; +# else + uint64_t sum = low_ + n; + high_ += (sum < low_ ? 1 : 0); + low_ = sum; + return *this; +# endif + } +#endif +}; + +// Implementation of Dragonbox algorithm: https://github.com/jk-jeon/dragonbox. +namespace dragonbox { +// Computes 128-bit result of multiplication of two 64-bit unsigned integers. 
+inline uint128_wrapper umul128(uint64_t x, uint64_t y) FMT_NOEXCEPT { +#if FMT_USE_INT128 + return static_cast(x) * static_cast(y); +#elif defined(_MSC_VER) && defined(_M_X64) + uint128_wrapper result; + result.low_ = _umul128(x, y, &result.high_); + return result; +#else + const uint64_t mask = (uint64_t(1) << 32) - uint64_t(1); + + uint64_t a = x >> 32; + uint64_t b = x & mask; + uint64_t c = y >> 32; + uint64_t d = y & mask; + + uint64_t ac = a * c; + uint64_t bc = b * c; + uint64_t ad = a * d; + uint64_t bd = b * d; + + uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask); + + return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32), + (intermediate << 32) + (bd & mask)}; +#endif +} + +// Computes upper 64 bits of multiplication of two 64-bit unsigned integers. +inline uint64_t umul128_upper64(uint64_t x, uint64_t y) FMT_NOEXCEPT { +#if FMT_USE_INT128 + auto p = static_cast(x) * static_cast(y); + return static_cast(p >> 64); +#elif defined(_MSC_VER) && defined(_M_X64) + return __umulh(x, y); +#else + return umul128(x, y).high(); +#endif +} + +// Computes upper 64 bits of multiplication of a 64-bit unsigned integer and a +// 128-bit unsigned integer. +inline uint64_t umul192_upper64(uint64_t x, uint128_wrapper y) FMT_NOEXCEPT { + uint128_wrapper g0 = umul128(x, y.high()); + g0 += umul128_upper64(x, y.low()); + return g0.high(); +} + +// Computes upper 32 bits of multiplication of a 32-bit unsigned integer and a +// 64-bit unsigned integer. +inline uint32_t umul96_upper32(uint32_t x, uint64_t y) FMT_NOEXCEPT { + return static_cast(umul128_upper64(x, y)); +} + +// Computes middle 64 bits of multiplication of a 64-bit unsigned integer and a +// 128-bit unsigned integer. 
+inline uint64_t umul192_middle64(uint64_t x, uint128_wrapper y) FMT_NOEXCEPT { + uint64_t g01 = x * y.high(); + uint64_t g10 = umul128_upper64(x, y.low()); + return g01 + g10; +} + +// Computes lower 64 bits of multiplication of a 32-bit unsigned integer and a +// 64-bit unsigned integer. +inline uint64_t umul96_lower64(uint32_t x, uint64_t y) FMT_NOEXCEPT { + return x * y; +} + +// Computes floor(log10(pow(2, e))) for e in [-1700, 1700] using the method from +// https://fmt.dev/papers/Grisu-Exact.pdf#page=5, section 3.4. +inline int floor_log10_pow2(int e) FMT_NOEXCEPT { + FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); + const int shift = 22; + return (e * static_cast(data::log10_2_significand >> (64 - shift))) >> + shift; +} + +// Various fast log computations. +inline int floor_log2_pow10(int e) FMT_NOEXCEPT { + FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent"); + const uint64_t log2_10_integer_part = 3; + const uint64_t log2_10_fractional_digits = 0x5269e12f346e2bf9; + const int shift_amount = 19; + return (e * static_cast( + (log2_10_integer_part << shift_amount) | + (log2_10_fractional_digits >> (64 - shift_amount)))) >> + shift_amount; +} +inline int floor_log10_pow2_minus_log10_4_over_3(int e) FMT_NOEXCEPT { + FMT_ASSERT(e <= 1700 && e >= -1700, "too large exponent"); + const uint64_t log10_4_over_3_fractional_digits = 0x1ffbfc2bbc780375; + const int shift_amount = 22; + return (e * static_cast(data::log10_2_significand >> + (64 - shift_amount)) - + static_cast(log10_4_over_3_fractional_digits >> + (64 - shift_amount))) >> + shift_amount; +} + +// Returns true iff x is divisible by pow(2, exp). 
+inline bool divisible_by_power_of_2(uint32_t x, int exp) FMT_NOEXCEPT { + FMT_ASSERT(exp >= 1, ""); + FMT_ASSERT(x != 0, ""); +#ifdef FMT_BUILTIN_CTZ + return FMT_BUILTIN_CTZ(x) >= exp; +#else + return exp < num_bits() && x == ((x >> exp) << exp); +#endif +} +inline bool divisible_by_power_of_2(uint64_t x, int exp) FMT_NOEXCEPT { + FMT_ASSERT(exp >= 1, ""); + FMT_ASSERT(x != 0, ""); +#ifdef FMT_BUILTIN_CTZLL + return FMT_BUILTIN_CTZLL(x) >= exp; +#else + return exp < num_bits() && x == ((x >> exp) << exp); +#endif +} + +// Table entry type for divisibility test. +template struct divtest_table_entry { + T mod_inv; + T max_quotient; +}; + +// Returns true iff x is divisible by pow(5, exp). +inline bool divisible_by_power_of_5(uint32_t x, int exp) FMT_NOEXCEPT { + FMT_ASSERT(exp <= 10, "too large exponent"); + static constexpr const divtest_table_entry divtest_table[] = { + {0x00000001, 0xffffffff}, {0xcccccccd, 0x33333333}, + {0xc28f5c29, 0x0a3d70a3}, {0x26e978d5, 0x020c49ba}, + {0x3afb7e91, 0x0068db8b}, {0x0bcbe61d, 0x0014f8b5}, + {0x68c26139, 0x000431bd}, {0xae8d46a5, 0x0000d6bf}, + {0x22e90e21, 0x00002af3}, {0x3a2e9c6d, 0x00000897}, + {0x3ed61f49, 0x000001b7}}; + return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; +} +inline bool divisible_by_power_of_5(uint64_t x, int exp) FMT_NOEXCEPT { + FMT_ASSERT(exp <= 23, "too large exponent"); + static constexpr const divtest_table_entry divtest_table[] = { + {0x0000000000000001, 0xffffffffffffffff}, + {0xcccccccccccccccd, 0x3333333333333333}, + {0x8f5c28f5c28f5c29, 0x0a3d70a3d70a3d70}, + {0x1cac083126e978d5, 0x020c49ba5e353f7c}, + {0xd288ce703afb7e91, 0x0068db8bac710cb2}, + {0x5d4e8fb00bcbe61d, 0x0014f8b588e368f0}, + {0x790fb65668c26139, 0x000431bde82d7b63}, + {0xe5032477ae8d46a5, 0x0000d6bf94d5e57a}, + {0xc767074b22e90e21, 0x00002af31dc46118}, + {0x8e47ce423a2e9c6d, 0x0000089705f4136b}, + {0x4fa7f60d3ed61f49, 0x000001b7cdfd9d7b}, + {0x0fee64690c913975, 0x00000057f5ff85e5}, + {0x3662e0e1cf503eb1, 
0x000000119799812d}, + {0xa47a2cf9f6433fbd, 0x0000000384b84d09}, + {0x54186f653140a659, 0x00000000b424dc35}, + {0x7738164770402145, 0x0000000024075f3d}, + {0xe4a4d1417cd9a041, 0x000000000734aca5}, + {0xc75429d9e5c5200d, 0x000000000170ef54}, + {0xc1773b91fac10669, 0x000000000049c977}, + {0x26b172506559ce15, 0x00000000000ec1e4}, + {0xd489e3a9addec2d1, 0x000000000002f394}, + {0x90e860bb892c8d5d, 0x000000000000971d}, + {0x502e79bf1b6f4f79, 0x0000000000001e39}, + {0xdcd618596be30fe5, 0x000000000000060b}}; + return x * divtest_table[exp].mod_inv <= divtest_table[exp].max_quotient; +} + +// Replaces n by floor(n / pow(5, N)) returning true if and only if n is +// divisible by pow(5, N). +// Precondition: n <= 2 * pow(5, N + 1). +template +bool check_divisibility_and_divide_by_pow5(uint32_t& n) FMT_NOEXCEPT { + static constexpr struct { + uint32_t magic_number; + int bits_for_comparison; + uint32_t threshold; + int shift_amount; + } infos[] = {{0xcccd, 16, 0x3333, 18}, {0xa429, 8, 0x0a, 20}}; + constexpr auto info = infos[N - 1]; + n *= info.magic_number; + const uint32_t comparison_mask = (1u << info.bits_for_comparison) - 1; + bool result = (n & comparison_mask) <= info.threshold; + n >>= info.shift_amount; + return result; +} + +// Computes floor(n / pow(10, N)) for small n and N. +// Precondition: n <= pow(10, N + 1). 
+template uint32_t small_division_by_pow10(uint32_t n) FMT_NOEXCEPT { + static constexpr struct { + uint32_t magic_number; + int shift_amount; + uint32_t divisor_times_10; + } infos[] = {{0xcccd, 19, 100}, {0xa3d8, 22, 1000}}; + constexpr auto info = infos[N - 1]; + FMT_ASSERT(n <= info.divisor_times_10, "n is too large"); + return n * info.magic_number >> info.shift_amount; +} + +// Computes floor(n / 10^(kappa + 1)) (float) +inline uint32_t divide_by_10_to_kappa_plus_1(uint32_t n) FMT_NOEXCEPT { + return n / float_info::big_divisor; +} +// Computes floor(n / 10^(kappa + 1)) (double) +inline uint64_t divide_by_10_to_kappa_plus_1(uint64_t n) FMT_NOEXCEPT { + return umul128_upper64(n, 0x83126e978d4fdf3c) >> 9; +} + +// Various subroutines using pow10 cache +template struct cache_accessor; + +template <> struct cache_accessor { + using carrier_uint = float_info::carrier_uint; + using cache_entry_type = uint64_t; + + static uint64_t get_cached_power(int k) FMT_NOEXCEPT { + FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, + "k is out of range"); + constexpr const uint64_t pow10_significands[] = { + 0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f, + 0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, 0xc612062576589ddb, + 0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28, + 0xf1c90080baf72cb2, 0x971da05074da7bef, 0xbce5086492111aeb, + 0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a, + 0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810, + 0xe12e13424bb40e14, 0x8cbccc096f5088cc, 0xafebff0bcb24aaff, + 0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd, + 0xd6bf94d5e57a42bd, 0x8637bd05af6c69b6, 0xa7c5ac471b478424, + 0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b, + 0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000, + 0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000, + 0xc350000000000000, 0xf424000000000000, 0x9896800000000000, + 0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000, + 
0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000, + 0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000, + 0xb1a2bc2ec5000000, 0xde0b6b3a76400000, 0x8ac7230489e80000, + 0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000, + 0xa968163f0a57b400, 0xd3c21bcecceda100, 0x84595161401484a0, + 0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940984, + 0xa18f07d736b90be5, 0xc9f2c9cd04674ede, 0xfc6f7c4045812296, + 0x9dc5ada82b70b59d, 0xc5371912364ce305, 0xf684df56c3e01bc6, + 0x9a130b963a6c115c, 0xc097ce7bc90715b3, 0xf0bdc21abb48db20, + 0x96769950b50d88f4, 0xbc143fa4e250eb31, 0xeb194f8e1ae525fd, + 0x92efd1b8d0cf37be, 0xb7abc627050305ad, 0xe596b7b0c643c719, + 0x8f7e32ce7bea5c6f, 0xb35dbf821ae4f38b, 0xe0352f62a19e306e}; + return pow10_significands[k - float_info::min_k]; + } + + static carrier_uint compute_mul(carrier_uint u, + const cache_entry_type& cache) FMT_NOEXCEPT { + return umul96_upper32(u, cache); + } + + static uint32_t compute_delta(const cache_entry_type& cache, + int beta_minus_1) FMT_NOEXCEPT { + return static_cast(cache >> (64 - 1 - beta_minus_1)); + } + + static bool compute_mul_parity(carrier_uint two_f, + const cache_entry_type& cache, + int beta_minus_1) FMT_NOEXCEPT { + FMT_ASSERT(beta_minus_1 >= 1, ""); + FMT_ASSERT(beta_minus_1 < 64, ""); + + return ((umul96_lower64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; + } + + static carrier_uint compute_left_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return static_cast( + (cache - (cache >> (float_info::significand_bits + 2))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1)); + } + + static carrier_uint compute_right_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return static_cast( + (cache + (cache >> (float_info::significand_bits + 1))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1)); + } + + static carrier_uint 
compute_round_up_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return (static_cast( + cache >> + (64 - float_info::significand_bits - 2 - beta_minus_1)) + + 1) / + 2; + } +}; + +template <> struct cache_accessor { + using carrier_uint = float_info::carrier_uint; + using cache_entry_type = uint128_wrapper; + + static uint128_wrapper get_cached_power(int k) FMT_NOEXCEPT { + FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, + "k is out of range"); + + static constexpr const uint128_wrapper pow10_significands[] = { +#if FMT_USE_FULL_CACHE_DRAGONBOX + {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, + {0x9faacf3df73609b1, 0x77b191618c54e9ad}, + {0xc795830d75038c1d, 0xd59df5b9ef6a2418}, + {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e}, + {0x9becce62836ac577, 0x4ee367f9430aec33}, + {0xc2e801fb244576d5, 0x229c41f793cda740}, + {0xf3a20279ed56d48a, 0x6b43527578c11110}, + {0x9845418c345644d6, 0x830a13896b78aaaa}, + {0xbe5691ef416bd60c, 0x23cc986bc656d554}, + {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9}, + {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa}, + {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54}, + {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69}, + {0x91376c36d99995be, 0x23100809b9c21fa2}, + {0xb58547448ffffb2d, 0xabd40a0c2832a78b}, + {0xe2e69915b3fff9f9, 0x16c90c8f323f516d}, + {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4}, + {0xb1442798f49ffb4a, 0x99cd11cfdf41779d}, + {0xdd95317f31c7fa1d, 0x40405643d711d584}, + {0x8a7d3eef7f1cfc52, 0x482835ea666b2573}, + {0xad1c8eab5ee43b66, 0xda3243650005eed0}, + {0xd863b256369d4a40, 0x90bed43e40076a83}, + {0x873e4f75e2224e68, 0x5a7744a6e804a292}, + {0xa90de3535aaae202, 0x711515d0a205cb37}, + {0xd3515c2831559a83, 0x0d5a5b44ca873e04}, + {0x8412d9991ed58091, 0xe858790afe9486c3}, + {0xa5178fff668ae0b6, 0x626e974dbe39a873}, + {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, + {0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, + {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, + {0xc987434744ac874e, 0xa327ffb266b56221}, + {0xfbe9141915d7a922, 
0x4bf1ff9f0062baa9}, + {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, + {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, + {0xf6019da07f549b2b, 0x7e2a53a146606a49}, + {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, + {0xc0314325637a1939, 0xfa911155fefb5309}, + {0xf03d93eebc589f88, 0x793555ab7eba27cb}, + {0x96267c7535b763b5, 0x4bc1558b2f3458df}, + {0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, + {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, + {0x92a1958a7675175f, 0x0bfacd89ec191eca}, + {0xb749faed14125d36, 0xcef980ec671f667c}, + {0xe51c79a85916f484, 0x82b7e12780e7401b}, + {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, + {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, + {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, + {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, + {0xaecc49914078536d, 0x58fae9f773886e19}, + {0xda7f5bf590966848, 0xaf39a475506a899f}, + {0x888f99797a5e012d, 0x6d8406c952429604}, + {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, + {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, + {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, + {0xd0601d8efc57b08b, 0xf13b94daf124da27}, + {0x823c12795db6ce57, 0x76c53d08d6b70859}, + {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, + {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, + {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, + {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, + {0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, + {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, + {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, + {0xc21094364dfb5636, 0x985915fc12f542e5}, + {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, + {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, + {0xbd8430bd08277231, 0x50c6ff782a838354}, + {0xece53cec4a314ebd, 0xa4f8bf5635246429}, + {0x940f4613ae5ed136, 0x871b7795e136be9a}, + {0xb913179899f68584, 0x28e2557b59846e40}, + {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, + {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, + {0xb4bca50b065abe63, 0x0fed077a756b53aa}, + {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, + {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, + {0xb080392cc4349dec, 
0xbd8d794d96aacfb4}, + {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, + {0x89e42caaf9491b60, 0xf41686c49db57245}, + {0xac5d37d5b79b6239, 0x311c2875c522ced6}, + {0xd77485cb25823ac7, 0x7d633293366b828c}, + {0x86a8d39ef77164bc, 0xae5dff9c02033198}, + {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, + {0xd267caa862a12d66, 0xd072df63c324fd7c}, + {0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, + {0xa46116538d0deb78, 0x52d9be85f074e609}, + {0xcd795be870516656, 0x67902e276c921f8c}, + {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, + {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, + {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, + {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, + {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, + {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, + {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, + {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, + {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, + {0xef340a98172aace4, 0x86fb897116c87c35}, + {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, + {0xbae0a846d2195712, 0x8974836059cca10a}, + {0xe998d258869facd7, 0x2bd1a438703fc94c}, + {0x91ff83775423cc06, 0x7b6306a34627ddd0}, + {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, + {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, + {0x8e938662882af53e, 0x547eb47b7282ee9d}, + {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, + {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, + {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, + {0xae0b158b4738705e, 0x9624ab50b148d446}, + {0xd98ddaee19068c76, 0x3badd624dd9b0958}, + {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, + {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, + {0xd47487cc8470652b, 0x7647c32000696720}, + {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, + {0xa5fb0a17c777cf09, 0xf468107100525891}, + {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, + {0x81ac1fe293d599bf, 0xc6f14cd848405531}, + {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, + {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, + {0xfd442e4688bd304a, 0x908f4a166d1da664}, + {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, + {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, + {0xf7549530e188c128, 
0xd12bee59e68ef47d}, + {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, + {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, + {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, + {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, + {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, + {0xebdf661791d60f56, 0x111b495b3464ad22}, + {0x936b9fcebb25c995, 0xcab10dd900beec35}, + {0xb84687c269ef3bfb, 0x3d5d514f40eea743}, + {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, + {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, + {0xb3f4e093db73a093, 0x59ed216765690f57}, + {0xe0f218b8d25088b8, 0x306869c13ec3532d}, + {0x8c974f7383725573, 0x1e414218c73a13fc}, + {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, + {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, + {0x894bc396ce5da772, 0x6b8bba8c328eb784}, + {0xab9eb47c81f5114f, 0x066ea92f3f326565}, + {0xd686619ba27255a2, 0xc80a537b0efefebe}, + {0x8613fd0145877585, 0xbd06742ce95f5f37}, + {0xa798fc4196e952e7, 0x2c48113823b73705}, + {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, + {0x82ef85133de648c4, 0x9a984d73dbe722fc}, + {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, + {0xcc963fee10b7d1b3, 0x318df905079926a9}, + {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, + {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, + {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, + {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, + {0x9c1661a651213e2d, 0x06bea10ca65c084f}, + {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, + {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, + {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, + {0xbe89523386091465, 0xf6bbb397f1135824}, + {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, + {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, + {0xba121a4650e4ddeb, 0x92f34d62616ce414}, + {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, + {0x915e2486ef32cd60, 0x0ace1474dc1d122f}, + {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, + {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, + {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, + {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, + {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, + {0xad4ab7112eb3929d, 
0x86c16c98d2c953c7}, + {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, + {0x87625f056c7c4a8b, 0x11471cd764ad4973}, + {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, + {0xd389b47879823479, 0x4aff1d108d4ec2c4}, + {0x843610cb4bf160cb, 0xcedf722a585139bb}, + {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, + {0xce947a3da6a9273e, 0x733d226229feea33}, + {0x811ccc668829b887, 0x0806357d5a3f5260}, + {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, + {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, + {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, + {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, + {0xc5029163f384a931, 0x0a9e795e65d4df12}, + {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, + {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, + {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, + {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, + {0x964e858c91ba2655, 0x3a6a07f8d510f870}, + {0xbbe226efb628afea, 0x890489f70a55368c}, + {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, + {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, + {0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, + {0xe55990879ddcaabd, 0xcc420a6a101d0516}, + {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, + {0xb32df8e9f3546564, 0x47939822dc96abfa}, + {0xdff9772470297ebd, 0x59787e2b93bc56f8}, + {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, + {0xaefae51477a06b03, 0xede622920b6b23f2}, + {0xdab99e59958885c4, 0xe95fab368e45ecee}, + {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, + {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, + {0xd59944a37c0752a2, 0x4be76d3346f04960}, + {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, + {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, + {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, + {0x825ecc24c873782f, 0x8ed400668c0c28c9}, + {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, + {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, + {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, + {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, + {0xc6ede63fa05d3143, 0x91503d1c79720dbc}, + {0xf8a95fcf88747d94, 0x75a44c6397ce912b}, + {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, + {0xc24452da229b021b, 0xfbe85badce996169}, + {0xf2d56790ab41c2a2, 
0xfae27299423fb9c4}, + {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, + {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, + {0xed246723473e3813, 0x290123e9aab23b69}, + {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, + {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, + {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, + {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, + {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, + {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, + {0x8d590723948a535f, 0x579c487e5a38ad0f}, + {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, + {0xdcdb1b2798182244, 0xf8e431456cf88e66}, + {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, + {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, + {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, + {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, + {0xa87fea27a539e9a5, 0x3f2398d747b36225}, + {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, + {0x83a3eeeef9153e89, 0x1953cf68300424ad}, + {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, + {0xcdb02555653131b6, 0x3792f412cb06794e}, + {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, + {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, + {0xc8de047564d20a8b, 0xf245825a5a445276}, + {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, + {0x9ced737bb6c4183d, 0x55464dd69685606c}, + {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, + {0xf53304714d9265df, 0xd53dd99f4b3066a9}, + {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, + {0xbf8fdb78849a5f96, 0xde98520472bdd034}, + {0xef73d256a5c0f77c, 0x963e66858f6d4441}, + {0x95a8637627989aad, 0xdde7001379a44aa9}, + {0xbb127c53b17ec159, 0x5560c018580d5d53}, + {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, + {0x9226712162ab070d, 0xcab3961304ca70e9}, + {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, + {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, + {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, + {0xb267ed1940f1c61c, 0x55f038b237591ed4}, + {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, + {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, + {0xae397d8aa96c1b77, 0xabec975e0a0d081b}, + {0xd9c7dced53c72255, 0x96e7bd358c904a22}, + {0x881cea14545c7575, 0x7e50d64177da2e55}, + {0xaa242499697392d2, 
0xdde50bd1d5d0b9ea}, + {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, + {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, + {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, + {0xcfb11ead453994ba, 0x67de18eda5814af3}, + {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, + {0xa2425ff75e14fc31, 0xa1258379a94d028e}, + {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, + {0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, + {0x9e74d1b791e07e48, 0x775ea264cf55347e}, + {0xc612062576589dda, 0x95364afe032a819e}, + {0xf79687aed3eec551, 0x3a83ddbd83f52205}, + {0x9abe14cd44753b52, 0xc4926a9672793543}, + {0xc16d9a0095928a27, 0x75b7053c0f178294}, + {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, + {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, + {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, + {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, + {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, + {0xb877aa3236a4b449, 0x09befeb9fad487c3}, + {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, + {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, + {0xb424dc35095cd80f, 0x538484c19ef38c95}, + {0xe12e13424bb40e13, 0x2865a5f206b06fba}, + {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, + {0xafebff0bcb24aafe, 0xf78f69a51539d749}, + {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, + {0x89705f4136b4a597, 0x31680a88f8953031}, + {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, + {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, + {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, + {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, + {0xd1b71758e219652b, 0xd3c36113404ea4a9}, + {0x83126e978d4fdf3b, 0x645a1cac083126ea}, + {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, + {0xcccccccccccccccc, 0xcccccccccccccccd}, + {0x8000000000000000, 0x0000000000000000}, + {0xa000000000000000, 0x0000000000000000}, + {0xc800000000000000, 0x0000000000000000}, + {0xfa00000000000000, 0x0000000000000000}, + {0x9c40000000000000, 0x0000000000000000}, + {0xc350000000000000, 0x0000000000000000}, + {0xf424000000000000, 0x0000000000000000}, + {0x9896800000000000, 0x0000000000000000}, + {0xbebc200000000000, 0x0000000000000000}, + {0xee6b280000000000, 
0x0000000000000000}, + {0x9502f90000000000, 0x0000000000000000}, + {0xba43b74000000000, 0x0000000000000000}, + {0xe8d4a51000000000, 0x0000000000000000}, + {0x9184e72a00000000, 0x0000000000000000}, + {0xb5e620f480000000, 0x0000000000000000}, + {0xe35fa931a0000000, 0x0000000000000000}, + {0x8e1bc9bf04000000, 0x0000000000000000}, + {0xb1a2bc2ec5000000, 0x0000000000000000}, + {0xde0b6b3a76400000, 0x0000000000000000}, + {0x8ac7230489e80000, 0x0000000000000000}, + {0xad78ebc5ac620000, 0x0000000000000000}, + {0xd8d726b7177a8000, 0x0000000000000000}, + {0x878678326eac9000, 0x0000000000000000}, + {0xa968163f0a57b400, 0x0000000000000000}, + {0xd3c21bcecceda100, 0x0000000000000000}, + {0x84595161401484a0, 0x0000000000000000}, + {0xa56fa5b99019a5c8, 0x0000000000000000}, + {0xcecb8f27f4200f3a, 0x0000000000000000}, + {0x813f3978f8940984, 0x4000000000000000}, + {0xa18f07d736b90be5, 0x5000000000000000}, + {0xc9f2c9cd04674ede, 0xa400000000000000}, + {0xfc6f7c4045812296, 0x4d00000000000000}, + {0x9dc5ada82b70b59d, 0xf020000000000000}, + {0xc5371912364ce305, 0x6c28000000000000}, + {0xf684df56c3e01bc6, 0xc732000000000000}, + {0x9a130b963a6c115c, 0x3c7f400000000000}, + {0xc097ce7bc90715b3, 0x4b9f100000000000}, + {0xf0bdc21abb48db20, 0x1e86d40000000000}, + {0x96769950b50d88f4, 0x1314448000000000}, + {0xbc143fa4e250eb31, 0x17d955a000000000}, + {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, + {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, + {0xb7abc627050305ad, 0xf14a3d9e40000000}, + {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, + {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, + {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, + {0xe0352f62a19e306e, 0xd50b2037ad200000}, + {0x8c213d9da502de45, 0x4526f422cc340000}, + {0xaf298d050e4395d6, 0x9670b12b7f410000}, + {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, + {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, + {0xab0e93b6efee0053, 0x8eea0d047a457a00}, + {0xd5d238a4abe98068, 0x72a4904598d6d880}, + {0x85a36366eb71f041, 0x47a6da2b7f864750}, + {0xa70c3c40a64e6c51, 
0x999090b65f67d924}, + {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, + {0x82818f1281ed449f, 0xbff8f10e7a8921a4}, + {0xa321f2d7226895c7, 0xaff72d52192b6a0d}, + {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490}, + {0xfee50b7025c36a08, 0x02f236d04753d5b4}, + {0x9f4f2726179a2245, 0x01d762422c946590}, + {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5}, + {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2}, + {0x9b934c3b330c8577, 0x63cc55f49f88eb2f}, + {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb}, + {0xf316271c7fc3908a, 0x8bef464e3945ef7a}, + {0x97edd871cfda3a56, 0x97758bf0e3cbb5ac}, + {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317}, + {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd}, + {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a}, + {0xb975d6b6ee39e436, 0xb3e2fd538e122b44}, + {0xe7d34c64a9c85d44, 0x60dbbca87196b616}, + {0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd}, + {0xb51d13aea4a488dd, 0x6babab6398bdbe41}, + {0xe264589a4dcdab14, 0xc696963c7eed2dd1}, + {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2}, + {0xb0de65388cc8ada8, 0x3b25a55f43294bcb}, + {0xdd15fe86affad912, 0x49ef0eb713f39ebe}, + {0x8a2dbf142dfcc7ab, 0x6e3569326c784337}, + {0xacb92ed9397bf996, 0x49c2c37f07965404}, + {0xd7e77a8f87daf7fb, 0xdc33745ec97be906}, + {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3}, + {0xa8acd7c0222311bc, 0xc40832ea0d68ce0c}, + {0xd2d80db02aabd62b, 0xf50a3fa490c30190}, + {0x83c7088e1aab65db, 0x792667c6da79e0fa}, + {0xa4b8cab1a1563f52, 0x577001b891185938}, + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, + {0x80b05e5ac60b6178, 0x544f8158315b05b4}, + {0xa0dc75f1778e39d6, 0x696361ae3db1c721}, + {0xc913936dd571c84c, 0x03bc3a19cd1e38e9}, + {0xfb5878494ace3a5f, 0x04ab48a04065c723}, + {0x9d174b2dcec0e47b, 0x62eb0d64283f9c76}, + {0xc45d1df942711d9a, 0x3ba5d0bd324f8394}, + {0xf5746577930d6500, 0xca8f44ec7ee36479}, + {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb}, + {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e}, + {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e}, + {0x95d04aee3b80ece5, 0xbba1f1d158724a12}, + {0xbb445da9ca61281f, 0x2a8a6e45ae8edc97}, + {0xea1575143cf97226, 
0xf52d09d71a3293bd}, + {0x924d692ca61be758, 0x593c2626705f9c56}, + {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c}, + {0xe498f455c38b997a, 0x0b6dfb9c0f956447}, + {0x8edf98b59a373fec, 0x4724bd4189bd5eac}, + {0xb2977ee300c50fe7, 0x58edec91ec2cb657}, + {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed}, + {0x8b865b215899f46c, 0xbd79e0d20082ee74}, + {0xae67f1e9aec07187, 0xecd8590680a3aa11}, + {0xda01ee641a708de9, 0xe80e6f4820cc9495}, + {0x884134fe908658b2, 0x3109058d147fdcdd}, + {0xaa51823e34a7eede, 0xbd4b46f0599fd415}, + {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a}, + {0x850fadc09923329e, 0x03e2cf6bc604ddb0}, + {0xa6539930bf6bff45, 0x84db8346b786151c}, + {0xcfe87f7cef46ff16, 0xe612641865679a63}, + {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e}, + {0xa26da3999aef7749, 0xe3be5e330f38f09d}, + {0xcb090c8001ab551c, 0x5cadf5bfd3072cc5}, + {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6}, + {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa}, + {0xc646d63501a1511d, 0xb281e1fd541501b8}, + {0xf7d88bc24209a565, 0x1f225a7ca91a4226}, + {0x9ae757596946075f, 0x3375788de9b06958}, + {0xc1a12d2fc3978937, 0x0052d6b1641c83ae}, + {0xf209787bb47d6b84, 0xc0678c5dbd23a49a}, + {0x9745eb4d50ce6332, 0xf840b7ba963646e0}, + {0xbd176620a501fbff, 0xb650e5a93bc3d898}, + {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe}, + {0x93ba47c980e98cdf, 0xc66f336c36b10137}, + {0xb8a8d9bbe123f017, 0xb80b0047445d4184}, + {0xe6d3102ad96cec1d, 0xa60dc059157491e5}, + {0x9043ea1ac7e41392, 0x87c89837ad68db2f}, + {0xb454e4a179dd1877, 0x29babe4598c311fb}, + {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a}, + {0x8ce2529e2734bb1d, 0x1899e4a65f58660c}, + {0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f}, + {0xdc21a1171d42645d, 0x76707543f4fa1f73}, + {0x899504ae72497eba, 0x6a06494a791c53a8}, + {0xabfa45da0edbde69, 0x0487db9d17636892}, + {0xd6f8d7509292d603, 0x45a9d2845d3c42b6}, + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, + {0xa7f26836f282b732, 0x8e6cac7768d7141e}, + {0xd1ef0244af2364ff, 0x3207d795430cd926}, + {0x8335616aed761f1f, 0x7f44e6bd49e807b8}, + {0xa402b9c5a8d3a6e7, 
0x5f16206c9c6209a6}, + {0xcd036837130890a1, 0x36dba887c37a8c0f}, + {0x802221226be55a64, 0xc2494954da2c9789}, + {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c}, + {0xc83553c5c8965d3d, 0x6f92829494e5acc7}, + {0xfa42a8b73abbf48c, 0xcb772339ba1f17f9}, + {0x9c69a97284b578d7, 0xff2a760414536efb}, + {0xc38413cf25e2d70d, 0xfef5138519684aba}, + {0xf46518c2ef5b8cd1, 0x7eb258665fc25d69}, + {0x98bf2f79d5993802, 0xef2f773ffbd97a61}, + {0xbeeefb584aff8603, 0xaafb550ffacfd8fa}, + {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38}, + {0x952ab45cfa97a0b2, 0xdd945a747bf26183}, + {0xba756174393d88df, 0x94f971119aeef9e4}, + {0xe912b9d1478ceb17, 0x7a37cd5601aab85d}, + {0x91abb422ccb812ee, 0xac62e055c10ab33a}, + {0xb616a12b7fe617aa, 0x577b986b314d6009}, + {0xe39c49765fdf9d94, 0xed5a7e85fda0b80b}, + {0x8e41ade9fbebc27d, 0x14588f13be847307}, + {0xb1d219647ae6b31c, 0x596eb2d8ae258fc8}, + {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb}, + {0x8aec23d680043bee, 0x25de7bb9480d5854}, + {0xada72ccc20054ae9, 0xaf561aa79a10ae6a}, + {0xd910f7ff28069da4, 0x1b2ba1518094da04}, + {0x87aa9aff79042286, 0x90fb44d2f05d0842}, + {0xa99541bf57452b28, 0x353a1607ac744a53}, + {0xd3fa922f2d1675f2, 0x42889b8997915ce8}, + {0x847c9b5d7c2e09b7, 0x69956135febada11}, + {0xa59bc234db398c25, 0x43fab9837e699095}, + {0xcf02b2c21207ef2e, 0x94f967e45e03f4bb}, + {0x8161afb94b44f57d, 0x1d1be0eebac278f5}, + {0xa1ba1ba79e1632dc, 0x6462d92a69731732}, + {0xca28a291859bbf93, 0x7d7b8f7503cfdcfe}, + {0xfcb2cb35e702af78, 0x5cda735244c3d43e}, + {0x9defbf01b061adab, 0x3a0888136afa64a7}, + {0xc56baec21c7a1916, 0x088aaa1845b8fdd0}, + {0xf6c69a72a3989f5b, 0x8aad549e57273d45}, + {0x9a3c2087a63f6399, 0x36ac54e2f678864b}, + {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd}, + {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5}, + {0x969eb7c47859e743, 0x9f644ae5a4b1b325}, + {0xbc4665b596706114, 0x873d5d9f0dde1fee}, + {0xeb57ff22fc0c7959, 0xa90cb506d155a7ea}, + {0x9316ff75dd87cbd8, 0x09a7f12442d588f2}, + {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb2f}, + {0xe5d3ef282a242e81, 
0x8f1668c8a86da5fa}, + {0x8fa475791a569d10, 0xf96e017d694487bc}, + {0xb38d92d760ec4455, 0x37c981dcc395a9ac}, + {0xe070f78d3927556a, 0x85bbe253f47b1417}, + {0x8c469ab843b89562, 0x93956d7478ccec8e}, + {0xaf58416654a6babb, 0x387ac8d1970027b2}, + {0xdb2e51bfe9d0696a, 0x06997b05fcc0319e}, + {0x88fcf317f22241e2, 0x441fece3bdf81f03}, + {0xab3c2fddeeaad25a, 0xd527e81cad7626c3}, + {0xd60b3bd56a5586f1, 0x8a71e223d8d3b074}, + {0x85c7056562757456, 0xf6872d5667844e49}, + {0xa738c6bebb12d16c, 0xb428f8ac016561db}, + {0xd106f86e69d785c7, 0xe13336d701beba52}, + {0x82a45b450226b39c, 0xecc0024661173473}, + {0xa34d721642b06084, 0x27f002d7f95d0190}, + {0xcc20ce9bd35c78a5, 0x31ec038df7b441f4}, + {0xff290242c83396ce, 0x7e67047175a15271}, + {0x9f79a169bd203e41, 0x0f0062c6e984d386}, + {0xc75809c42c684dd1, 0x52c07b78a3e60868}, + {0xf92e0c3537826145, 0xa7709a56ccdf8a82}, + {0x9bbcc7a142b17ccb, 0x88a66076400bb691}, + {0xc2abf989935ddbfe, 0x6acff893d00ea435}, + {0xf356f7ebf83552fe, 0x0583f6b8c4124d43}, + {0x98165af37b2153de, 0xc3727a337a8b704a}, + {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c}, + {0xeda2ee1c7064130c, 0x1162def06f79df73}, + {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8}, + {0xb9a74a0637ce2ee1, 0x6d953e2bd7173692}, + {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437}, + {0x910ab1d4db9914a0, 0x1d9c9892400a22a2}, + {0xb54d5e4a127f59c8, 0x2503beb6d00cab4b}, + {0xe2a0b5dc971f303a, 0x2e44ae64840fd61d}, + {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, + {0xb10d8e1456105dad, 0x7425a83e872c5f47}, + {0xdd50f1996b947518, 0xd12f124e28f77719}, + {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f}, + {0xace73cbfdc0bfb7b, 0x636cc64d1001550b}, + {0xd8210befd30efa5a, 0x3c47f7e05401aa4e}, + {0x8714a775e3e95c78, 0x65acfaec34810a71}, + {0xa8d9d1535ce3b396, 0x7f1839a741a14d0d}, + {0xd31045a8341ca07c, 0x1ede48111209a050}, + {0x83ea2b892091e44d, 0x934aed0aab460432}, + {0xa4e4b66b68b65d60, 0xf81da84d5617853f}, + {0xce1de40642e3f4b9, 0x36251260ab9d668e}, + {0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019}, + {0xa1075a24e4421730, 
0xb24cf65b8612f81f}, + {0xc94930ae1d529cfc, 0xdee033f26797b627}, + {0xfb9b7cd9a4a7443c, 0x169840ef017da3b1}, + {0x9d412e0806e88aa5, 0x8e1f289560ee864e}, + {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2}, + {0xf5b5d7ec8acb58a2, 0xae10af696774b1db}, + {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29}, + {0xbff610b0cc6edd3f, 0x17fd090a58d32af3}, + {0xeff394dcff8a948e, 0xddfc4b4cef07f5b0}, + {0x95f83d0a1fb69cd9, 0x4abdaf101564f98e}, + {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1}, + {0xea53df5fd18d5513, 0x84c86189216dc5ed}, + {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4}, + {0xb7118682dbb66a77, 0x3fbc8c33221dc2a1}, + {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, + {0x8f05b1163ba6832d, 0x29cb4d87f2a7400e}, + {0xb2c71d5bca9023f8, 0x743e20e9ef511012}, + {0xdf78e4b2bd342cf6, 0x914da9246b255416}, + {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e}, + {0xae9672aba3d0c320, 0xa184ac2473b529b1}, + {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e}, + {0x8865899617fb1871, 0x7e2fa67c7a658892}, + {0xaa7eebfb9df9de8d, 0xddbb901b98feeab7}, + {0xd51ea6fa85785631, 0x552a74227f3ea565}, + {0x8533285c936b35de, 0xd53a88958f87275f}, + {0xa67ff273b8460356, 0x8a892abaf368f137}, + {0xd01fef10a657842c, 0x2d2b7569b0432d85}, + {0x8213f56a67f6b29b, 0x9c3b29620e29fc73}, + {0xa298f2c501f45f42, 0x8349f3ba91b47b8f}, + {0xcb3f2f7642717713, 0x241c70a936219a73}, + {0xfe0efb53d30dd4d7, 0xed238cd383aa0110}, + {0x9ec95d1463e8a506, 0xf4363804324a40aa}, + {0xc67bb4597ce2ce48, 0xb143c6053edcd0d5}, + {0xf81aa16fdc1b81da, 0xdd94b7868e94050a}, + {0x9b10a4e5e9913128, 0xca7cf2b4191c8326}, + {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0}, + {0xf24a01a73cf2dccf, 0xbc633b39673c8cec}, + {0x976e41088617ca01, 0xd5be0503e085d813}, + {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18}, + {0xec9c459d51852ba2, 0xddf8e7d60ed1219e}, + {0x93e1ab8252f33b45, 0xcabb90e5c942b503}, + {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, + {0xe7109bfba19c0c9d, 0x0cc512670a783ad4}, + {0x906a617d450187e2, 0x27fb2b80668b24c5}, + {0xb484f9dc9641e9da, 0xb1f9f660802dedf6}, + {0xe1a63853bbd26451, 
0x5e7873f8a0396973}, + {0x8d07e33455637eb2, 0xdb0b487b6423e1e8}, + {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62}, + {0xdc5c5301c56b75f7, 0x7641a140cc7810fb}, + {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d}, + {0xac2820d9623bf429, 0x546345fa9fbdcd44}, + {0xd732290fbacaf133, 0xa97c177947ad4095}, + {0x867f59a9d4bed6c0, 0x49ed8eabcccc485d}, + {0xa81f301449ee8c70, 0x5c68f256bfff5a74}, + {0xd226fc195c6a2f8c, 0x73832eec6fff3111}, + {0x83585d8fd9c25db7, 0xc831fd53c5ff7eab}, + {0xa42e74f3d032f525, 0xba3e7ca8b77f5e55}, + {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb}, + {0x80444b5e7aa7cf85, 0x7980d163cf5b81b3}, + {0xa0555e361951c366, 0xd7e105bcc332621f}, + {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7}, + {0xfa856334878fc150, 0xb14f98f6f0feb951}, + {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3}, + {0xc3b8358109e84f07, 0x0a862f80ec4700c8}, + {0xf4a642e14c6262c8, 0xcd27bb612758c0fa}, + {0x98e7e9cccfbd7dbd, 0x8038d51cb897789c}, + {0xbf21e44003acdd2c, 0xe0470a63e6bd56c3}, + {0xeeea5d5004981478, 0x1858ccfce06cac74}, + {0x95527a5202df0ccb, 0x0f37801e0c43ebc8}, + {0xbaa718e68396cffd, 0xd30560258f54e6ba}, + {0xe950df20247c83fd, 0x47c6b82ef32a2069}, + {0x91d28b7416cdd27e, 0x4cdc331d57fa5441}, + {0xb6472e511c81471d, 0xe0133fe4adf8e952}, + {0xe3d8f9e563a198e5, 0x58180fddd97723a6}, + {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648}, + {0xb201833b35d63f73, 0x2cd2cc6551e513da}, + {0xde81e40a034bcf4f, 0xf8077f7ea65e58d1}, + {0x8b112e86420f6191, 0xfb04afaf27faf782}, + {0xadd57a27d29339f6, 0x79c5db9af1f9b563}, + {0xd94ad8b1c7380874, 0x18375281ae7822bc}, + {0x87cec76f1c830548, 0x8f2293910d0b15b5}, + {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb22}, + {0xd433179d9c8cb841, 0x5fa60692a46151eb}, + {0x849feec281d7f328, 0xdbc7c41ba6bcd333}, + {0xa5c7ea73224deff3, 0x12b9b522906c0800}, + {0xcf39e50feae16bef, 0xd768226b34870a00}, + {0x81842f29f2cce375, 0xe6a1158300d46640}, + {0xa1e53af46f801c53, 0x60495ae3c1097fd0}, + {0xca5e89b18b602368, 0x385bb19cb14bdfc4}, + {0xfcf62c1dee382c42, 0x46729e03dd9ed7b5}, + {0x9e19db92b4e31ba9, 
0x6c07a2c26a8346d1}, + {0xc5a05277621be293, 0xc7098b7305241885}, + { 0xf70867153aa2db38, + 0xb8cbee4fc66d1ea7 } +#else + {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, + {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, + {0x86a8d39ef77164bc, 0xae5dff9c02033198}, + {0xd98ddaee19068c76, 0x3badd624dd9b0958}, + {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, + {0xe55990879ddcaabd, 0xcc420a6a101d0516}, + {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, + {0x95a8637627989aad, 0xdde7001379a44aa9}, + {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, + {0xc350000000000000, 0x0000000000000000}, + {0x9dc5ada82b70b59d, 0xf020000000000000}, + {0xfee50b7025c36a08, 0x02f236d04753d5b4}, + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, + {0xa6539930bf6bff45, 0x84db8346b786151c}, + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, + {0xd910f7ff28069da4, 0x1b2ba1518094da04}, + {0xaf58416654a6babb, 0x387ac8d1970027b2}, + {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, + {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, + {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, + { 0x95527a5202df0ccb, + 0x0f37801e0c43ebc8 } +#endif + }; + +#if FMT_USE_FULL_CACHE_DRAGONBOX + return pow10_significands[k - float_info::min_k]; +#else + static constexpr const uint64_t powers_of_5_64[] = { + 0x0000000000000001, 0x0000000000000005, 0x0000000000000019, + 0x000000000000007d, 0x0000000000000271, 0x0000000000000c35, + 0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1, + 0x00000000001dcd65, 0x00000000009502f9, 0x0000000002e90edd, + 0x000000000e8d4a51, 0x0000000048c27395, 0x000000016bcc41e9, + 0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5, + 0x000003782dace9d9, 0x00001158e460913d, 0x000056bc75e2d631, + 0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed, + 0x00d3c21bcecceda1, 0x0422ca8b0a00a425, 0x14adf4b7320334b9}; + + static constexpr const uint32_t pow10_recovery_errors[] = { + 0x50001400, 0x54044100, 0x54014555, 0x55954415, 0x54115555, 0x00000001, + 
0x50000000, 0x00104000, 0x54010004, 0x05004001, 0x55555544, 0x41545555, + 0x54040551, 0x15445545, 0x51555514, 0x10000015, 0x00101100, 0x01100015, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04450514, 0x45414110, + 0x55555145, 0x50544050, 0x15040155, 0x11054140, 0x50111514, 0x11451454, + 0x00400541, 0x00000000, 0x55555450, 0x10056551, 0x10054011, 0x55551014, + 0x69514555, 0x05151109, 0x00155555}; + + static const int compression_ratio = 27; + + // Compute base index. + int cache_index = (k - float_info::min_k) / compression_ratio; + int kb = cache_index * compression_ratio + float_info::min_k; + int offset = k - kb; + + // Get base cache. + uint128_wrapper base_cache = pow10_significands[cache_index]; + if (offset == 0) return base_cache; + + // Compute the required amount of bit-shift. + int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; + FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); + + // Try to recover the real cache. + uint64_t pow5 = powers_of_5_64[offset]; + uint128_wrapper recovered_cache = umul128(base_cache.high(), pow5); + uint128_wrapper middle_low = + umul128(base_cache.low() - (kb < 0 ? 1u : 0u), pow5); + + recovered_cache += middle_low.high(); + + uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); + uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); + + recovered_cache = + uint128_wrapper{(recovered_cache.low() >> alpha) | high_to_middle, + ((middle_low.low() >> alpha) | middle_to_low)}; + + if (kb < 0) recovered_cache += 1; + + // Get error. + int error_idx = (k - float_info::min_k) / 16; + uint32_t error = (pow10_recovery_errors[error_idx] >> + ((k - float_info::min_k) % 16) * 2) & + 0x3; + + // Add the error back. 
+ FMT_ASSERT(recovered_cache.low() + error >= recovered_cache.low(), ""); + return {recovered_cache.high(), recovered_cache.low() + error}; +#endif + } + + static carrier_uint compute_mul(carrier_uint u, + const cache_entry_type& cache) FMT_NOEXCEPT { + return umul192_upper64(u, cache); + } + + static uint32_t compute_delta(cache_entry_type const& cache, + int beta_minus_1) FMT_NOEXCEPT { + return static_cast(cache.high() >> (64 - 1 - beta_minus_1)); + } + + static bool compute_mul_parity(carrier_uint two_f, + const cache_entry_type& cache, + int beta_minus_1) FMT_NOEXCEPT { + FMT_ASSERT(beta_minus_1 >= 1, ""); + FMT_ASSERT(beta_minus_1 < 64, ""); + + return ((umul192_middle64(two_f, cache) >> (64 - beta_minus_1)) & 1) != 0; + } + + static carrier_uint compute_left_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return (cache.high() - + (cache.high() >> (float_info::significand_bits + 2))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1); + } + + static carrier_uint compute_right_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return (cache.high() + + (cache.high() >> (float_info::significand_bits + 1))) >> + (64 - float_info::significand_bits - 1 - beta_minus_1); + } + + static carrier_uint compute_round_up_for_shorter_interval_case( + const cache_entry_type& cache, int beta_minus_1) FMT_NOEXCEPT { + return ((cache.high() >> + (64 - float_info::significand_bits - 2 - beta_minus_1)) + + 1) / + 2; + } +}; + +// Various integer checks +template +bool is_left_endpoint_integer_shorter_interval(int exponent) FMT_NOEXCEPT { + return exponent >= + float_info< + T>::case_shorter_interval_left_endpoint_lower_threshold && + exponent <= + float_info::case_shorter_interval_left_endpoint_upper_threshold; +} +template +bool is_endpoint_integer(typename float_info::carrier_uint two_f, + int exponent, int minus_k) FMT_NOEXCEPT { + if (exponent < 
float_info::case_fc_pm_half_lower_threshold) return false; + // For k >= 0. + if (exponent <= float_info::case_fc_pm_half_upper_threshold) return true; + // For k < 0. + if (exponent > float_info::divisibility_check_by_5_threshold) return false; + return divisible_by_power_of_5(two_f, minus_k); +} + +template +bool is_center_integer(typename float_info::carrier_uint two_f, int exponent, + int minus_k) FMT_NOEXCEPT { + // Exponent for 5 is negative. + if (exponent > float_info::divisibility_check_by_5_threshold) return false; + if (exponent > float_info::case_fc_upper_threshold) + return divisible_by_power_of_5(two_f, minus_k); + // Both exponents are nonnegative. + if (exponent >= float_info::case_fc_lower_threshold) return true; + // Exponent for 2 is negative. + return divisible_by_power_of_2(two_f, minus_k - exponent + 1); +} + +// Remove trailing zeros from n and return the number of zeros removed (float) +FMT_INLINE int remove_trailing_zeros(uint32_t& n) FMT_NOEXCEPT { +#ifdef FMT_BUILTIN_CTZ + int t = FMT_BUILTIN_CTZ(n); +#else + int t = ctz(n); +#endif + if (t > float_info::max_trailing_zeros) + t = float_info::max_trailing_zeros; + + const uint32_t mod_inv1 = 0xcccccccd; + const uint32_t max_quotient1 = 0x33333333; + const uint32_t mod_inv2 = 0xc28f5c29; + const uint32_t max_quotient2 = 0x0a3d70a3; + + int s = 0; + for (; s < t - 1; s += 2) { + if (n * mod_inv2 > max_quotient2) break; + n *= mod_inv2; + } + if (s < t && n * mod_inv1 <= max_quotient1) { + n *= mod_inv1; + ++s; + } + n >>= s; + return s; +} + +// Removes trailing zeros and returns the number of zeros removed (double) +FMT_INLINE int remove_trailing_zeros(uint64_t& n) FMT_NOEXCEPT { +#ifdef FMT_BUILTIN_CTZLL + int t = FMT_BUILTIN_CTZLL(n); +#else + int t = ctzll(n); +#endif + if (t > float_info::max_trailing_zeros) + t = float_info::max_trailing_zeros; + // Divide by 10^8 and reduce to 32-bits + // Since ret_value.significand <= (2^64 - 1) / 1000 < 10^17, + // both of the quotient and the r 
should fit in 32-bits + + const uint32_t mod_inv1 = 0xcccccccd; + const uint32_t max_quotient1 = 0x33333333; + const uint64_t mod_inv8 = 0xc767074b22e90e21; + const uint64_t max_quotient8 = 0x00002af31dc46118; + + // If the number is divisible by 1'0000'0000, work with the quotient + if (t >= 8) { + auto quotient_candidate = n * mod_inv8; + + if (quotient_candidate <= max_quotient8) { + auto quotient = static_cast(quotient_candidate >> 8); + + int s = 8; + for (; s < t; ++s) { + if (quotient * mod_inv1 > max_quotient1) break; + quotient *= mod_inv1; + } + quotient >>= (s - 8); + n = quotient; + return s; + } + } + + // Otherwise, work with the remainder + auto quotient = static_cast(n / 100000000); + auto remainder = static_cast(n - 100000000 * quotient); + + if (t == 0 || remainder * mod_inv1 > max_quotient1) { + return 0; + } + remainder *= mod_inv1; + + if (t == 1 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 1) + quotient * 10000000ull; + return 1; + } + remainder *= mod_inv1; + + if (t == 2 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 2) + quotient * 1000000ull; + return 2; + } + remainder *= mod_inv1; + + if (t == 3 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 3) + quotient * 100000ull; + return 3; + } + remainder *= mod_inv1; + + if (t == 4 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 4) + quotient * 10000ull; + return 4; + } + remainder *= mod_inv1; + + if (t == 5 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 5) + quotient * 1000ull; + return 5; + } + remainder *= mod_inv1; + + if (t == 6 || remainder * mod_inv1 > max_quotient1) { + n = (remainder >> 6) + quotient * 100ull; + return 6; + } + remainder *= mod_inv1; + + n = (remainder >> 7) + quotient * 10ull; + return 7; +} + +// The main algorithm for shorter interval case +template +FMT_INLINE decimal_fp shorter_interval_case(int exponent) FMT_NOEXCEPT { + decimal_fp ret_value; + // Compute k and beta + const int 
minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); + const int beta_minus_1 = exponent + floor_log2_pow10(-minus_k); + + // Compute xi and zi + using cache_entry_type = typename cache_accessor::cache_entry_type; + const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); + + auto xi = cache_accessor::compute_left_endpoint_for_shorter_interval_case( + cache, beta_minus_1); + auto zi = cache_accessor::compute_right_endpoint_for_shorter_interval_case( + cache, beta_minus_1); + + // If the left endpoint is not an integer, increase it + if (!is_left_endpoint_integer_shorter_interval(exponent)) ++xi; + + // Try bigger divisor + ret_value.significand = zi / 10; + + // If succeed, remove trailing zeros if necessary and return + if (ret_value.significand * 10 >= xi) { + ret_value.exponent = minus_k + 1; + ret_value.exponent += remove_trailing_zeros(ret_value.significand); + return ret_value; + } + + // Otherwise, compute the round-up of y + ret_value.significand = + cache_accessor::compute_round_up_for_shorter_interval_case( + cache, beta_minus_1); + ret_value.exponent = minus_k; + + // When tie occurs, choose one of them according to the rule + if (exponent >= float_info::shorter_interval_tie_lower_threshold && + exponent <= float_info::shorter_interval_tie_upper_threshold) { + ret_value.significand = ret_value.significand % 2 == 0 + ? ret_value.significand + : ret_value.significand - 1; + } else if (ret_value.significand < xi) { + ++ret_value.significand; + } + return ret_value; +} + +template decimal_fp to_decimal(T x) FMT_NOEXCEPT { + // Step 1: integer promotion & Schubfach multiplier calculation. + + using carrier_uint = typename float_info::carrier_uint; + using cache_entry_type = typename cache_accessor::cache_entry_type; + auto br = bit_cast(x); + + // Extract significand bits and exponent bits. 
+ const carrier_uint significand_mask = + (static_cast(1) << float_info::significand_bits) - 1; + carrier_uint significand = (br & significand_mask); + int exponent = static_cast((br & exponent_mask()) >> + float_info::significand_bits); + + if (exponent != 0) { // Check if normal. + exponent += float_info::exponent_bias - float_info::significand_bits; + + // Shorter interval case; proceed like Schubfach. + if (significand == 0) return shorter_interval_case(exponent); + + significand |= + (static_cast(1) << float_info::significand_bits); + } else { + // Subnormal case; the interval is always regular. + if (significand == 0) return {0, 0}; + exponent = float_info::min_exponent - float_info::significand_bits; + } + + const bool include_left_endpoint = (significand % 2 == 0); + const bool include_right_endpoint = include_left_endpoint; + + // Compute k and beta. + const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; + const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); + const int beta_minus_1 = exponent + floor_log2_pow10(-minus_k); + + // Compute zi and deltai + // 10^kappa <= deltai < 10^(kappa + 1) + const uint32_t deltai = cache_accessor::compute_delta(cache, beta_minus_1); + const carrier_uint two_fc = significand << 1; + const carrier_uint two_fr = two_fc | 1; + const carrier_uint zi = + cache_accessor::compute_mul(two_fr << beta_minus_1, cache); + + // Step 2: Try larger divisor; remove trailing zeros if necessary + + // Using an upper bound on zi, we might be able to optimize the division + // better than the compiler; we are computing zi / big_divisor here + decimal_fp ret_value; + ret_value.significand = divide_by_10_to_kappa_plus_1(zi); + uint32_t r = static_cast(zi - float_info::big_divisor * + ret_value.significand); + + if (r > deltai) { + goto small_divisor_case_label; + } else if (r < deltai) { + // Exclude the right endpoint if necessary + if (r == 0 && !include_right_endpoint && + is_endpoint_integer(two_fr, 
exponent, minus_k)) { + --ret_value.significand; + r = float_info::big_divisor; + goto small_divisor_case_label; + } + } else { + // r == deltai; compare fractional parts + // Check conditions in the order different from the paper + // to take advantage of short-circuiting + const carrier_uint two_fl = two_fc - 1; + if ((!include_left_endpoint || + !is_endpoint_integer(two_fl, exponent, minus_k)) && + !cache_accessor::compute_mul_parity(two_fl, cache, beta_minus_1)) { + goto small_divisor_case_label; + } + } + ret_value.exponent = minus_k + float_info::kappa + 1; + + // We may need to remove trailing zeros + ret_value.exponent += remove_trailing_zeros(ret_value.significand); + return ret_value; + + // Step 3: Find the significand with the smaller divisor + +small_divisor_case_label: + ret_value.significand *= 10; + ret_value.exponent = minus_k + float_info::kappa; + + const uint32_t mask = (1u << float_info::kappa) - 1; + auto dist = r - (deltai / 2) + (float_info::small_divisor / 2); + + // Is dist divisible by 2^kappa? + if ((dist & mask) == 0) { + const bool approx_y_parity = + ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; + dist >>= float_info::kappa; + + // Is dist divisible by 5^kappa? + if (check_divisibility_and_divide_by_pow5::kappa>(dist)) { + ret_value.significand += dist; + + // Check z^(f) >= epsilon^(f) + // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, + // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f) + // Since there are only 2 possibilities, we only need to care about the + // parity. 
Also, zi and r should have the same parity since the divisor + // is an even number + if (cache_accessor::compute_mul_parity(two_fc, cache, beta_minus_1) != + approx_y_parity) { + --ret_value.significand; + } else { + // If z^(f) >= epsilon^(f), we might have a tie + // when z^(f) == epsilon^(f), or equivalently, when y is an integer + if (is_center_integer(two_fc, exponent, minus_k)) { + ret_value.significand = ret_value.significand % 2 == 0 + ? ret_value.significand + : ret_value.significand - 1; + } + } + } + // Is dist not divisible by 5^kappa? + else { + ret_value.significand += dist; + } + } + // Is dist not divisible by 2^kappa? + else { + // Since we know dist is small, we might be able to optimize the division + // better than the compiler; we are computing dist / small_divisor here + ret_value.significand += + small_division_by_pow10::kappa>(dist); + } + return ret_value; +} +} // namespace dragonbox + +// Formats value using a variation of the Fixed-Precision Positive +// Floating-Point Printout ((FPP)^2) algorithm by Steele & White: +// https://fmt.dev/papers/p372-steele.pdf. +template +void fallback_format(Double d, int num_digits, bool binary32, buffer& buf, + int& exp10) { + bigint numerator; // 2 * R in (FPP)^2. + bigint denominator; // 2 * S in (FPP)^2. + // lower and upper are differences between value and corresponding boundaries. + bigint lower; // (M^- in (FPP)^2). + bigint upper_store; // upper's value if different from lower. + bigint* upper = nullptr; // (M^+ in (FPP)^2). + fp value; + // Shift numerator and denominator by an extra bit or two (if lower boundary + // is closer) to make lower and upper integers. This eliminates multiplication + // by 2 during later computations. + const bool is_predecessor_closer = + binary32 ? value.assign(static_cast(d)) : value.assign(d); + int shift = is_predecessor_closer ? 
2 : 1; + uint64_t significand = value.f << shift; + if (value.e >= 0) { + numerator.assign(significand); + numerator <<= value.e; + lower.assign(1); + lower <<= value.e; + if (shift != 1) { + upper_store.assign(1); + upper_store <<= value.e + 1; + upper = &upper_store; + } + denominator.assign_pow10(exp10); + denominator <<= shift; + } else if (exp10 < 0) { + numerator.assign_pow10(-exp10); + lower.assign(numerator); + if (shift != 1) { + upper_store.assign(numerator); + upper_store <<= 1; + upper = &upper_store; + } + numerator *= significand; + denominator.assign(1); + denominator <<= shift - value.e; + } else { + numerator.assign(significand); + denominator.assign_pow10(exp10); + denominator <<= shift - value.e; + lower.assign(1); + if (shift != 1) { + upper_store.assign(1ULL << 1); + upper = &upper_store; + } + } + // Invariant: value == (numerator / denominator) * pow(10, exp10). + if (num_digits < 0) { + // Generate the shortest representation. + if (!upper) upper = &lower; + bool even = (value.f & 1) == 0; + num_digits = 0; + char* data = buf.data(); + for (;;) { + int digit = numerator.divmod_assign(denominator); + bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. + // numerator + upper >[=] pow10: + bool high = add_compare(numerator, *upper, denominator) + even > 0; + data[num_digits++] = static_cast('0' + digit); + if (low || high) { + if (!low) { + ++data[num_digits - 1]; + } else if (high) { + int result = add_compare(numerator, numerator, denominator); + // Round half to even. + if (result > 0 || (result == 0 && (digit % 2) != 0)) + ++data[num_digits - 1]; + } + buf.try_resize(to_unsigned(num_digits)); + exp10 -= num_digits - 1; + return; + } + numerator *= 10; + lower *= 10; + if (upper != &lower) *upper *= 10; + } + } + // Generate the given number of digits. + exp10 -= num_digits - 1; + if (num_digits == 0) { + buf.try_resize(1); + denominator *= 10; + buf[0] = add_compare(numerator, numerator, denominator) > 0 ? 
'1' : '0'; + return; + } + buf.try_resize(to_unsigned(num_digits)); + for (int i = 0; i < num_digits - 1; ++i) { + int digit = numerator.divmod_assign(denominator); + buf[i] = static_cast('0' + digit); + numerator *= 10; + } + int digit = numerator.divmod_assign(denominator); + auto result = add_compare(numerator, numerator, denominator); + if (result > 0 || (result == 0 && (digit % 2) != 0)) { + if (digit == 9) { + const auto overflow = '0' + 10; + buf[num_digits - 1] = overflow; + // Propagate the carry. + for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { + buf[i] = '0'; + ++buf[i - 1]; + } + if (buf[0] == overflow) { + buf[0] = '1'; + ++exp10; + } + return; + } + ++digit; + } + buf[num_digits - 1] = static_cast('0' + digit); +} + +template +int format_float(T value, int precision, float_specs specs, buffer& buf) { + static_assert(!std::is_same::value, ""); + FMT_ASSERT(value >= 0, "value is negative"); + + const bool fixed = specs.format == float_format::fixed; + if (value <= 0) { // <= instead of == to silence a warning. + if (precision <= 0 || !fixed) { + buf.push_back('0'); + return 0; + } + buf.try_resize(to_unsigned(precision)); + std::uninitialized_fill_n(buf.data(), precision, '0'); + return -precision; + } + + if (!specs.use_grisu) return snprintf_float(value, precision, specs, buf); + + if (precision < 0) { + // Use Dragonbox for the shortest format. + if (specs.binary32) { + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } + + // Use Grisu + Dragon4 for the given precision: + // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. + int exp = 0; + const int min_exp = -60; // alpha in Grisu. + int cached_exp10 = 0; // K in Grisu. 
+ fp normalized = normalize(fp(value)); + const auto cached_pow = get_cached_power( + min_exp - (normalized.e + fp::significand_size), cached_exp10); + normalized = normalized * cached_pow; + // Limit precision to the maximum possible number of significant digits in an + // IEEE754 double because we don't need to generate zeros. + const int max_double_digits = 767; + if (precision > max_double_digits) precision = max_double_digits; + fixed_handler handler{buf.data(), 0, precision, -cached_exp10, fixed}; + if (grisu_gen_digits(normalized, 1, exp, handler) == digits::error) { + exp += handler.size - cached_exp10 - 1; + fallback_format(value, handler.precision, specs.binary32, buf, exp); + } else { + exp += handler.exp10; + buf.try_resize(to_unsigned(handler.size)); + } + if (!fixed && !specs.showpoint) { + // Remove trailing zeros. + auto num_digits = buf.size(); + while (num_digits > 0 && buf[num_digits - 1] == '0') { + --num_digits; + ++exp; + } + buf.try_resize(num_digits); + } + return exp; +} // namespace detail + +template +int snprintf_float(T value, int precision, float_specs specs, + buffer& buf) { + // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. + FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); + static_assert(!std::is_same::value, ""); + + // Subtract 1 to account for the difference in precision since we use %e for + // both general and exponent format. + if (specs.format == float_format::general || + specs.format == float_format::exp) + precision = (precision >= 0 ? precision : 6) - 1; + + // Build the format string. + enum { max_format_size = 7 }; // The longest format is "%#.*Le". + char format[max_format_size]; + char* format_ptr = format; + *format_ptr++ = '%'; + if (specs.showpoint && specs.format == float_format::hex) *format_ptr++ = '#'; + if (precision >= 0) { + *format_ptr++ = '.'; + *format_ptr++ = '*'; + } + if (std::is_same()) *format_ptr++ = 'L'; + *format_ptr++ = specs.format != float_format::hex + ? 
(specs.format == float_format::fixed ? 'f' : 'e') + : (specs.upper ? 'A' : 'a'); + *format_ptr = '\0'; + + // Format using snprintf. + auto offset = buf.size(); + for (;;) { + auto begin = buf.data() + offset; + auto capacity = buf.capacity() - offset; +#ifdef FMT_FUZZ + if (precision > 100000) + throw std::runtime_error( + "fuzz mode - avoid large allocation inside snprintf"); +#endif + // Suppress the warning about a nonliteral format string. + // Cannot use auto because of a bug in MinGW (#1532). + int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF; + int result = precision >= 0 + ? snprintf_ptr(begin, capacity, format, precision, value) + : snprintf_ptr(begin, capacity, format, value); + if (result < 0) { + // The buffer will grow exponentially. + buf.try_reserve(buf.capacity() + 1); + continue; + } + auto size = to_unsigned(result); + // Size equal to capacity means that the last character was truncated. + if (size >= capacity) { + buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. + continue; + } + auto is_digit = [](char c) { return c >= '0' && c <= '9'; }; + if (specs.format == float_format::fixed) { + if (precision == 0) { + buf.try_resize(size); + return 0; + } + // Find and remove the decimal point. + auto end = begin + size, p = end; + do { + --p; + } while (is_digit(*p)); + int fraction_size = static_cast(end - p - 1); + std::memmove(p, p + 1, to_unsigned(fraction_size)); + buf.try_resize(size - 1); + return -fraction_size; + } + if (specs.format == float_format::hex) { + buf.try_resize(size + offset); + return 0; + } + // Find and parse the exponent. + auto end = begin + size, exp_pos = end; + do { + --exp_pos; + } while (*exp_pos != 'e'); + char sign = exp_pos[1]; + FMT_ASSERT(sign == '+' || sign == '-', ""); + int exp = 0; + auto p = exp_pos + 2; // Skip 'e' and sign. 
+ do { + FMT_ASSERT(is_digit(*p), ""); + exp = exp * 10 + (*p++ - '0'); + } while (p != end); + if (sign == '-') exp = -exp; + int fraction_size = 0; + if (exp_pos != begin + 1) { + // Remove trailing zeros. + auto fraction_end = exp_pos - 1; + while (*fraction_end == '0') --fraction_end; + // Move the fractional part left to get rid of the decimal point. + fraction_size = static_cast(fraction_end - begin - 1); + std::memmove(begin + 1, begin + 2, to_unsigned(fraction_size)); + } + buf.try_resize(to_unsigned(fraction_size) + offset + 1); + return exp - fraction_size; + } +} +} // namespace detail + +template <> struct formatter { + FMT_CONSTEXPR format_parse_context::iterator parse( + format_parse_context& ctx) { + return ctx.begin(); + } + + format_context::iterator format(const detail::bigint& n, + format_context& ctx) { + auto out = ctx.out(); + bool first = true; + for (auto i = n.bigits_.size(); i > 0; --i) { + auto value = n.bigits_[i - 1u]; + if (first) { + out = format_to(out, FMT_STRING("{:x}"), value); + first = false; + continue; + } + out = format_to(out, FMT_STRING("{:08x}"), value); + } + if (n.exp_ > 0) + out = format_to(out, FMT_STRING("p{}"), + n.exp_ * detail::bigint::bigit_bits); + return out; + } +}; + +FMT_FUNC detail::utf8_to_utf16::utf8_to_utf16(string_view s) { + for_each_codepoint(s, [this](uint32_t cp, int error) { + if (error != 0) FMT_THROW(std::runtime_error("invalid utf8")); + if (cp <= 0xFFFF) { + buffer_.push_back(static_cast(cp)); + } else { + cp -= 0x10000; + buffer_.push_back(static_cast(0xD800 + (cp >> 10))); + buffer_.push_back(static_cast(0xDC00 + (cp & 0x3FF))); + } + }); + buffer_.push_back(0); +} + +FMT_FUNC void format_system_error(detail::buffer& out, int error_code, + const char* message) FMT_NOEXCEPT { + FMT_TRY { + auto ec = std::error_code(error_code, std::generic_category()); + write(std::back_inserter(out), std::system_error(ec, message).what()); + return; + } + FMT_CATCH(...) 
{} + format_error_code(out, error_code, message); +} + +FMT_FUNC void detail::error_handler::on_error(const char* message) { + FMT_THROW(format_error(message)); +} + +FMT_FUNC void report_system_error(int error_code, + const char* message) FMT_NOEXCEPT { + report_error(format_system_error, error_code, message); +} + +FMT_FUNC std::string vformat(string_view fmt, format_args args) { + // Don't optimize the "{}" case to keep the binary size small and because it + // can be better optimized in fmt::format anyway. + auto buffer = memory_buffer(); + detail::vformat_to(buffer, fmt, args); + return to_string(buffer); +} + +#ifdef _WIN32 +namespace detail { +using dword = conditional_t; +extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( // + void*, const void*, dword, dword*, void*); +} // namespace detail +#endif + +namespace detail { +FMT_FUNC void print(std::FILE* f, string_view text) { +#ifdef _WIN32 + auto fd = _fileno(f); + if (_isatty(fd)) { + detail::utf8_to_utf16 u16(string_view(text.data(), text.size())); + auto written = detail::dword(); + if (detail::WriteConsoleW(reinterpret_cast(_get_osfhandle(fd)), + u16.c_str(), static_cast(u16.size()), + &written, nullptr)) { + return; + } + // Fallback to fwrite on failure. It can happen if the output has been + // redirected to NUL. + } +#endif + detail::fwrite_fully(text.data(), 1, text.size(), f); +} +} // namespace detail + +FMT_FUNC void vprint(std::FILE* f, string_view format_str, format_args args) { + memory_buffer buffer; + detail::vformat_to(buffer, format_str, args); + detail::print(f, {buffer.data(), buffer.size()}); +} + +#ifdef _WIN32 +// Print assuming legacy (non-Unicode) encoding. 
+FMT_FUNC void detail::vprint_mojibake(std::FILE* f, string_view format_str, + format_args args) { + memory_buffer buffer; + detail::vformat_to(buffer, format_str, + basic_format_args>(args)); + fwrite_fully(buffer.data(), 1, buffer.size(), f); +} +#endif + +FMT_FUNC void vprint(string_view format_str, format_args args) { + vprint(stdout, format_str, args); +} + +FMT_END_NAMESPACE + +#endif // FMT_FORMAT_INL_H_ diff --git a/contrib/fmt-8.0.1/include/fmt/format.h b/contrib/fmt-8.0.1/include/fmt/format.h new file mode 100644 index 0000000000..5398a23a82 --- /dev/null +++ b/contrib/fmt-8.0.1/include/fmt/format.h @@ -0,0 +1,2830 @@ +/* + Formatting library for C++ + + Copyright (c) 2012 - present, Victor Zverovich + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + --- Optional exception to the license --- + + As an exception, if, as a result of your compiling your source code, portions + of this Software are embedded into a machine-executable object form of such + source code, you may redistribute such embedded portions in such object form + without including the above copyright and permission notices. + */ + +#ifndef FMT_FORMAT_H_ +#define FMT_FORMAT_H_ + +#include // std::signbit +#include // uint32_t +#include // std::numeric_limits +#include // std::uninitialized_copy +#include // std::runtime_error +#include // std::system_error +#include // std::swap + +#include "core.h" + +#ifdef __INTEL_COMPILER +# define FMT_ICC_VERSION __INTEL_COMPILER +#elif defined(__ICL) +# define FMT_ICC_VERSION __ICL +#else +# define FMT_ICC_VERSION 0 +#endif + +#ifdef __NVCC__ +# define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) +#else +# define FMT_CUDA_VERSION 0 +#endif + +#ifdef __has_builtin +# define FMT_HAS_BUILTIN(x) __has_builtin(x) +#else +# define FMT_HAS_BUILTIN(x) 0 +#endif + +#if FMT_GCC_VERSION || FMT_CLANG_VERSION +# define FMT_NOINLINE __attribute__((noinline)) +#else +# define FMT_NOINLINE +#endif + +#if FMT_MSC_VER +# define FMT_MSC_DEFAULT = default +#else +# define FMT_MSC_DEFAULT +#endif + +#ifndef FMT_THROW +# if FMT_EXCEPTIONS +# if FMT_MSC_VER || FMT_NVCC +FMT_BEGIN_NAMESPACE +namespace detail { +template inline void do_throw(const Exception& x) { + // Silence unreachable code warnings in MSVC and NVCC because these + // are nearly impossible to fix in a generic code. 
+ volatile bool b = true; + if (b) throw x; +} +} // namespace detail +FMT_END_NAMESPACE +# define FMT_THROW(x) detail::do_throw(x) +# else +# define FMT_THROW(x) throw x +# endif +# else +# define FMT_THROW(x) \ + do { \ + FMT_ASSERT(false, (x).what()); \ + } while (false) +# endif +#endif + +#if FMT_EXCEPTIONS +# define FMT_TRY try +# define FMT_CATCH(x) catch (x) +#else +# define FMT_TRY if (true) +# define FMT_CATCH(x) if (false) +#endif + +#ifndef FMT_DEPRECATED +# if FMT_HAS_CPP14_ATTRIBUTE(deprecated) || FMT_MSC_VER >= 1900 +# define FMT_DEPRECATED [[deprecated]] +# else +# if (defined(__GNUC__) && !defined(__LCC__)) || defined(__clang__) +# define FMT_DEPRECATED __attribute__((deprecated)) +# elif FMT_MSC_VER +# define FMT_DEPRECATED __declspec(deprecated) +# else +# define FMT_DEPRECATED /* deprecated */ +# endif +# endif +#endif + +// Workaround broken [[deprecated]] in the Intel, PGI and NVCC compilers. +#if FMT_ICC_VERSION || defined(__PGI) || FMT_NVCC +# define FMT_DEPRECATED_ALIAS +#else +# define FMT_DEPRECATED_ALIAS FMT_DEPRECATED +#endif + +#ifndef FMT_USE_USER_DEFINED_LITERALS +// EDG based compilers (Intel, NVIDIA, Elbrus, etc), GCC and MSVC support UDLs. +# if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || \ + FMT_MSC_VER >= 1900) && \ + (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) +# define FMT_USE_USER_DEFINED_LITERALS 1 +# else +# define FMT_USE_USER_DEFINED_LITERALS 0 +# endif +#endif + +// Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of +// integer formatter template instantiations to just one by only using the +// largest integer type. This results in a reduction in binary size but will +// cause a decrease in integer formatting performance. 
+#if !defined(FMT_REDUCE_INT_INSTANTIATIONS) +# define FMT_REDUCE_INT_INSTANTIATIONS 0 +#endif + +// __builtin_clz is broken in clang with Microsoft CodeGen: +// https://github.com/fmtlib/fmt/issues/519 +#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clz)) && !FMT_MSC_VER +# define FMT_BUILTIN_CLZ(n) __builtin_clz(n) +#endif +#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clzll)) && !FMT_MSC_VER +# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) +#endif +#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_ctz)) +# define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) +#endif +#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_ctzll)) +# define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) +#endif + +#if FMT_MSC_VER +# include // _BitScanReverse[64], _BitScanForward[64], _umul128 +#endif + +// Some compilers masquerade as both MSVC and GCC-likes or otherwise support +// __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the +// MSVC intrinsics if the clz and clzll builtins are not available. +#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(FMT_BUILTIN_CTZLL) +FMT_BEGIN_NAMESPACE +namespace detail { +// Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. +# if !defined(__clang__) +# pragma managed(push, off) +# pragma intrinsic(_BitScanForward) +# pragma intrinsic(_BitScanReverse) +# if defined(_WIN64) +# pragma intrinsic(_BitScanForward64) +# pragma intrinsic(_BitScanReverse64) +# endif +# endif + +inline auto clz(uint32_t x) -> int { + unsigned long r = 0; + _BitScanReverse(&r, x); + FMT_ASSERT(x != 0, ""); + // Static analysis complains about using uninitialized data + // "r", but the only way that can happen is if "x" is 0, + // which the callers guarantee to not happen. + FMT_MSC_WARNING(suppress : 6102) + return 31 ^ static_cast(r); +} +# define FMT_BUILTIN_CLZ(n) detail::clz(n) + +inline auto clzll(uint64_t x) -> int { + unsigned long r = 0; +# ifdef _WIN64 + _BitScanReverse64(&r, x); +# else + // Scan the high 32 bits. 
+ if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 ^ (r + 32); + // Scan the low 32 bits. + _BitScanReverse(&r, static_cast(x)); +# endif + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return 63 ^ static_cast(r); +} +# define FMT_BUILTIN_CLZLL(n) detail::clzll(n) + +inline auto ctz(uint32_t x) -> int { + unsigned long r = 0; + _BitScanForward(&r, x); + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return static_cast(r); +} +# define FMT_BUILTIN_CTZ(n) detail::ctz(n) + +inline auto ctzll(uint64_t x) -> int { + unsigned long r = 0; + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. +# ifdef _WIN64 + _BitScanForward64(&r, x); +# else + // Scan the low 32 bits. + if (_BitScanForward(&r, static_cast(x))) return static_cast(r); + // Scan the high 32 bits. + _BitScanForward(&r, static_cast(x >> 32)); + r += 32; +# endif + return static_cast(r); +} +# define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) +# if !defined(__clang__) +# pragma managed(pop) +# endif +} // namespace detail +FMT_END_NAMESPACE +#endif + +FMT_BEGIN_NAMESPACE +namespace detail { + +#if __cplusplus >= 202002L || \ + (__cplusplus >= 201709L && FMT_GCC_VERSION >= 1002) +# define FMT_CONSTEXPR20 constexpr +#else +# define FMT_CONSTEXPR20 +#endif + +// An equivalent of `*reinterpret_cast(&source)` that doesn't have +// undefined behavior (e.g. due to type aliasing). +// Example: uint64_t d = bit_cast(2.718); +template +inline auto bit_cast(const Source& source) -> Dest { + static_assert(sizeof(Dest) == sizeof(Source), "size mismatch"); + Dest dest; + std::memcpy(&dest, &source, sizeof(dest)); + return dest; +} + +inline auto is_big_endian() -> bool { + const auto u = 1u; + struct bytes { + char data[sizeof(u)]; + }; + return bit_cast(u).data[0] == 0; +} + +// A fallback implementation of uintptr_t for systems that lack it. 
+struct fallback_uintptr { + unsigned char value[sizeof(void*)]; + + fallback_uintptr() = default; + explicit fallback_uintptr(const void* p) { + *this = bit_cast(p); + if (is_big_endian()) { + for (size_t i = 0, j = sizeof(void*) - 1; i < j; ++i, --j) + std::swap(value[i], value[j]); + } + } +}; +#ifdef UINTPTR_MAX +using uintptr_t = ::uintptr_t; +inline auto to_uintptr(const void* p) -> uintptr_t { + return bit_cast(p); +} +#else +using uintptr_t = fallback_uintptr; +inline auto to_uintptr(const void* p) -> fallback_uintptr { + return fallback_uintptr(p); +} +#endif + +// Returns the largest possible value for type T. Same as +// std::numeric_limits::max() but shorter and not affected by the max macro. +template constexpr auto max_value() -> T { + return (std::numeric_limits::max)(); +} +template constexpr auto num_bits() -> int { + return std::numeric_limits::digits; +} +// std::numeric_limits::digits may return 0 for 128-bit ints. +template <> constexpr auto num_bits() -> int { return 128; } +template <> constexpr auto num_bits() -> int { return 128; } +template <> constexpr auto num_bits() -> int { + return static_cast(sizeof(void*) * + std::numeric_limits::digits); +} + +FMT_INLINE void assume(bool condition) { + (void)condition; +#if FMT_HAS_BUILTIN(__builtin_assume) + __builtin_assume(condition); +#endif +} + +// An approximation of iterator_t for pre-C++20 systems. +template +using iterator_t = decltype(std::begin(std::declval())); +template using sentinel_t = decltype(std::end(std::declval())); + +// A workaround for std::string not having mutable data() until C++17. +template +inline auto get_data(std::basic_string& s) -> Char* { + return &s[0]; +} +template +inline auto get_data(Container& c) -> typename Container::value_type* { + return c.data(); +} + +#if defined(_SECURE_SCL) && _SECURE_SCL +// Make a checked iterator to avoid MSVC warnings. 
+template using checked_ptr = stdext::checked_array_iterator; +template auto make_checked(T* p, size_t size) -> checked_ptr { + return {p, size}; +} +#else +template using checked_ptr = T*; +template inline auto make_checked(T* p, size_t) -> T* { return p; } +#endif + +// Attempts to reserve space for n extra characters in the output range. +// Returns a pointer to the reserved range or a reference to it. +template ::value)> +#if FMT_CLANG_VERSION >= 307 && !FMT_ICC_VERSION +__attribute__((no_sanitize("undefined"))) +#endif +inline auto +reserve(std::back_insert_iterator it, size_t n) + -> checked_ptr { + Container& c = get_container(it); + size_t size = c.size(); + c.resize(size + n); + return make_checked(get_data(c) + size, n); +} + +template +inline auto reserve(buffer_appender it, size_t n) -> buffer_appender { + buffer& buf = get_container(it); + buf.try_reserve(buf.size() + n); + return it; +} + +template +constexpr auto reserve(Iterator& it, size_t) -> Iterator& { + return it; +} + +template +using reserve_iterator = + remove_reference_t(), 0))>; + +template +constexpr auto to_pointer(OutputIt, size_t) -> T* { + return nullptr; +} +template auto to_pointer(buffer_appender it, size_t n) -> T* { + buffer& buf = get_container(it); + auto size = buf.size(); + if (buf.capacity() < size + n) return nullptr; + buf.try_resize(size + n); + return buf.data() + size; +} + +template ::value)> +inline auto base_iterator(std::back_insert_iterator& it, + checked_ptr) + -> std::back_insert_iterator { + return it; +} + +template +constexpr auto base_iterator(Iterator, Iterator it) -> Iterator { + return it; +} + +// is spectacularly slow to compile in C++20 so use a simple fill_n +// instead (#1998). 
+template +FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value) + -> OutputIt { + for (Size i = 0; i < count; ++i) *out++ = value; + return out; +} +template +FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* { + if (is_constant_evaluated()) { + return fill_n(out, count, value); + } + std::memset(out, value, to_unsigned(count)); + return out + count; +} + +#ifdef __cpp_char8_t +using char8_type = char8_t; +#else +enum char8_type : unsigned char {}; +#endif + +template +FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, + OutputIt out) -> OutputIt { + return copy_str(begin, end, out); +} + +// A public domain branchless UTF-8 decoder by Christopher Wellons: +// https://github.com/skeeto/branchless-utf8 +/* Decode the next character, c, from s, reporting errors in e. + * + * Since this is a branchless decoder, four bytes will be read from the + * buffer regardless of the actual length of the next character. This + * means the buffer _must_ have at least three bytes of zero padding + * following the end of the data stream. + * + * Errors are reported in e, which will be non-zero if the parsed + * character was somehow invalid: invalid byte sequence, non-canonical + * encoding, or a surrogate half. + * + * The function returns a pointer to the next character. When an error + * occurs, this pointer will be a guess that depends on the particular + * error, but it will always advance at least one byte. + */ +FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e) + -> const char* { + constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; + constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; + constexpr const int shiftc[] = {0, 18, 12, 6, 0}; + constexpr const int shifte[] = {0, 6, 4, 2, 0}; + + int len = code_point_length(s); + const char* next = s + len; + + // Assume a four-byte character and load four bytes. Unused bits are + // shifted out. 
+ *c = uint32_t(s[0] & masks[len]) << 18; + *c |= uint32_t(s[1] & 0x3f) << 12; + *c |= uint32_t(s[2] & 0x3f) << 6; + *c |= uint32_t(s[3] & 0x3f) << 0; + *c >>= shiftc[len]; + + // Accumulate the various error conditions. + using uchar = unsigned char; + *e = (*c < mins[len]) << 6; // non-canonical encoding + *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? + *e |= (*c > 0x10FFFF) << 8; // out of range? + *e |= (uchar(s[1]) & 0xc0) >> 2; + *e |= (uchar(s[2]) & 0xc0) >> 4; + *e |= uchar(s[3]) >> 6; + *e ^= 0x2a; // top two bits of each tail byte correct? + *e >>= shifte[len]; + + return next; +} + +template +FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) { + auto decode = [f](const char* p) { + auto cp = uint32_t(); + auto error = 0; + p = utf8_decode(p, &cp, &error); + f(cp, error); + return p; + }; + auto p = s.data(); + const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. + if (s.size() >= block_size) { + for (auto end = p + s.size() - block_size + 1; p < end;) p = decode(p); + } + if (auto num_chars_left = s.data() + s.size() - p) { + char buf[2 * block_size - 1] = {}; + copy_str(p, p + num_chars_left, buf); + p = buf; + do { + p = decode(p); + } while (p - buf < num_chars_left); + } +} + +template +inline auto compute_width(basic_string_view s) -> size_t { + return s.size(); +} + +// Computes approximate display width of a UTF-8 string. +FMT_CONSTEXPR inline size_t compute_width(string_view s) { + size_t num_code_points = 0; + // It is not a lambda for compatibility with C++14. + struct count_code_points { + size_t* count; + FMT_CONSTEXPR void operator()(uint32_t cp, int error) const { + *count += detail::to_unsigned( + 1 + + (error == 0 && cp >= 0x1100 && + (cp <= 0x115f || // Hangul Jamo init. consonants + cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET + cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET + // CJK ... 
Yi except IDEOGRAPHIC HALF FILL SPACE: + (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || + (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables + (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs + (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms + (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms + (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms + (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms + (cp >= 0x20000 && cp <= 0x2fffd) || // CJK + (cp >= 0x30000 && cp <= 0x3fffd) || + // Miscellaneous Symbols and Pictographs + Emoticons: + (cp >= 0x1f300 && cp <= 0x1f64f) || + // Supplemental Symbols and Pictographs: + (cp >= 0x1f900 && cp <= 0x1f9ff)))); + } + }; + for_each_codepoint(s, count_code_points{&num_code_points}); + return num_code_points; +} + +inline auto compute_width(basic_string_view s) -> size_t { + return compute_width(basic_string_view( + reinterpret_cast(s.data()), s.size())); +} + +template +inline auto code_point_index(basic_string_view s, size_t n) -> size_t { + size_t size = s.size(); + return n < size ? n : size; +} + +// Calculates the index of the nth code point in a UTF-8 string. 
+inline auto code_point_index(basic_string_view s, size_t n) + -> size_t { + const char8_type* data = s.data(); + size_t num_code_points = 0; + for (size_t i = 0, size = s.size(); i != size; ++i) { + if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) return i; + } + return s.size(); +} + +template +using is_fast_float = bool_constant::is_iec559 && + sizeof(T) <= sizeof(double)>; + +#ifndef FMT_USE_FULL_CACHE_DRAGONBOX +# define FMT_USE_FULL_CACHE_DRAGONBOX 0 +#endif + +template +template +void buffer::append(const U* begin, const U* end) { + while (begin != end) { + auto count = to_unsigned(end - begin); + try_reserve(size_ + count); + auto free_cap = capacity_ - size_; + if (free_cap < count) count = free_cap; + std::uninitialized_copy_n(begin, count, make_checked(ptr_ + size_, count)); + size_ += count; + begin += count; + } +} + +template +struct is_locale : std::false_type {}; +template +struct is_locale> : std::true_type {}; +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +// The number of characters to store in the basic_memory_buffer object itself +// to avoid dynamic memory allocation. +enum { inline_buffer_size = 500 }; + +/** + \rst + A dynamically growing memory buffer for trivially copyable/constructible types + with the first ``SIZE`` elements stored in the object itself. + + You can use the ``memory_buffer`` type alias for ``char`` instead. + + **Example**:: + + fmt::memory_buffer out; + format_to(out, "The answer is {}.", 42); + + This will append the following output to the ``out`` object: + + .. code-block:: none + + The answer is 42. + + The output can be converted to an ``std::string`` with ``to_string(out)``. + \endrst + */ +template > +class basic_memory_buffer final : public detail::buffer { + private: + T store_[SIZE]; + + // Don't inherit from Allocator avoid generating type_info for it. + Allocator alloc_; + + // Deallocate memory allocated by the buffer. 
+ void deallocate() { + T* data = this->data(); + if (data != store_) alloc_.deallocate(data, this->capacity()); + } + + protected: + void grow(size_t size) final FMT_OVERRIDE; + + public: + using value_type = T; + using const_reference = const T&; + + explicit basic_memory_buffer(const Allocator& alloc = Allocator()) + : alloc_(alloc) { + this->set(store_, SIZE); + } + ~basic_memory_buffer() { deallocate(); } + + private: + // Move data from other to this buffer. + void move(basic_memory_buffer& other) { + alloc_ = std::move(other.alloc_); + T* data = other.data(); + size_t size = other.size(), capacity = other.capacity(); + if (data == other.store_) { + this->set(store_, capacity); + std::uninitialized_copy(other.store_, other.store_ + size, + detail::make_checked(store_, capacity)); + } else { + this->set(data, capacity); + // Set pointer to the inline array so that delete is not called + // when deallocating. + other.set(other.store_, 0); + } + this->resize(size); + } + + public: + /** + \rst + Constructs a :class:`fmt::basic_memory_buffer` object moving the content + of the other object to it. + \endrst + */ + basic_memory_buffer(basic_memory_buffer&& other) FMT_NOEXCEPT { move(other); } + + /** + \rst + Moves the content of the other ``basic_memory_buffer`` object to this one. + \endrst + */ + auto operator=(basic_memory_buffer&& other) FMT_NOEXCEPT + -> basic_memory_buffer& { + FMT_ASSERT(this != &other, ""); + deallocate(); + move(other); + return *this; + } + + // Returns a copy of the allocator associated with this buffer. + auto get_allocator() const -> Allocator { return alloc_; } + + /** + Resizes the buffer to contain *count* elements. If T is a POD type new + elements may not be initialized. + */ + void resize(size_t count) { this->try_resize(count); } + + /** Increases the buffer capacity to *new_capacity*. 
*/ + void reserve(size_t new_capacity) { this->try_reserve(new_capacity); } + + // Directly append data into the buffer + using detail::buffer::append; + template + void append(const ContiguousRange& range) { + append(range.data(), range.data() + range.size()); + } +}; + +template +void basic_memory_buffer::grow(size_t size) { +#ifdef FMT_FUZZ + if (size > 5000) throw std::runtime_error("fuzz mode - won't grow that much"); +#endif + const size_t max_size = std::allocator_traits::max_size(alloc_); + size_t old_capacity = this->capacity(); + size_t new_capacity = old_capacity + old_capacity / 2; + if (size > new_capacity) + new_capacity = size; + else if (new_capacity > max_size) + new_capacity = size > max_size ? size : max_size; + T* old_data = this->data(); + T* new_data = + std::allocator_traits::allocate(alloc_, new_capacity); + // The following code doesn't throw, so the raw pointer above doesn't leak. + std::uninitialized_copy(old_data, old_data + this->size(), + detail::make_checked(new_data, new_capacity)); + this->set(new_data, new_capacity); + // deallocate must not throw according to the standard, but even if it does, + // the buffer already uses the new storage and will deallocate it in + // destructor. + if (old_data != store_) alloc_.deallocate(old_data, old_capacity); +} + +using memory_buffer = basic_memory_buffer; + +template +struct is_contiguous> : std::true_type { +}; + +namespace detail { +FMT_API void print(std::FILE*, string_view); +} + +/** A formatting error such as invalid format string. 
*/ +FMT_CLASS_API +class FMT_API format_error : public std::runtime_error { + public: + explicit format_error(const char* message) : std::runtime_error(message) {} + explicit format_error(const std::string& message) + : std::runtime_error(message) {} + format_error(const format_error&) = default; + format_error& operator=(const format_error&) = default; + format_error(format_error&&) = default; + format_error& operator=(format_error&&) = default; + ~format_error() FMT_NOEXCEPT FMT_OVERRIDE FMT_MSC_DEFAULT; +}; + +/** + \rst + Constructs a `~fmt::format_arg_store` object that contains references + to arguments and can be implicitly converted to `~fmt::format_args`. + If ``fmt`` is a compile-time string then `make_args_checked` checks + its validity at compile time. + \endrst + */ +template > +FMT_INLINE auto make_args_checked(const S& fmt, + const remove_reference_t&... args) + -> format_arg_store, remove_reference_t...> { + static_assert( + detail::count<( + std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); + detail::check_format_string(fmt); + return {args...}; +} + +// compile-time support +namespace detail_exported { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template struct fixed_string { + constexpr fixed_string(const Char (&str)[N]) { + detail::copy_str(static_cast(str), + str + N, data); + } + Char data[N]{}; +}; +#endif + +// Converts a compile-time string to basic_string_view. +template +constexpr auto compile_string_to_view(const Char (&s)[N]) + -> basic_string_view { + // Remove trailing NUL character if needed. Won't be present if this is used + // with a raw character array (i.e. not defined as a string). + return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 
1 : 0)}; +} +template +constexpr auto compile_string_to_view(detail::std_string_view s) + -> basic_string_view { + return {s.data(), s.size()}; +} +} // namespace detail_exported + +FMT_BEGIN_DETAIL_NAMESPACE + +inline void throw_format_error(const char* message) { + FMT_THROW(format_error(message)); +} + +template struct is_integral : std::is_integral {}; +template <> struct is_integral : std::true_type {}; +template <> struct is_integral : std::true_type {}; + +template +using is_signed = + std::integral_constant::is_signed || + std::is_same::value>; + +// Returns true if value is negative, false otherwise. +// Same as `value < 0` but doesn't produce warnings if T is an unsigned type. +template ::value)> +FMT_CONSTEXPR auto is_negative(T value) -> bool { + return value < 0; +} +template ::value)> +FMT_CONSTEXPR auto is_negative(T) -> bool { + return false; +} + +template ::value)> +FMT_CONSTEXPR auto is_supported_floating_point(T) -> uint16_t { + return (std::is_same::value && FMT_USE_FLOAT) || + (std::is_same::value && FMT_USE_DOUBLE) || + (std::is_same::value && FMT_USE_LONG_DOUBLE); +} + +// Smallest of uint32_t, uint64_t, uint128_t that is large enough to +// represent all values of an integral type T. +template +using uint32_or_64_or_128_t = + conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, + uint32_t, + conditional_t() <= 64, uint64_t, uint128_t>>; +template +using uint64_or_128_t = conditional_t() <= 64, uint64_t, uint128_t>; + +#define FMT_POWERS_OF_10(factor) \ + factor * 10, (factor)*100, (factor)*1000, (factor)*10000, (factor)*100000, \ + (factor)*1000000, (factor)*10000000, (factor)*100000000, \ + (factor)*1000000000 + +// Static data is placed in this class template for the header-only config. +template struct basic_data { + // log10(2) = 0x0.4d104d427de7fbcc... + static const uint64_t log10_2_significand = 0x4d104d427de7fbcc; + + // GCC generates slightly better code for pairs than chars. 
+ FMT_API static constexpr const char digits[100][2] = { + {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'}, + {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'}, + {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'}, + {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'}, + {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'}, + {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'}, + {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'}, + {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'}, + {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'}, + {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'}, + {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'}, + {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'}, + {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'}, + {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'}, + {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'}, + {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'}, + {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}}; + + FMT_API static constexpr const char hex_digits[] = "0123456789abcdef"; + FMT_API static constexpr const char signs[4] = {0, '-', '+', ' '}; + FMT_API static constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', + 0x1000000u | ' '}; + FMT_API static constexpr const char left_padding_shifts[5] = {31, 31, 0, 1, + 0}; + FMT_API static constexpr const char right_padding_shifts[5] = {0, 31, 0, 1, + 0}; +}; + +#ifdef FMT_SHARED +// Required for -flto, -fivisibility=hidden and -shared to work +extern template struct basic_data; +#endif + +// This is a struct rather than an alias to avoid shadowing warnings in gcc. 
+struct data : basic_data<> {}; + +template FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { + int count = 1; + for (;;) { + // Integer division is slow so do it for a group of four digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. + if (n < 10) return count; + if (n < 100) return count + 1; + if (n < 1000) return count + 2; + if (n < 10000) return count + 3; + n /= 10000u; + count += 4; + } +} +#if FMT_USE_INT128 +FMT_CONSTEXPR inline auto count_digits(uint128_t n) -> int { + return count_digits_fallback(n); +} +#endif + +// Returns the number of decimal digits in n. Leading zeros are not counted +// except for n == 0 in which case count_digits returns 1. +FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int { +#ifdef FMT_BUILTIN_CLZLL + if (!is_constant_evaluated()) { + // https://github.com/fmtlib/format-benchmark/blob/master/digits10 + // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). + constexpr uint16_t bsr2log10[] = { + 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, + 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, + 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; + auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; + constexpr const uint64_t zero_or_powers_of_10[] = { + 0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), + 10000000000000000000ULL}; + return t - (n < zero_or_powers_of_10[t]); + } +#endif + return count_digits_fallback(n); +} + +// Counts the number of digits in n. BITS = log2(radix). 
+template +FMT_CONSTEXPR auto count_digits(UInt n) -> int { +#ifdef FMT_BUILTIN_CLZ + if (num_bits() == 32) + return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; +#endif + int num_digits = 0; + do { + ++num_digits; + } while ((n >>= BITS) != 0); + return num_digits; +} + +template <> auto count_digits<4>(detail::fallback_uintptr n) -> int; + +// It is a separate function rather than a part of count_digits to workaround +// the lack of static constexpr in constexpr functions. +FMT_INLINE uint64_t count_digits_inc(int n) { + // An optimization by Kendall Willets from https://bit.ly/3uOIQrB. + // This increments the upper 32 bits (log10(T) - 1) when >= T is added. +#define FMT_INC(T) (((sizeof(#T) - 1ull) << 32) - T) + static constexpr uint64_t table[] = { + FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 + FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 + FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 + FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 + FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k + FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k + FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k + FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M + FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M + FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M + FMT_INC(1000000000), FMT_INC(1000000000) // 4B + }; + return table[n]; +} + +// Optional version of count_digits for better performance on 32-bit platforms. 
+FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int { +#ifdef FMT_BUILTIN_CLZ + if (!is_constant_evaluated()) { + auto inc = count_digits_inc(FMT_BUILTIN_CLZ(n | 1) ^ 31); + return static_cast((n + inc) >> 32); + } +#endif + return count_digits_fallback(n); +} + +template constexpr auto digits10() FMT_NOEXCEPT -> int { + return std::numeric_limits::digits10; +} +template <> constexpr auto digits10() FMT_NOEXCEPT -> int { + return 38; +} +template <> constexpr auto digits10() FMT_NOEXCEPT -> int { + return 38; +} + +template struct thousands_sep_result { + std::string grouping; + Char thousands_sep; +}; + +template +FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result; +template +inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { + auto result = thousands_sep_impl(loc); + return {result.grouping, Char(result.thousands_sep)}; +} +template <> +inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { + return thousands_sep_impl(loc); +} + +template +FMT_API auto decimal_point_impl(locale_ref loc) -> Char; +template inline auto decimal_point(locale_ref loc) -> Char { + return Char(decimal_point_impl(loc)); +} +template <> inline auto decimal_point(locale_ref loc) -> wchar_t { + return decimal_point_impl(loc); +} + +// Compares two characters for equality. +template auto equal2(const Char* lhs, const char* rhs) -> bool { + return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); +} +inline auto equal2(const char* lhs, const char* rhs) -> bool { + return memcmp(lhs, rhs, 2) == 0; +} + +// Copies two characters from src to dst. +template void copy2(Char* dst, const char* src) { + *dst++ = static_cast(*src++); + *dst = static_cast(*src); +} +FMT_INLINE void copy2(char* dst, const char* src) { memcpy(dst, src, 2); } + +template struct format_decimal_result { + Iterator begin; + Iterator end; +}; + +// Formats a decimal unsigned integer value writing into out pointing to a +// buffer of specified size. 
The caller must ensure that the buffer is large +// enough. +template +FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size) + -> format_decimal_result { + FMT_ASSERT(size >= count_digits(value), "invalid digit count"); + out += size; + Char* end = out; + if (is_constant_evaluated()) { + while (value >= 10) { + *--out = static_cast('0' + value % 10); + value /= 10; + } + *--out = static_cast('0' + value); + return {out, end}; + } + while (value >= 100) { + // Integer division is slow so do it for a group of two digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. + out -= 2; + copy2(out, data::digits[value % 100]); + value /= 100; + } + if (value < 10) { + *--out = static_cast('0' + value); + return {out, end}; + } + out -= 2; + copy2(out, data::digits[value]); + return {out, end}; +} + +template >::value)> +inline auto format_decimal(Iterator out, UInt value, int size) + -> format_decimal_result { + // Buffer is large enough to hold all digits (digits10 + 1). + Char buffer[digits10() + 1]; + auto end = format_decimal(buffer, value, size).end; + return {out, detail::copy_str_noinline(buffer, end, out)}; +} + +template +FMT_CONSTEXPR auto format_uint(Char* buffer, UInt value, int num_digits, + bool upper = false) -> Char* { + buffer += num_digits; + Char* end = buffer; + do { + const char* digits = upper ? "0123456789ABCDEF" : data::hex_digits; + unsigned digit = (value & ((1 << BASE_BITS) - 1)); + *--buffer = static_cast(BASE_BITS < 4 ? 
static_cast('0' + digit) + : digits[digit]); + } while ((value >>= BASE_BITS) != 0); + return end; +} + +template +auto format_uint(Char* buffer, detail::fallback_uintptr n, int num_digits, + bool = false) -> Char* { + auto char_digits = std::numeric_limits::digits / 4; + int start = (num_digits + char_digits - 1) / char_digits - 1; + if (int start_digits = num_digits % char_digits) { + unsigned value = n.value[start--]; + buffer = format_uint(buffer, value, start_digits); + } + for (; start >= 0; --start) { + unsigned value = n.value[start]; + buffer += char_digits; + auto p = buffer; + for (int i = 0; i < char_digits; ++i) { + unsigned digit = (value & ((1 << BASE_BITS) - 1)); + *--p = static_cast(data::hex_digits[digit]); + value >>= BASE_BITS; + } + } + return buffer; +} + +template +inline auto format_uint(It out, UInt value, int num_digits, bool upper = false) + -> It { + if (auto ptr = to_pointer(out, to_unsigned(num_digits))) { + format_uint(ptr, value, num_digits, upper); + return out; + } + // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1). + char buffer[num_bits() / BASE_BITS + 1]; + format_uint(buffer, value, num_digits, upper); + return detail::copy_str_noinline(buffer, buffer + num_digits, out); +} + +// A converter from UTF-8 to UTF-16. +class utf8_to_utf16 { + private: + basic_memory_buffer buffer_; + + public: + FMT_API explicit utf8_to_utf16(string_view s); + operator basic_string_view() const { return {&buffer_[0], size()}; } + auto size() const -> size_t { return buffer_.size() - 1; } + auto c_str() const -> const wchar_t* { return &buffer_[0]; } + auto str() const -> std::wstring { return {&buffer_[0], size()}; } +}; + +namespace dragonbox { + +// Type-specific information that Dragonbox uses. 
+template struct float_info; + +template <> struct float_info { + using carrier_uint = uint32_t; + static const int significand_bits = 23; + static const int exponent_bits = 8; + static const int min_exponent = -126; + static const int max_exponent = 127; + static const int exponent_bias = -127; + static const int decimal_digits = 9; + static const int kappa = 1; + static const int big_divisor = 100; + static const int small_divisor = 10; + static const int min_k = -31; + static const int max_k = 46; + static const int cache_bits = 64; + static const int divisibility_check_by_5_threshold = 39; + static const int case_fc_pm_half_lower_threshold = -1; + static const int case_fc_pm_half_upper_threshold = 6; + static const int case_fc_lower_threshold = -2; + static const int case_fc_upper_threshold = 6; + static const int case_shorter_interval_left_endpoint_lower_threshold = 2; + static const int case_shorter_interval_left_endpoint_upper_threshold = 3; + static const int shorter_interval_tie_lower_threshold = -35; + static const int shorter_interval_tie_upper_threshold = -35; + static const int max_trailing_zeros = 7; +}; + +template <> struct float_info { + using carrier_uint = uint64_t; + static const int significand_bits = 52; + static const int exponent_bits = 11; + static const int min_exponent = -1022; + static const int max_exponent = 1023; + static const int exponent_bias = -1023; + static const int decimal_digits = 17; + static const int kappa = 2; + static const int big_divisor = 1000; + static const int small_divisor = 100; + static const int min_k = -292; + static const int max_k = 326; + static const int cache_bits = 128; + static const int divisibility_check_by_5_threshold = 86; + static const int case_fc_pm_half_lower_threshold = -2; + static const int case_fc_pm_half_upper_threshold = 9; + static const int case_fc_lower_threshold = -4; + static const int case_fc_upper_threshold = 9; + static const int case_shorter_interval_left_endpoint_lower_threshold 
= 2; + static const int case_shorter_interval_left_endpoint_upper_threshold = 3; + static const int shorter_interval_tie_lower_threshold = -77; + static const int shorter_interval_tie_upper_threshold = -77; + static const int max_trailing_zeros = 16; +}; + +template struct decimal_fp { + using significand_type = typename float_info::carrier_uint; + significand_type significand; + int exponent; +}; + +template +FMT_API auto to_decimal(T x) FMT_NOEXCEPT -> decimal_fp; +} // namespace dragonbox + +template +constexpr auto exponent_mask() -> + typename dragonbox::float_info::carrier_uint { + using uint = typename dragonbox::float_info::carrier_uint; + return ((uint(1) << dragonbox::float_info::exponent_bits) - 1) + << dragonbox::float_info::significand_bits; +} + +// Writes the exponent exp in the form "[+-]d{2,3}" to buffer. +template +auto write_exponent(int exp, It it) -> It { + FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); + if (exp < 0) { + *it++ = static_cast('-'); + exp = -exp; + } else { + *it++ = static_cast('+'); + } + if (exp >= 100) { + const char* top = data::digits[exp / 100]; + if (exp >= 1000) *it++ = static_cast(top[0]); + *it++ = static_cast(top[1]); + exp %= 100; + } + const char* d = data::digits[exp]; + *it++ = static_cast(d[0]); + *it++ = static_cast(d[1]); + return it; +} + +template +auto format_float(T value, int precision, float_specs specs, buffer& buf) + -> int; + +// Formats a floating-point number with snprintf. 
+template +auto snprintf_float(T value, int precision, float_specs specs, + buffer& buf) -> int; + +template auto promote_float(T value) -> T { return value; } +inline auto promote_float(float value) -> double { + return static_cast(value); +} + +template +FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, + const fill_t& fill) -> OutputIt { + auto fill_size = fill.size(); + if (fill_size == 1) return detail::fill_n(it, n, fill[0]); + auto data = fill.data(); + for (size_t i = 0; i < n; ++i) + it = copy_str(data, data + fill_size, it); + return it; +} + +// Writes the output of f, padded according to format specifications in specs. +// size: output size in code units. +// width: output display width in (terminal) column positions. +template +FMT_CONSTEXPR auto write_padded(OutputIt out, + const basic_format_specs& specs, + size_t size, size_t width, F&& f) -> OutputIt { + static_assert(align == align::left || align == align::right, ""); + unsigned spec_width = to_unsigned(specs.width); + size_t padding = spec_width > width ? spec_width - width : 0; + auto* shifts = align == align::left ? 
data::left_padding_shifts + : data::right_padding_shifts; + size_t left_padding = padding >> shifts[specs.align]; + size_t right_padding = padding - left_padding; + auto it = reserve(out, size + padding * specs.fill.size()); + if (left_padding != 0) it = fill(it, left_padding, specs.fill); + it = f(it); + if (right_padding != 0) it = fill(it, right_padding, specs.fill); + return base_iterator(out, it); +} + +template +constexpr auto write_padded(OutputIt out, const basic_format_specs& specs, + size_t size, F&& f) -> OutputIt { + return write_padded(out, specs, size, size, f); +} + +template +FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, + const basic_format_specs& specs) + -> OutputIt { + return write_padded( + out, specs, bytes.size(), [bytes](reserve_iterator it) { + const char* data = bytes.data(); + return copy_str(data, data + bytes.size(), it); + }); +} + +template +auto write_ptr(OutputIt out, UIntPtr value, + const basic_format_specs* specs) -> OutputIt { + int num_digits = count_digits<4>(value); + auto size = to_unsigned(num_digits) + size_t(2); + auto write = [=](reserve_iterator it) { + *it++ = static_cast('0'); + *it++ = static_cast('x'); + return format_uint<4, Char>(it, value, num_digits); + }; + return specs ? write_padded(out, *specs, size, write) + : base_iterator(out, write(reserve(out, size))); +} + +template +FMT_CONSTEXPR auto write_char(OutputIt out, Char value, + const basic_format_specs& specs) + -> OutputIt { + return write_padded(out, specs, 1, [=](reserve_iterator it) { + *it++ = value; + return it; + }); +} +template +FMT_CONSTEXPR auto write(OutputIt out, Char value, + const basic_format_specs& specs, + locale_ref loc = {}) -> OutputIt { + return check_char_specs(specs) + ? write_char(out, value, specs) + : write(out, static_cast(value), specs, loc); +} + +// Data for write_int that doesn't depend on output iterator type. It is used to +// avoid template code bloat. 
+template struct write_int_data { + size_t size; + size_t padding; + + FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, + const basic_format_specs& specs) + : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { + if (specs.align == align::numeric) { + auto width = to_unsigned(specs.width); + if (width > size) { + padding = width - size; + size = width; + } + } else if (specs.precision > num_digits) { + size = (prefix >> 24) + to_unsigned(specs.precision); + padding = to_unsigned(specs.precision - num_digits); + } + } +}; + +// Writes an integer in the format +// +// where are written by write_digits(it). +// prefix contains chars in three lower bytes and the size in the fourth byte. +template +FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits, + unsigned prefix, + const basic_format_specs& specs, + W write_digits) -> OutputIt { + // Slightly faster check for specs.width == 0 && specs.precision == -1. + if ((specs.width | (specs.precision + 1)) == 0) { + auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24)); + if (prefix != 0) { + for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) + *it++ = static_cast(p & 0xff); + } + return base_iterator(out, write_digits(it)); + } + auto data = write_int_data(num_digits, prefix, specs); + return write_padded( + out, specs, data.size, [=](reserve_iterator it) { + for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) + *it++ = static_cast(p & 0xff); + it = detail::fill_n(it, data.padding, static_cast('0')); + return write_digits(it); + }); +} + +template +auto write_int_localized(OutputIt& out, UInt value, unsigned prefix, + const basic_format_specs& specs, locale_ref loc) + -> bool { + static_assert(std::is_same, UInt>::value, ""); + const auto sep_size = 1; + auto ts = thousands_sep(loc); + if (!ts.thousands_sep) return false; + int num_digits = count_digits(value); + int size = num_digits, n = num_digits; + const std::string& groups = ts.grouping; + 
std::string::const_iterator group = groups.cbegin(); + while (group != groups.cend() && n > *group && *group > 0 && + *group != max_value()) { + size += sep_size; + n -= *group; + ++group; + } + if (group == groups.cend()) size += sep_size * ((n - 1) / groups.back()); + char digits[40]; + format_decimal(digits, value, num_digits); + basic_memory_buffer buffer; + if (prefix != 0) ++size; + const auto usize = to_unsigned(size); + buffer.resize(usize); + basic_string_view s(&ts.thousands_sep, sep_size); + // Index of a decimal digit with the least significant digit having index 0. + int digit_index = 0; + group = groups.cbegin(); + auto p = buffer.data() + size - 1; + for (int i = num_digits - 1; i > 0; --i) { + *p-- = static_cast(digits[i]); + if (*group <= 0 || ++digit_index % *group != 0 || + *group == max_value()) + continue; + if (group + 1 != groups.cend()) { + digit_index = 0; + ++group; + } + std::uninitialized_copy(s.data(), s.data() + s.size(), + make_checked(p, s.size())); + p -= s.size(); + } + *p-- = static_cast(*digits); + if (prefix != 0) *p = static_cast(prefix); + auto data = buffer.data(); + out = write_padded( + out, specs, usize, usize, [=](reserve_iterator it) { + return copy_str(data, data + size, it); + }); + return true; +} + +FMT_CONSTEXPR inline void prefix_append(unsigned& prefix, unsigned value) { + prefix |= prefix != 0 ? value << 8 : value; + prefix += (1u + (value > 0xff ? 
1 : 0)) << 24; +} + +template struct write_int_arg { + UInt abs_value; + unsigned prefix; +}; + +template +FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) + -> write_int_arg> { + auto prefix = 0u; + auto abs_value = static_cast>(value); + if (is_negative(value)) { + prefix = 0x01000000 | '-'; + abs_value = 0 - abs_value; + } else { + prefix = data::prefixes[sign]; + } + return {abs_value, prefix}; +} + +template +FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, + const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + static_assert(std::is_same>::value, ""); + auto abs_value = arg.abs_value; + auto prefix = arg.prefix; + auto utype = static_cast(specs.type); + switch (specs.type) { + case 0: + case 'd': { + if (specs.localized && + write_int_localized(out, static_cast>(abs_value), + prefix, specs, loc)) { + return out; + } + auto num_digits = count_digits(abs_value); + return write_int( + out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_decimal(it, abs_value, num_digits).end; + }); + } + case 'x': + case 'X': { + if (specs.alt) prefix_append(prefix, (utype << 8) | '0'); + bool upper = specs.type != 'x'; + int num_digits = count_digits<4>(abs_value); + return write_int( + out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_uint<4, Char>(it, abs_value, num_digits, upper); + }); + } + case 'b': + case 'B': { + if (specs.alt) prefix_append(prefix, (utype << 8) | '0'); + int num_digits = count_digits<1>(abs_value); + return write_int(out, num_digits, prefix, specs, + [=](reserve_iterator it) { + return format_uint<1, Char>(it, abs_value, num_digits); + }); + } + case 'o': { + int num_digits = count_digits<3>(abs_value); + if (specs.alt && specs.precision <= num_digits && abs_value != 0) { + // Octal prefix '0' is counted as a digit, so only add it if precision + // is not greater than the number of digits. 
+ prefix_append(prefix, '0'); + } + return write_int(out, num_digits, prefix, specs, + [=](reserve_iterator it) { + return format_uint<3, Char>(it, abs_value, num_digits); + }); + } + case 'c': + return write_char(out, static_cast(abs_value), specs); + default: + FMT_THROW(format_error("invalid type specifier")); + } + return out; +} +template ::value && + !std::is_same::value && + std::is_same>::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value, + const basic_format_specs& specs, locale_ref loc) + -> OutputIt { + return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); +} +// An inlined version of write used in format string compilation. +template ::value && + !std::is_same::value && + !std::is_same>::value)> +FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, + const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); +} + +template +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, + const basic_format_specs& specs) -> OutputIt { + auto data = s.data(); + auto size = s.size(); + if (specs.precision >= 0 && to_unsigned(specs.precision) < size) + size = code_point_index(s, to_unsigned(specs.precision)); + auto width = + specs.width != 0 ? compute_width(basic_string_view(data, size)) : 0; + return write_padded(out, specs, size, width, + [=](reserve_iterator it) { + return copy_str(data, data + size, it); + }); +} +template +FMT_CONSTEXPR auto write(OutputIt out, + basic_string_view> s, + const basic_format_specs& specs, locale_ref) + -> OutputIt { + check_string_type_spec(specs.type); + return write(out, s, specs); +} +template +FMT_CONSTEXPR auto write(OutputIt out, const Char* s, + const basic_format_specs& specs, locale_ref) + -> OutputIt { + return check_cstring_type_spec(specs.type) + ? 
write(out, basic_string_view(s), specs, {}) + : write_ptr(out, to_uintptr(s), &specs); +} + +template +auto write_nonfinite(OutputIt out, bool isinf, basic_format_specs specs, + const float_specs& fspecs) -> OutputIt { + auto str = + isinf ? (fspecs.upper ? "INF" : "inf") : (fspecs.upper ? "NAN" : "nan"); + constexpr size_t str_size = 3; + auto sign = fspecs.sign; + auto size = str_size + (sign ? 1 : 0); + // Replace '0'-padding with space for non-finite values. + const bool is_zero_fill = + specs.fill.size() == 1 && *specs.fill.data() == static_cast('0'); + if (is_zero_fill) specs.fill[0] = static_cast(' '); + return write_padded(out, specs, size, [=](reserve_iterator it) { + if (sign) *it++ = static_cast(data::signs[sign]); + return copy_str(str, str + str_size, it); + }); +} + +// A decimal floating-point number significand * pow(10, exp). +struct big_decimal_fp { + const char* significand; + int significand_size; + int exponent; +}; + +inline auto get_significand_size(const big_decimal_fp& fp) -> int { + return fp.significand_size; +} +template +inline auto get_significand_size(const dragonbox::decimal_fp& fp) -> int { + return count_digits(fp.significand); +} + +template +inline auto write_significand(OutputIt out, const char* significand, + int& significand_size) -> OutputIt { + return copy_str(significand, significand + significand_size, out); +} +template +inline auto write_significand(OutputIt out, UInt significand, + int significand_size) -> OutputIt { + return format_decimal(out, significand, significand_size).end; +} + +template ::value)> +inline auto write_significand(Char* out, UInt significand, int significand_size, + int integral_size, Char decimal_point) -> Char* { + if (!decimal_point) + return format_decimal(out, significand, significand_size).end; + auto end = format_decimal(out + 1, significand, significand_size).end; + if (integral_size == 1) { + out[0] = out[1]; + } else { + std::uninitialized_copy_n(out + 1, integral_size, + 
make_checked(out, to_unsigned(integral_size))); + } + out[integral_size] = decimal_point; + return end; +} + +template >::value)> +inline auto write_significand(OutputIt out, UInt significand, + int significand_size, int integral_size, + Char decimal_point) -> OutputIt { + // Buffer is large enough to hold digits (digits10 + 1) and a decimal point. + Char buffer[digits10() + 2]; + auto end = write_significand(buffer, significand, significand_size, + integral_size, decimal_point); + return detail::copy_str_noinline(buffer, end, out); +} + +template +inline auto write_significand(OutputIt out, const char* significand, + int significand_size, int integral_size, + Char decimal_point) -> OutputIt { + out = detail::copy_str_noinline(significand, + significand + integral_size, out); + if (!decimal_point) return out; + *out++ = decimal_point; + return detail::copy_str_noinline(significand + integral_size, + significand + significand_size, out); +} + +template +auto write_float(OutputIt out, const DecimalFP& fp, + const basic_format_specs& specs, float_specs fspecs, + Char decimal_point) -> OutputIt { + auto significand = fp.significand; + int significand_size = get_significand_size(fp); + static const Char zero = static_cast('0'); + auto sign = fspecs.sign; + size_t size = to_unsigned(significand_size) + (sign ? 1 : 0); + using iterator = reserve_iterator; + + int output_exp = fp.exponent + significand_size - 1; + auto use_exp_format = [=]() { + if (fspecs.format == float_format::exp) return true; + if (fspecs.format != float_format::general) return false; + // Use the fixed notation if the exponent is in [exp_lower, exp_upper), + // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. + const int exp_lower = -4, exp_upper = 16; + return output_exp < exp_lower || + output_exp >= (fspecs.precision > 0 ? 
fspecs.precision : exp_upper); + }; + if (use_exp_format()) { + int num_zeros = 0; + if (fspecs.showpoint) { + num_zeros = fspecs.precision - significand_size; + if (num_zeros < 0) num_zeros = 0; + size += to_unsigned(num_zeros); + } else if (significand_size == 1) { + decimal_point = Char(); + } + auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; + int exp_digits = 2; + if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3; + + size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); + char exp_char = fspecs.upper ? 'E' : 'e'; + auto write = [=](iterator it) { + if (sign) *it++ = static_cast(data::signs[sign]); + // Insert a decimal point after the first digit and add an exponent. + it = write_significand(it, significand, significand_size, 1, + decimal_point); + if (num_zeros > 0) it = detail::fill_n(it, num_zeros, zero); + *it++ = static_cast(exp_char); + return write_exponent(output_exp, it); + }; + return specs.width > 0 ? write_padded(out, specs, size, write) + : base_iterator(out, write(reserve(out, size))); + } + + int exp = fp.exponent + significand_size; + if (fp.exponent >= 0) { + // 1234e5 -> 123400000[.0+] + size += to_unsigned(fp.exponent); + int num_zeros = fspecs.precision - exp; +#ifdef FMT_FUZZ + if (num_zeros > 5000) + throw std::runtime_error("fuzz mode - avoiding excessive cpu use"); +#endif + if (fspecs.showpoint) { + if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1; + if (num_zeros > 0) size += to_unsigned(num_zeros) + 1; + } + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = static_cast(data::signs[sign]); + it = write_significand(it, significand, significand_size); + it = detail::fill_n(it, fp.exponent, zero); + if (!fspecs.showpoint) return it; + *it++ = decimal_point; + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } else if (exp > 0) { + // 1234e-2 -> 12.34[0+] + int num_zeros = fspecs.showpoint ? 
fspecs.precision - significand_size : 0; + size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = static_cast(data::signs[sign]); + it = write_significand(it, significand, significand_size, exp, + decimal_point); + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } + // 1234e-6 -> 0.001234 + int num_zeros = -exp; + if (significand_size == 0 && fspecs.precision >= 0 && + fspecs.precision < num_zeros) { + num_zeros = fspecs.precision; + } + bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; + size += 1 + (pointy ? 1 : 0) + to_unsigned(num_zeros); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = static_cast(data::signs[sign]); + *it++ = zero; + if (!pointy) return it; + *it++ = decimal_point; + it = detail::fill_n(it, num_zeros, zero); + return write_significand(it, significand, significand_size); + }); +} + +template ::value)> +auto write(OutputIt out, T value, basic_format_specs specs, + locale_ref loc = {}) -> OutputIt { + if (const_check(!is_supported_floating_point(value))) return out; + float_specs fspecs = parse_float_type_spec(specs); + fspecs.sign = specs.sign; + if (std::signbit(value)) { // value < 0 is false for NaN so use signbit. 
+ fspecs.sign = sign::minus; + value = -value; + } else if (fspecs.sign == sign::minus) { + fspecs.sign = sign::none; + } + + if (!std::isfinite(value)) + return write_nonfinite(out, std::isinf(value), specs, fspecs); + + if (specs.align == align::numeric && fspecs.sign) { + auto it = reserve(out, 1); + *it++ = static_cast(data::signs[fspecs.sign]); + out = base_iterator(out, it); + fspecs.sign = sign::none; + if (specs.width != 0) --specs.width; + } + + memory_buffer buffer; + if (fspecs.format == float_format::hex) { + if (fspecs.sign) buffer.push_back(data::signs[fspecs.sign]); + snprintf_float(promote_float(value), specs.precision, fspecs, buffer); + return write_bytes(out, {buffer.data(), buffer.size()}, + specs); + } + int precision = specs.precision >= 0 || !specs.type ? specs.precision : 6; + if (fspecs.format == float_format::exp) { + if (precision == max_value()) + FMT_THROW(format_error("number is too big")); + else + ++precision; + } + if (const_check(std::is_same())) fspecs.binary32 = true; + fspecs.use_grisu = is_fast_float(); + int exp = format_float(promote_float(value), precision, fspecs, buffer); + fspecs.precision = precision; + Char point = + fspecs.locale ? 
decimal_point(loc) : static_cast('.'); + auto fp = big_decimal_fp{buffer.data(), static_cast(buffer.size()), exp}; + return write_float(out, fp, specs, fspecs, point); +} + +template ::value)> +auto write(OutputIt out, T value) -> OutputIt { + if (const_check(!is_supported_floating_point(value))) return out; + + using floaty = conditional_t::value, double, T>; + using uint = typename dragonbox::float_info::carrier_uint; + auto bits = bit_cast(value); + + auto fspecs = float_specs(); + auto sign_bit = bits & (uint(1) << (num_bits() - 1)); + if (sign_bit != 0) { + fspecs.sign = sign::minus; + value = -value; + } + + static const auto specs = basic_format_specs(); + uint mask = exponent_mask(); + if ((bits & mask) == mask) + return write_nonfinite(out, std::isinf(value), specs, fspecs); + + auto dec = dragonbox::to_decimal(static_cast(value)); + return write_float(out, dec, specs, fspecs, static_cast('.')); +} + +template ::value && + !is_fast_float::value)> +inline auto write(OutputIt out, T value) -> OutputIt { + return write(out, value, basic_format_specs()); +} + +template +auto write(OutputIt out, monostate, basic_format_specs = {}, + locale_ref = {}) -> OutputIt { + FMT_ASSERT(false, ""); + return out; +} + +template +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) + -> OutputIt { + auto it = reserve(out, value.size()); + it = copy_str_noinline(value.begin(), value.end(), it); + return base_iterator(out, it); +} + +template ::value)> +constexpr auto write(OutputIt out, const T& value) -> OutputIt { + return write(out, to_string_view(value)); +} + +template ::value && + !std::is_same::value && + !std::is_same::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { + auto abs_value = static_cast>(value); + bool negative = is_negative(value); + // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. + if (negative) abs_value = ~abs_value + 1; + int num_digits = count_digits(abs_value); + auto size = (negative ? 
1 : 0) + static_cast(num_digits); + auto it = reserve(out, size); + if (auto ptr = to_pointer(it, size)) { + if (negative) *ptr++ = static_cast('-'); + format_decimal(ptr, abs_value, num_digits); + return out; + } + if (negative) *it++ = static_cast('-'); + it = format_decimal(it, abs_value, num_digits).end; + return base_iterator(out, it); +} + +// FMT_ENABLE_IF() condition separated to workaround MSVC bug +template < + typename Char, typename OutputIt, typename T, + bool check = + std::is_enum::value && !std::is_same::value && + mapped_type_constant>::value != + type::custom_type, + FMT_ENABLE_IF(check)> +FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { + return write( + out, static_cast::type>(value)); +} + +template ::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value, + const basic_format_specs& specs = {}, + locale_ref = {}) -> OutputIt { + return specs.type && specs.type != 's' + ? write(out, value ? 1 : 0, specs, {}) + : write_bytes(out, value ? "true" : "false", specs); +} + +template +FMT_CONSTEXPR auto write(OutputIt out, Char value) -> OutputIt { + auto it = reserve(out, 1); + *it++ = value; + return base_iterator(out, it); +} + +template +FMT_CONSTEXPR_CHAR_TRAITS auto write(OutputIt out, const Char* value) + -> OutputIt { + if (!value) { + FMT_THROW(format_error("string pointer is null")); + } else { + auto length = std::char_traits::length(value); + out = write(out, basic_string_view(value, length)); + } + return out; +} + +template ::value)> +auto write(OutputIt out, const T* value, + const basic_format_specs& specs = {}, locale_ref = {}) + -> OutputIt { + check_pointer_type_spec(specs.type, error_handler()); + return write_ptr(out, to_uintptr(value), &specs); +} + +template +FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> + typename std::enable_if< + mapped_type_constant>::value == + type::custom_type, + OutputIt>::type { + using context_type = basic_format_context; + using formatter_type = + conditional_t::value, + 
typename context_type::template formatter_type, + fallback_formatter>; + context_type ctx(out, {}, {}); + return formatter_type().format(value, ctx); +} + +// An argument visitor that formats the argument and writes it via the output +// iterator. It's a class and not a generic lambda for compatibility with C++11. +template struct default_arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + basic_format_args args; + locale_ref loc; + + template auto operator()(T value) -> iterator { + return write(out, value); + } + auto operator()(typename basic_format_arg::handle h) -> iterator { + basic_format_parse_context parse_ctx({}); + context format_ctx(out, args, loc); + h.format(parse_ctx, format_ctx); + return format_ctx.out(); + } +}; + +template struct arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + const basic_format_specs& specs; + locale_ref locale; + + template + FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { + return detail::write(out, value, specs, locale); + } + auto operator()(typename basic_format_arg::handle) -> iterator { + // User-defined types are handled separately because they require access + // to the parse context. 
+ return out; + } +}; + +template struct custom_formatter { + basic_format_parse_context& parse_ctx; + buffer_context& ctx; + + void operator()( + typename basic_format_arg>::handle h) const { + h.format(parse_ctx, ctx); + } + template void operator()(T) const {} +}; + +template +using is_integer = + bool_constant::value && !std::is_same::value && + !std::is_same::value && + !std::is_same::value>; + +template class width_checker { + public: + explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {} + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) handler_.on_error("negative width"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("width is not integer"); + return 0; + } + + private: + ErrorHandler& handler_; +}; + +template class precision_checker { + public: + explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {} + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) handler_.on_error("negative precision"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("precision is not integer"); + return 0; + } + + private: + ErrorHandler& handler_; +}; + +template