/*
 * NativeAPI.actor.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "flow/IRandom.h"
#include "flow/Tracing.h"
#if defined(NO_INTELLISENSE) && !defined(FDBCLIENT_NATIVEAPI_ACTOR_G_H)
#define FDBCLIENT_NATIVEAPI_ACTOR_G_H
#include "fdbclient/NativeAPI.actor.g.h"
#elif !defined(FDBCLIENT_NATIVEAPI_ACTOR_H)
#define FDBCLIENT_NATIVEAPI_ACTOR_H

#include "flow/flow.h"
#include "flow/TDMetric.actor.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/ClientLogEvents.h"
#include "fdbclient/KeyRangeMap.h"
#include "flow/actorcompiler.h" // has to be last include

// CLIENT_BUGGIFY should be used to randomly introduce failures at run time (like BUGGIFY but for client side testing)
// Unlike BUGGIFY, CLIENT_BUGGIFY can be enabled and disabled at runtime.
#define CLIENT_BUGGIFY_WITH_PROB(x) \
	(getSBVar(__FILE__, __LINE__, BuggifyType::Client) && deterministicRandom()->random01() < (x))
#define CLIENT_BUGGIFY CLIENT_BUGGIFY_WITH_PROB(P_BUGGIFIED_SECTION_FIRES[int(BuggifyType::Client)])
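
// Illustrative sketch of how these macros are typically consumed (not part of this header's
// API; the specific error thrown below is an assumption for the example): guard a client-side
// fault-injection site so it fires only when client buggify is enabled.
//
//   if (CLIENT_BUGGIFY_WITH_PROB(0.01)) {
//       // Injected failure path, taken ~1% of the time a buggified client reaches this line.
//       throw transaction_too_old();
//   }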

// Incomplete types that are reference counted
class DatabaseContext;
template <>
void addref(DatabaseContext* ptr);
template <>
void delref(DatabaseContext* ptr);

void validateOptionValue(Optional<StringRef> value, bool shouldBePresent);

void enableClientInfoLogging();

struct NetworkOptions {
	std::string localAddress;
	std::string clusterFile;
	Optional<std::string> traceDirectory;
	uint64_t traceRollSize;
	uint64_t traceMaxLogsSize;
	std::string traceLogGroup;
	std::string traceFormat;
	std::string traceClockSource;
	std::string traceFileIdentifier;
	Optional<bool> logClientInfo;
	Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions;
	bool runLoopProfilingEnabled;

	NetworkOptions();
};

class Database {
public:
	enum { API_VERSION_LATEST = -1 };

	// Creates a database object that represents a connection to a cluster
	// This constructor uses a preallocated DatabaseContext that may have been created
	// on another thread
	static Database createDatabase(Reference<ClusterConnectionFile> connFile,
	                               int apiVersion,
	                               bool internal = true,
	                               LocalityData const& clientLocality = LocalityData(),
	                               DatabaseContext* preallocatedDb = nullptr);

	static Database createDatabase(std::string connFileName,
	                               int apiVersion,
	                               bool internal = true,
	                               LocalityData const& clientLocality = LocalityData());

	Database() {} // an uninitialized database can be destructed or reassigned safely; that's it
	void operator=(Database const& rhs) { db = rhs.db; }
	Database(Database const& rhs) : db(rhs.db) {}
	Database(Database&& r) noexcept : db(std::move(r.db)) {}
	void operator=(Database&& r) noexcept { db = std::move(r.db); }

	// For internal use by the native client:
	explicit Database(Reference<DatabaseContext> cx) : db(cx) {}
	explicit Database(DatabaseContext* cx) : db(cx) {}
	inline DatabaseContext* getPtr() const { return db.getPtr(); }
	inline DatabaseContext* extractPtr() { return db.extractPtr(); }
	DatabaseContext* operator->() const { return db.getPtr(); }

	const UniqueOrderedOptionList<FDBTransactionOptions>& getTransactionDefaults() const;

private:
	Reference<DatabaseContext> db;
};

void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value = Optional<StringRef>());

// Configures the global networking machinery
void setupNetwork(uint64_t transportId = 0, bool useMetrics = false);

// This call blocks while the network is running. To use the API in a single-threaded
// environment, the calling program must have ACTORs already launched that are waiting
// to use the network. In this case, the program can terminate by calling stopNetwork()
// from a callback, thereby releasing this call to return. In a multithreaded setup
// this call can be called from a dedicated "networking" thread. All the network-based
// callbacks will happen on this second thread. When a program is finished, the
// call stopNetwork (from a non-networking thread) can cause the runNetwork() call to
// return.
//
// Throws network_already_setup if g_network has already been initialized
void runNetwork();

// See above. Can be called from a thread that is not the "networking thread"
//
// Throws network_not_setup if g_network has not been initialized
void stopNetwork();
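
// Illustrative client lifecycle sketch built from the declarations above (a minimal sketch,
// not a prescribed pattern; the cluster-file path and the dedicated std::thread are
// assumptions for the example):
//
//   setupNetwork();
//   std::thread networkThread([] { runNetwork(); }); // blocks until stopNetwork() is called
//   Database db = Database::createDatabase("fdb.cluster", Database::API_VERSION_LATEST);
//   // ... issue transactions against db from this (non-network) thread ...
//   stopNetwork();          // releases runNetwork() on the network thread
//   networkThread.join();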

struct StorageMetrics;

struct TransactionOptions {
	double maxBackoff;
	uint32_t getReadVersionFlags;
	uint32_t sizeLimit;
	int maxTransactionLoggingFieldLength;
	bool checkWritesEnabled : 1;
	bool causalWriteRisky : 1;
	bool commitOnFirstProxy : 1;
	bool debugDump : 1;
	bool lockAware : 1;
	bool readOnly : 1;
	bool firstInBatch : 1;
	bool includePort : 1;
	bool reportConflictingKeys : 1;
	bool expensiveClearCostEstimation : 1;

	TransactionPriority priority;

	TagSet tags; // All tags set on transaction
	TagSet readTags; // Tags that can be sent with read requests

	// update clear function if you add a new field

	TransactionOptions(Database const& cx);
	TransactionOptions();

	void reset(Database const& cx);

private:
	void clear();
};

class ReadYourWritesTransaction; // workaround cyclic dependency
struct TransactionInfo {
	Optional<UID> debugID;
	TaskPriority taskID;
	SpanID spanID;
	bool useProvisionalProxies;
	// Used to save conflicting keys if FDBTransactionOptions::REPORT_CONFLICTING_KEYS is enabled
	// prefix/<key1> : '1' - any keys equal or larger than this key are (probably) conflicting keys
	// prefix/<key2> : '0' - any keys equal or larger than this key are (definitely) not conflicting keys
	std::shared_ptr<CoalescedKeyRangeMap<Value>> conflictingKeys;

	explicit TransactionInfo(TaskPriority taskID, SpanID spanID)
	  : taskID(taskID), spanID(spanID), useProvisionalProxies(false) {}
};

struct TransactionLogInfo : public ReferenceCounted<TransactionLogInfo>, NonCopyable {
	enum LoggingLocation { DONT_LOG = 0, TRACE_LOG = 1, DATABASE = 2 };

	TransactionLogInfo() : logLocation(DONT_LOG), maxFieldLength(0) {}
	TransactionLogInfo(LoggingLocation location) : logLocation(location), maxFieldLength(0) {}
	TransactionLogInfo(std::string id, LoggingLocation location)
	  : logLocation(location), identifier(id), maxFieldLength(0) {}

	void setIdentifier(std::string id) { identifier = id; }
	void logTo(LoggingLocation loc) { logLocation = logLocation | loc; }

	template <typename T>
	void addLog(const T& event) {
		if (logLocation & TRACE_LOG) {
			ASSERT(!identifier.empty());
			event.logEvent(identifier, maxFieldLength);
		}

		if (flushed) {
			return;
		}

		if (logLocation & DATABASE) {
			logsAdded = true;
			static_assert(std::is_base_of<FdbClientLogEvents::Event, T>::value,
			              "Event should be derived class of FdbClientLogEvents::Event");
			trLogWriter << event;
		}
	}

	BinaryWriter trLogWriter{ IncludeVersion() };
	bool logsAdded{ false };
	bool flushed{ false };
	int logLocation;
	int maxFieldLength;
	std::string identifier;
};
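
// Illustrative sketch of routing a transaction's log events (the identifier string and the
// event variable below are placeholders for the example): log both to the trace log and to
// the database.
//
//   Reference<TransactionLogInfo> logInfo(new TransactionLogInfo("example-tr", TransactionLogInfo::TRACE_LOG));
//   logInfo->logTo(TransactionLogInfo::DATABASE);
//   logInfo->addLog(event); // event: any type derived from FdbClientLogEvents::Event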

struct Watch : public ReferenceCounted<Watch>, NonCopyable {
	Key key;
	Optional<Value> value;
	bool valuePresent;
	Optional<Value> setValue;
	bool setPresent;
	Promise<Void> onChangeTrigger;
	Promise<Void> onSetWatchTrigger;
	Future<Void> watchFuture;

	Watch() : watchFuture(Never()), valuePresent(false), setPresent(false) {}
	Watch(Key key) : key(key), watchFuture(Never()), valuePresent(false), setPresent(false) {}
	Watch(Key key, Optional<Value> val)
	  : key(key), value(val), watchFuture(Never()), valuePresent(true), setPresent(false) {}

	void setWatch(Future<Void> watchFuture);
};

class Transaction : NonCopyable {
public:
	explicit Transaction(Database const& cx);
	~Transaction();

	void preinitializeOnForeignThread() { committedVersion = invalidVersion; }

	void setVersion(Version v);
	Future<Version> getReadVersion() { return getReadVersion(0); }
	Future<Version> getRawReadVersion();
	Optional<Version> getCachedReadVersion();

	[[nodiscard]] Future<Optional<Value>> get(const Key& key, bool snapshot = false);
	[[nodiscard]] Future<Void> watch(Reference<Watch> watch);
	[[nodiscard]] Future<Key> getKey(const KeySelector& key, bool snapshot = false);
	// Future< Optional<KeyValue> > get( const KeySelectorRef& key );
	[[nodiscard]] Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
	                                                          const KeySelector& end,
	                                                          int limit,
	                                                          bool snapshot = false,
	                                                          bool reverse = false);
	[[nodiscard]] Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
	                                                          const KeySelector& end,
	                                                          GetRangeLimits limits,
	                                                          bool snapshot = false,
	                                                          bool reverse = false);
	[[nodiscard]] Future<Standalone<RangeResultRef>> getRange(const KeyRange& keys,
	                                                          int limit,
	                                                          bool snapshot = false,
	                                                          bool reverse = false) {
		return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
		                KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
		                limit,
		                snapshot,
		                reverse);
	}
	[[nodiscard]] Future<Standalone<RangeResultRef>> getRange(const KeyRange& keys,
	                                                          GetRangeLimits limits,
	                                                          bool snapshot = false,
	                                                          bool reverse = false) {
		return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
		                KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
		                limits,
		                snapshot,
		                reverse);
	}

	[[nodiscard]] Future<Standalone<VectorRef<const char*>>> getAddressesForKey(const Key& key);

	void enableCheckWrites();
	void addReadConflictRange(KeyRangeRef const& keys);
	void addWriteConflictRange(KeyRangeRef const& keys);
	void makeSelfConflicting();

	Future<Void> warmRange(Database cx, KeyRange keys);

	Future<std::pair<Optional<StorageMetrics>, int>> waitStorageMetrics(KeyRange const& keys,
	                                                                    StorageMetrics const& min,
	                                                                    StorageMetrics const& max,
	                                                                    StorageMetrics const& permittedError,
	                                                                    int shardLimit,
	                                                                    int expectedShardCount);
	// Pass a negative value for `shardLimit` to indicate no limit on the shard number.
	Future<StorageMetrics> getStorageMetrics(KeyRange const& keys, int shardLimit);
	Future<Standalone<VectorRef<KeyRef>>> splitStorageMetrics(KeyRange const& keys,
	                                                          StorageMetrics const& limit,
	                                                          StorageMetrics const& estimated);
	Future<Standalone<VectorRef<ReadHotRangeWithMetrics>>> getReadHotRanges(KeyRange const& keys);

	// Try to split the given range into equally sized chunks based on estimated size.
	// The returned list would still be in form of [keys.begin, splitPoint1, splitPoint2, ... , keys.end]
	Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(KeyRange const& keys, int64_t chunkSize);
	// If checkWriteConflictRanges is true, existing write conflict ranges will be searched for this key
	void set(const KeyRef& key, const ValueRef& value, bool addConflictRange = true);
	void atomicOp(const KeyRef& key,
	              const ValueRef& value,
	              MutationRef::Type operationType,
	              bool addConflictRange = true);
	void clear(const KeyRangeRef& range, bool addConflictRange = true);
	void clear(const KeyRef& key, bool addConflictRange = true);
	[[nodiscard]] Future<Void> commit(); // Throws not_committed or commit_unknown_result errors in normal operation

	void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>());

	Version getCommittedVersion() { return committedVersion; } // May be called only after commit() returns success
	[[nodiscard]] Future<Standalone<StringRef>>
	getVersionstamp(); // Will be fulfilled only after commit() returns success

	Future<uint64_t> getProtocolVersion();

	Promise<Standalone<StringRef>> versionstampPromise;

	uint32_t getSize();
	[[nodiscard]] Future<Void> onError(Error const& e);
	void flushTrLogsIfEnabled();

	// These are to permit use as state variables in actors:
	Transaction();
	void operator=(Transaction&& r) noexcept;

	void reset();
	void fullReset();
	double getBackoff(int errCode);
	void debugTransaction(UID dID) { info.debugID = dID; }

	Future<Void> commitMutations();
	void setupWatches();
	void cancelWatches(Error const& e = transaction_cancelled());

	TransactionInfo info;
	int numErrors;

	std::vector<Reference<Watch>> watches;

	int apiVersionAtLeast(int minVersion) const;

	void checkDeferredError();

	Database getDatabase() const { return cx; }
	static Reference<TransactionLogInfo> createTrLogInfoProbabilistically(const Database& cx);
	TransactionOptions options;
	Span span;
	double startTime;
	Reference<TransactionLogInfo> trLogInfo;

	void setTransactionID(uint64_t id);
	void setToken(uint64_t token);

	const vector<Future<std::pair<Key, Key>>>& getExtraReadConflictRanges() const { return extraConflictRanges; }
	Standalone<VectorRef<KeyRangeRef>> readConflictRanges() const {
		return Standalone<VectorRef<KeyRangeRef>>(tr.transaction.read_conflict_ranges, tr.arena);
	}
	Standalone<VectorRef<KeyRangeRef>> writeConflictRanges() const {
		return Standalone<VectorRef<KeyRangeRef>>(tr.transaction.write_conflict_ranges, tr.arena);
	}

private:
	Future<Version> getReadVersion(uint32_t flags);
	Database cx;

	double backoff;
	Version committedVersion;
	CommitTransactionRequest tr;
	Future<Version> readVersion;
	Promise<Optional<Value>> metadataVersion;
	vector<Future<std::pair<Key, Key>>> extraConflictRanges;
	Promise<Void> commitResult;
	Future<Void> committing;
};
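
// Illustrative usage sketch of the Transaction API declared above (a minimal example, not a
// prescribed pattern; the actor name and the key/value literals are placeholders): the
// standard retry loop in which onError() decides whether an error is retryable and applies
// backoff before the next attempt.
//
//   ACTOR Future<Void> exampleSet(Database cx) {
//       state Transaction tr(cx);
//       loop {
//           try {
//               Optional<Value> v = wait(tr.get(LiteralStringRef("example-key")));
//               tr.set(LiteralStringRef("example-key"), LiteralStringRef("example-value"));
//               wait(tr.commit());
//               return Void();
//           } catch (Error& e) {
//               wait(tr.onError(e)); // resets the transaction and backs off on retryable errors
//           }
//       }
//   }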

ACTOR Future<Version> waitForCommittedVersion(Database cx, Version version, SpanID spanContext);
ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsList(Database cx,
                                                                                  KeyRange keys,
                                                                                  int shardLimit);

std::string unprintable(const std::string&);

int64_t extractIntOption(Optional<StringRef> value,
                         int64_t minValue = std::numeric_limits<int64_t>::min(),
                         int64_t maxValue = std::numeric_limits<int64_t>::max());

// Takes a snapshot of the cluster, specifically the following persistent
// states: coordinator, TLog and storage state
ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID);

// Checks with Data Distributor that it is safe to mark all servers in exclusions as failed
ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exclusions);

inline uint64_t getWriteOperationCost(uint64_t bytes) {
	return bytes / std::max(1, CLIENT_KNOBS->WRITE_COST_BYTE_FACTOR) + 1;
}
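
// Worked example of the cost formula above, using an assumed (not necessarily configured)
// WRITE_COST_BYTE_FACTOR of 16384: getWriteOperationCost(100) = 100 / 16384 + 1 = 1, and
// getWriteOperationCost(40000) = 40000 / 16384 + 1 = 3.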

#include "flow/unactorcompiler.h"
#endif