Merge branch 'master' of github.com:apple/foundationdb into redwood-io-priority
# Conflicts:
#	fdbserver/IPager.h
#	fdbserver/VersionedBTree.actor.cpp

commit 09a8561606
@@ -1,4 +1,5 @@
 # Build artifacts
+/my_build/
 /bin/
 /lib/
 /packages/
@@ -16,7 +16,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-cmake_minimum_required(VERSION 3.13)
+if(WIN32)
+  cmake_minimum_required(VERSION 3.15)
+else()
+  cmake_minimum_required(VERSION 3.13)
+endif()
+
 project(foundationdb
   VERSION 7.1.0
   DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
@@ -196,9 +201,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/BuildFlags.h.in ${CMAKE_CUR
 if (CMAKE_EXPORT_COMPILE_COMMANDS AND WITH_PYTHON)
   add_custom_command(
     OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
-    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
+    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py
     ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
-    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
     COMMENT "Build compile commands for IDE"
   )
   add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
@@ -308,7 +308,7 @@ if(NOT OPEN_FOR_IDE)
   if(RUN_JUNIT_TESTS)
     # Sets up the JUnit testing structure to run through ctest
     #
-    # To add a new junit test, add the class to the JAVA_JUNIT_TESTS variable in `src/tests.cmake`. Note that if you run a Suite,
+    # To add a new junit test, add the class to the JAVA_JUNIT_TESTS variable in `src/tests.cmake`. Note that if you run a Suite,
     # ctest will NOT display underlying details of the suite itself, so it's best to avoid junit suites in general. Also,
     # if you need a different runner other than JUnitCore, you'll have to modify this so be aware.
     #
@@ -316,8 +316,8 @@ if(NOT OPEN_FOR_IDE)
     #
     # ctest .
     #
-    # from the ${BUILD_DIR}/bindings/java subdirectory.
-    #
+    # from the ${BUILD_DIR}/bindings/java subdirectory.
+    #
     # Note: if you are running from ${BUILD_DIR}, additional tests of the native logic will be run. To avoid these, use
     #
     # ctest . -R java-unit
@@ -325,15 +325,15 @@ if(NOT OPEN_FOR_IDE)
     # ctest has lots of flexible command options, so be sure to refer to its documentation if you want to do something specific(documentation
     # can be found at https://cmake.org/cmake/help/v3.19/manual/ctest.1.html)

-    add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
-      ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
+    add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
+      ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
       ${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
     )
     get_property(junit_jar_path TARGET fdb-junit PROPERTY JAR_FILE)

     add_test(NAME java-unit
       COMMAND ${Java_JAVA_EXECUTABLE}
         -classpath "${target_jar}:${junit_jar_path}:${JUNIT_CLASSPATH}"
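Editor's note: the comment block in the hunk above describes how unit tests are wired into ctest. As a quick illustration — this is a hypothetical sketch, not a class from this commit — a test added to `JAVA_JUNIT_TESTS` is just a plain JUnit 5 class; the class name and assertion here are invented, but `Tuple.from`, `pack`, `fromBytes`, and `getString` are the same APIs exercised elsewhere in this diff:

```java
package com.apple.foundationdb.tuple;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

// Hypothetical example: once this file's path is appended to the
// JAVA_JUNIT_TESTS variable in src/tests.cmake, `ctest . -R java-unit`
// (run from ${BUILD_DIR}/bindings/java) will pick it up.
public class TupleRoundTripTest {
	@Test
	void packThenUnpackReturnsOriginalString() {
		Tuple t = Tuple.from("hello");
		Assertions.assertEquals("hello", Tuple.fromBytes(t.pack()).getString(0), "Tuple did not round-trip");
	}
}
```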
@@ -346,12 +346,12 @@ if(NOT OPEN_FOR_IDE)
   if(RUN_JAVA_INTEGRATION_TESTS)
     # Set up the integration tests. These tests generally require a running database server to function properly. Most tests
     # should be written such that they can be run in parallel with other integration tests (e.g. try to use a unique key range for each test
-    # whenever possible), because it's a reasonable assumption that a single server will be shared among multiple tests, and might do so
+    # whenever possible), because it's a reasonable assumption that a single server will be shared among multiple tests, and might do so
     # concurrently.
     #
     # Integration tests are run through ctest the same way as unit tests, but their label is prefixed with the entry 'integration-'.
-    # Note that most java integration tests will fail if they can't quickly connect to a running FDB instance(depending on how the test is written, anyway).
-    # However, if you want to explicitly skip them, you can run
+    # Note that most java integration tests will fail if they can't quickly connect to a running FDB instance(depending on how the test is written, anyway).
+    # However, if you want to explicitly skip them, you can run
     #
     # `ctest -E integration`
     #
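Editor's note: the "unique key range" guidance above can be made concrete. The following is a hypothetical sketch (class name, key layout, and API version are illustrative; it assumes a reachable cluster, as all of these integration tests do) of a test that isolates its writes under a random prefix so that concurrent tests sharing one server cannot collide:

```java
package com.apple.foundationdb;

import java.util.UUID;

import com.apple.foundationdb.tuple.Tuple;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

// Hypothetical sketch: every run writes under its own random prefix.
public class UniquePrefixIntegrationTest {
	@Test
	void writesUnderUniquePrefix() throws Exception {
		FDB fdb = FDB.selectAPIVersion(710);
		try (Database db = fdb.open()) {
			String prefix = UUID.randomUUID().toString();
			db.run(tr -> {
				tr.set(Tuple.from(prefix, "key").pack(), Tuple.from("value").pack());
				return null;
			});
			String fetched = db.run(tr ->
			    Tuple.fromBytes(tr.get(Tuple.from(prefix, "key").pack()).join()).getString(0));
			Assertions.assertEquals("value", fetched, "Wrong result!");
		}
	}
}
```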
@@ -368,8 +368,8 @@ if(NOT OPEN_FOR_IDE)
     # empty, consider generating a random prefix for the keys you write, use
     # the directory layer with a unique path, etc.)
     #
-    add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
-      ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
+    add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
+      ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
       ${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
@@ -382,7 +382,14 @@ if(NOT OPEN_FOR_IDE)
       COMMAND ${Java_JAVA_EXECUTABLE}
         -classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
         -Djava.library.path=${CMAKE_BINARY_DIR}/lib
-        org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner"
+        org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-T MultiClient"
     )
+
+    add_multi_fdbclient_test(NAME java-multi-integration
+      COMMAND ${Java_JAVA_EXECUTABLE}
+        -classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
+        -Djava.library.path=${CMAKE_BINARY_DIR}/lib
+        org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-t MultiClient"
+    )
+
   endif()
@@ -22,4 +22,19 @@ To skip integration tests, execute `ctest -E integration` from `${BUILD_DIR}/bin
 To run _only_ integration tests, run `ctest -R integration` from `${BUILD_DIR}/bindings/java`.

 There are lots of other useful `ctest` commands, which we don't need to get into here. For more information,
-see the [https://cmake.org/cmake/help/v3.19/manual/ctest.1.html](ctest documentation).
+see the [ctest documentation](https://cmake.org/cmake/help/v3.19/manual/ctest.1.html).
+
+### Multi-Client tests
+Multi-Client tests are integration tests that can only be executed when multiple clusters are running. To write a multi-client
+test, do the following (a minimal sketch follows below):
+
+1. Tag all tests that require multiple clients with `@Tag("MultiClient")`
+2. Ensure that your tests have the `MultiClientHelper` extension present, and registered as an extension
+3. Ensure that your test class is in the JAVA_INTEGRATION_TESTS list in `tests.cmake`
+
+(see `BasicMultiClientIntegrationTest` for a good reference example)
+
+It is important to note that it requires significant time to start and stop 3 separate clusters; if the underlying test takes a long time to run,
+ctest will time out and kill the test. When that happens, there is no guarantee that the FDB clusters will be properly stopped! It is thus
+in your best interest to ensure that all tests run in a relatively small amount of time, or have a longer timeout attached.
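Editor's note: as referenced in the steps above, a minimal multi-client test sketch (a trimmed-down, hypothetical variant of the `BasicMultiClientIntegrationTest` added in this very commit — only the class name and body are invented):

```java
package com.apple.foundationdb;

import java.util.Collection;

import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

public class MyMultiClientTest {
	// Step 2: register the MultiClientHelper extension; it reads the cluster
	// files from FDB_CLUSTERS and closes the opened databases after each test.
	@RegisterExtension public static final MultiClientHelper clientHelper = new MultiClientHelper();

	@Test
	@Tag("MultiClient") // Step 1: tag it so single-cluster runs exclude it
	void runsAgainstEveryCluster() throws Exception {
		FDB fdb = FDB.selectAPIVersion(630);
		Collection<Database> dbs = clientHelper.openDatabases(fdb);
		for (Database db : dbs) {
			// exercise each cluster here
		}
	}
	// Step 3: list this class in JAVA_INTEGRATION_TESTS in tests.cmake.
}
```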
@@ -0,0 +1,69 @@
/*
 * BasicMultiClientIntegrationTest
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.Collection;
import java.util.Random;

import com.apple.foundationdb.tuple.Tuple;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

/**
 * Simple class to test multi-client logic.
 *
 * Note that all Multi-client-only tests _must_ be tagged with "MultiClient", which will ensure that they are excluded
 * from non-multi-threaded tests.
 */
public class BasicMultiClientIntegrationTest {
    @RegisterExtension public static final MultiClientHelper clientHelper = new MultiClientHelper();

    @Test
    @Tag("MultiClient")
    void testMultiClientWritesAndReadsData() throws Exception {
        FDB fdb = FDB.selectAPIVersion(630);
        fdb.options().setKnob("min_trace_severity=5");

        Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
        System.out.print("Starting tests.");
        Random rand = new Random();
        for (int counter = 0; counter < 25; ++counter) {
            for (Database db : dbs) {
                String key = Integer.toString(rand.nextInt(100000000));
                String val = Integer.toString(rand.nextInt(100000000));

                db.run(tr -> {
                    tr.set(Tuple.from(key).pack(), Tuple.from(val).pack());
                    return null;
                });

                String fetchedVal = db.run(tr -> {
                    byte[] result = tr.get(Tuple.from(key).pack()).join();
                    return Tuple.fromBytes(result).getString(0);
                });
                Assertions.assertEquals(val, fetchedVal, "Wrong result!");
            }
            Thread.sleep(200);
        }
    }
}
@@ -0,0 +1,201 @@
/*
 * CycleMultiClientIntegrationTest
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.Arrays;
import java.util.ArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import com.apple.foundationdb.tuple.Tuple;

import org.junit.jupiter.api.Assertions;

/**
 * Setup: generate a cycle 0 -> 1 -> 2 -> 3 -> 0 of length 4.
 * Process: randomly choose an element and swap the 2nd and 3rd elements, treating the chosen one as the 1st
 * (e.g., picking k in k -> v1 -> v2 -> v3 rewrites the links to k -> v2 -> v1 -> v3).
 * Check: verify no element is lost or added, and they still form a cycle.
 *
 * This test is to verify the atomicity of transactions.
 */
public class CycleMultiClientIntegrationTest {
    public static final MultiClientHelper clientHelper = new MultiClientHelper();

    // more write txn than validate txn, as parent thread waits only for validate txn.
    private static final int writeTxnCnt = 2000;
    private static final int validateTxnCnt = 1000;
    private static final int threadPerDB = 5;

    private static final int cycleLength = 4;
    private static List<String> expected = new ArrayList<>(Arrays.asList("0", "1", "2", "3"));

    public static void main(String[] args) throws Exception {
        FDB fdb = FDB.selectAPIVersion(710);
        setupThreads(fdb);
        Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
        System.out.println("Starting tests");
        setup(dbs);
        System.out.println("Start processing and validating");
        process(dbs);
        check(dbs);
        System.out.println("Test finished");
    }

    private static synchronized void setupThreads(FDB fdb) {
        int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
        fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
        System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
        fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
        fdb.options().setTraceEnable("/tmp");
        fdb.options().setKnob("min_trace_severity=5");
    }

    private static void setup(Collection<Database> dbs) {
        // 0 -> 1 -> 2 -> 3 -> 0
        for (Database db : dbs) {
            db.run(tr -> {
                for (int k = 0; k < cycleLength; k++) {
                    String key = Integer.toString(k);
                    String value = Integer.toString((k + 1) % cycleLength);
                    tr.set(Tuple.from(key).pack(), Tuple.from(value).pack());
                }
                return null;
            });
        }
    }

    private static void process(Collection<Database> dbs) {
        for (Database db : dbs) {
            for (int i = 0; i < threadPerDB; i++) {
                final Thread thread = new Thread(CycleWorkload.create(db));
                thread.start();
            }
        }
    }

    private static void check(Collection<Database> dbs) throws InterruptedException {
        final Map<Thread, CycleChecker> threadsToCheckers = new HashMap<>();
        for (Database db : dbs) {
            for (int i = 0; i < threadPerDB; i++) {
                final CycleChecker checker = new CycleChecker(db);
                final Thread thread = new Thread(checker);
                thread.start();
                threadsToCheckers.put(thread, checker);
            }
        }

        for (Map.Entry<Thread, CycleChecker> entry : threadsToCheckers.entrySet()) {
            entry.getKey().join();
            final boolean succeed = entry.getValue().succeed();
            Assertions.assertTrue(succeed, "Cycle test failed");
        }
    }

    public static class CycleWorkload implements Runnable {

        private final Database db;

        private CycleWorkload(Database db) {
            this.db = db;
        }

        public static CycleWorkload create(Database db) {
            return new CycleWorkload(db);
        }

        @Override
        public void run() {
            for (int i = 0; i < writeTxnCnt; i++) {
                db.run(tr -> {
                    final int k = ThreadLocalRandom.current().nextInt(cycleLength);
                    final String key = Integer.toString(k);
                    byte[] result1 = tr.get(Tuple.from(key).pack()).join();
                    String value1 = Tuple.fromBytes(result1).getString(0);

                    byte[] result2 = tr.get(Tuple.from(value1).pack()).join();
                    String value2 = Tuple.fromBytes(result2).getString(0);

                    byte[] result3 = tr.get(Tuple.from(value2).pack()).join();
                    String value3 = Tuple.fromBytes(result3).getString(0);

                    // the value is unused; the read still places the 4th node in the
                    // transaction's read-conflict range
                    byte[] result4 = tr.get(Tuple.from(value3).pack()).join();

                    tr.set(Tuple.from(key).pack(), Tuple.from(value2).pack());
                    tr.set(Tuple.from(value2).pack(), Tuple.from(value1).pack());
                    tr.set(Tuple.from(value1).pack(), Tuple.from(value3).pack());
                    return null;
                });
            }
        }
    }

    public static class CycleChecker implements Runnable {
        private final Database db;
        private boolean succeed;

        public CycleChecker(Database db) {
            this.db = db;
            this.succeed = true;
        }

        public static CycleChecker create(Database db) {
            return new CycleChecker(db);
        }

        @Override
        public void run() {
            for (int i = 0; i < validateTxnCnt; i++) {
                db.run(tr -> {
                    final int k = ThreadLocalRandom.current().nextInt(cycleLength);
                    final String key = Integer.toString(k);
                    byte[] result1 = tr.get(Tuple.from(key).pack()).join();
                    String value1 = Tuple.fromBytes(result1).getString(0);

                    byte[] result2 = tr.get(Tuple.from(value1).pack()).join();
                    String value2 = Tuple.fromBytes(result2).getString(0);

                    byte[] result3 = tr.get(Tuple.from(value2).pack()).join();
                    String value3 = Tuple.fromBytes(result3).getString(0);

                    byte[] result4 = tr.get(Tuple.from(value3).pack()).join();
                    String value4 = Tuple.fromBytes(result4).getString(0);

                    if (!key.equals(value4)) {
                        succeed = false;
                    }
                    List<String> actual = new ArrayList<>(Arrays.asList(value1, value2, value3, value4));
                    Collections.sort(actual);
                    if (!expected.equals(actual)) {
                        succeed = false;
                    }
                    return null;
                });
            }
        }

        public boolean succeed() {
            return succeed;
        }
    }
}
@@ -19,8 +19,6 @@
 */
package com.apple.foundationdb;

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -0,0 +1,82 @@
/*
 * MultiClientHelper.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.ArrayList;
import java.util.Collection;

import org.junit.jupiter.api.extension.AfterEachCallback;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * Callback to help define a multi-client scenario and ensure that
 * the clients can be configured properly.
 */
public class MultiClientHelper implements BeforeAllCallback, AfterEachCallback {
    private String[] clusterFiles;
    private Collection<Database> openDatabases;

    public static String[] readClusterFromEnv() {
        /*
         * Reads the cluster file lists from the ENV variable
         * FDB_CLUSTERS.
         */
        String clusterFilesProp = System.getenv("FDB_CLUSTERS");
        if (clusterFilesProp == null) {
            throw new IllegalStateException("Missing FDB cluster connection file names");
        }

        return clusterFilesProp.split(";");
    }

    Collection<Database> openDatabases(FDB fdb) {
        if (openDatabases != null) {
            return openDatabases;
        }
        if (clusterFiles == null) {
            clusterFiles = readClusterFromEnv();
        }
        Collection<Database> dbs = new ArrayList<Database>();
        for (String arg : clusterFiles) {
            System.out.printf("Opening Cluster: %s\n", arg);
            dbs.add(fdb.open(arg));
        }

        this.openDatabases = dbs;
        return dbs;
    }

    @Override
    public void beforeAll(ExtensionContext arg0) throws Exception {
        clusterFiles = readClusterFromEnv();
    }

    @Override
    public void afterEach(ExtensionContext arg0) throws Exception {
        // close any databases that have been opened
        if (openDatabases != null) {
            for (Database db : openDatabases) {
                db.close();
            }
        }
        openDatabases = null;
    }
}
@@ -0,0 +1,143 @@
package com.apple.foundationdb;

import com.apple.foundationdb.tuple.Tuple;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;

import org.junit.jupiter.api.Assertions;

/**
 * Each cluster has a queue; a producer writes a key and then sends a message to this in-JVM queue.
 * A consumer consumes the key by checking its existence in the database; if it does not find the key,
 * the test fails.
 *
 * This test is to verify the causal consistency of transactions for a multi-threaded client.
 */
public class SidebandMultiThreadClientTest {
    public static final MultiClientHelper clientHelper = new MultiClientHelper();

    private static final Map<Database, BlockingQueue<String>> db2Queues = new HashMap<>();
    private static final int threadPerDB = 5;
    private static final int txnCnt = 1000;

    public static void main(String[] args) throws Exception {
        FDB fdb = FDB.selectAPIVersion(710);
        setupThreads(fdb);
        Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
        for (Database db : dbs) {
            db2Queues.put(db, new LinkedBlockingQueue<>());
        }
        System.out.println("Start processing and validating");
        process(dbs);
        check(dbs);
        System.out.println("Test finished");
    }

    private static synchronized void setupThreads(FDB fdb) {
        int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
        fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
        System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
        fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
        fdb.options().setTraceEnable("/tmp");
        fdb.options().setKnob("min_trace_severity=5");
    }

    private static void process(Collection<Database> dbs) {
        for (Database db : dbs) {
            for (int i = 0; i < threadPerDB; i++) {
                final Thread thread = new Thread(Producer.create(db, db2Queues.get(db)));
                thread.start();
            }
        }
    }

    private static void check(Collection<Database> dbs) throws InterruptedException {
        final Map<Thread, Consumer> threads2Consumers = new HashMap<>();
        for (Database db : dbs) {
            for (int i = 0; i < threadPerDB; i++) {
                final Consumer consumer = Consumer.create(db, db2Queues.get(db));
                final Thread thread = new Thread(consumer);
                thread.start();
                threads2Consumers.put(thread, consumer);
            }
        }

        for (Map.Entry<Thread, Consumer> entry : threads2Consumers.entrySet()) {
            entry.getKey().join();
            final boolean succeed = entry.getValue().succeed;
            Assertions.assertTrue(succeed, "Sideband test failed");
        }
    }

    public static class Producer implements Runnable {
        private final Database db;
        private final BlockingQueue<String> queue;

        private Producer(Database db, BlockingQueue<String> queue) {
            this.db = db;
            this.queue = queue;
        }

        public static Producer create(Database db, BlockingQueue<String> queue) {
            return new Producer(db, queue);
        }

        @Override
        public void run() {
            for (int i = 0; i < txnCnt; i++) {
                final long suffix = ThreadLocalRandom.current().nextLong();
                final String key = String.format("Sideband/Multithread/Test/%d", suffix);
                db.run(tr -> {
                    tr.set(Tuple.from(key).pack(), Tuple.from("bar").pack());
                    return null;
                });
                queue.offer(key);
            }
        }
    }

    public static class Consumer implements Runnable {
        private final Database db;
        private final BlockingQueue<String> queue;
        private boolean succeed;

        private Consumer(Database db, BlockingQueue<String> queue) {
            this.db = db;
            this.queue = queue;
            this.succeed = true;
        }

        public static Consumer create(Database db, BlockingQueue<String> queue) {
            return new Consumer(db, queue);
        }

        @Override
        public void run() {
            try {
                for (int i = 0; i < txnCnt && succeed; i++) {
                    final String key = queue.take();
                    db.run(tr -> {
                        byte[] result = tr.get(Tuple.from(key).pack()).join();
                        if (result == null) {
                            System.out.println("FAILED to get key " + key + " from DB " + db);
                            succeed = false;
                        }
                        if (!succeed) {
                            return null;
                        }
                        // decoded only to verify the value is well-formed; otherwise unused
                        String value = Tuple.fromBytes(result).getString(0);
                        return null;
                    });
                }
            } catch (InterruptedException e) {
                System.out.println("Get Exception in consumer: " + e);
                succeed = false;
            }
        }
    }
}
@@ -48,12 +48,16 @@ set(JUNIT_RESOURCES
 set(JAVA_INTEGRATION_TESTS
   src/integration/com/apple/foundationdb/DirectoryTest.java
   src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java
+  src/integration/com/apple/foundationdb/BasicMultiClientIntegrationTest.java
+  src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
+  src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
 )

 # Resources that are used in integration testing, but are not explicitly test files (JUnit rules,
 # utility classes, and so forth)
 set(JAVA_INTEGRATION_RESOURCES
   src/integration/com/apple/foundationdb/RequiresDatabase.java
+  src/integration/com/apple/foundationdb/MultiClientHelper.java
 )
@@ -81,5 +81,15 @@ if (NOT WIN32 AND NOT OPEN_FOR_IDE)
     COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
             ${CMAKE_BINARY_DIR}/bin/fdbcli
             @CLUSTER_FILE@
+            1
   )
+  add_fdbclient_test(
+    NAME multi_process_fdbcli_tests
+    PROCESS_NUMBER 5
+    TEST_TIMEOUT 120 # The test can take close to 1 minute sometimes; set the timeout to 2 minutes to be safe
+    COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
+            ${CMAKE_BINARY_DIR}/bin/fdbcli
+            @CLUSTER_FILE@
+            5
+  )
 endif()
@@ -332,22 +332,90 @@ def transaction(logger):
     output7 = run_fdbcli_command('get', 'key')
     assert output7 == "`key': not found"

+def get_fdb_process_addresses():
+    # get all processes' network addresses
+    output = run_fdbcli_command('kill')
+    # except the first line, each line is one process
+    addresses = output.split('\n')[1:]
+    assert len(addresses) == process_number
+    return addresses
+
+@enable_logging()
+def coordinators(logger):
+    # we should only have one coordinator for now
+    output1 = run_fdbcli_command('coordinators')
+    assert len(output1.split('\n')) > 2
+    cluster_description = output1.split('\n')[0].split(': ')[-1]
+    logger.debug("Cluster description: {}".format(cluster_description))
+    coordinators = output1.split('\n')[1].split(': ')[-1]
+    # verify the coordinator
+    coordinator_list = get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')
+    assert len(coordinator_list) == 1
+    assert coordinator_list[0]['address'] == coordinators
+    # verify the cluster description
+    assert get_value_from_status_json(True, 'cluster', 'connection_string').startswith('{}:'.format(cluster_description))
+    addresses = get_fdb_process_addresses()
+    # set all 5 processes as coordinators and update the cluster description
+    new_cluster_description = 'a_simple_description'
+    run_fdbcli_command('coordinators', *addresses, 'description={}'.format(new_cluster_description))
+    # verify now we have 5 coordinators and the description is updated
+    output2 = run_fdbcli_command('coordinators')
+    assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description
+    assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(5, ','.join(addresses))
+    # auto change should go back to 1 coordinator
+    run_fdbcli_command('coordinators', 'auto')
+    assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1
+
+@enable_logging()
+def exclude(logger):
+    # get all processes' network addresses
+    addresses = get_fdb_process_addresses()
+    logger.debug("Cluster processes: {}".format(' '.join(addresses)))
+    # There should be no excluded process for now
+    no_excluded_process_output = 'There are currently no servers or localities excluded from the database.'
+    output1 = run_fdbcli_command('exclude')
+    assert no_excluded_process_output in output1
+    # randomly pick one and exclude the process
+    excluded_address = random.choice(addresses)
+    # sometimes we need to retry the exclude
+    while True:
+        logger.debug("Excluding process: {}".format(excluded_address))
+        error_message = run_fdbcli_command_and_get_error('exclude', excluded_address)
+        if not error_message:
+            break
+        logger.debug("Retry exclude after 1 second")
+        time.sleep(1)
+    output2 = run_fdbcli_command('exclude')
+    # logger.debug(output3)
+    assert 'There are currently 1 servers or localities being excluded from the database' in output2
+    assert excluded_address in output2
+    run_fdbcli_command('include', excluded_address)
+    # check the include is successful
+    output4 = run_fdbcli_command('exclude')
+    assert no_excluded_process_output in output4

 if __name__ == '__main__':
-    # fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file>
-    assert len(sys.argv) == 3, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file>"
+    # fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>
+    assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>"
     # shell command template
     command_template = [sys.argv[1], '-C', sys.argv[2], '--exec']
     # tests for fdbcli commands
     # assertions will fail if fdbcli does not work as expected
-    advanceversion()
-    cache_range()
-    consistencycheck()
-    datadistribution()
-    kill()
-    lockAndUnlock()
-    maintenance()
-    setclass()
-    suspend()
-    transaction()
+    process_number = int(sys.argv[3])
+    if process_number == 1:
+        advanceversion()
+        cache_range()
+        consistencycheck()
+        datadistribution()
+        kill()
+        lockAndUnlock()
+        maintenance()
+        setclass()
+        suspend()
+        transaction()
+    else:
+        assert process_number > 1, "Process number should be positive"
+        coordinators()
+        exclude()
@@ -1,44 +0,0 @@
FROM centos:6
LABEL version=0.0.4

RUN yum install -y yum-utils
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN yum -y install centos-release-scl
RUN yum install -y devtoolset-7

# install cmake
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz > /tmp/cmake.tar.gz &&\
    echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
    sha256sum -c /tmp/cmake-sha.txt &&\
    cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/

# install boost
RUN curl -L https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
    cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
    sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_72_0/boost /usr/local/include/ &&\
    rm -rf boost.tar.bz2 boost_1_72_0

# install mono (for actorcompiler)
RUN yum install -y epel-release
RUN yum install -y mono-core

# install Java
RUN yum install -y java-1.8.0-openjdk-devel

# install LibreSSL
RUN curl https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
    cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
    sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
    cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
    cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
    rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz


# install dependencies for bindings and documentation
# python 2.7 is required for the documentation
RUN yum install -y rh-python36-python-devel rh-ruby24 golang python27

# install packaging tools
RUN yum install -y rpm-build debbuild

CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
@@ -1,279 +0,0 @@
#!/usr/bin/env bash

arguments_usage() {
    cat <<EOF
usage: build.sh [-h] [commands]
       -h: print this help message and
           abort execution

Will execute the passed commands
in the order they were passed
EOF
}

arguments_parse() {
    local __res=0
    while getopts ":h" opt
    do
        case ${opt} in
            h )
                arguments_usage
                __res=2
                break
                ;;
            \? )
                echo "Unknown option ${opt}"
                arguments_usage
                __res=1
                break
                ;;
        esac
    done
    shift $((OPTIND -1))
    commands=("$@")
    return ${__res}
}

configure() {
    local __res=0
    for _ in 1
    do
        cmake ../foundationdb ${CMAKE_EXTRA_ARGS}
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

build_fast() {
    local __res=0
    for _ in 1
    do
        make -j`nproc`
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

build() {
    local __res=0
    for _ in 1
    do
        configure
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

package_fast() {
    local __res=0
    for _ in 1
    do
        make -j`nproc` packages
        cpack
        cpack -G RPM -D GENERATE_EL6=ON
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

package() {
    local __res=0
    for _ in 1
    do
        build
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        package_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

rpm() {
    local __res=0
    for _ in 1
    do
        configure
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        fakeroot cpack -G RPM -D GENERATE_EL6=ON
        fakeroot cpack -G RPM
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

deb() {
    local __res=0
    for _ in 1
    do
        configure
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        fakeroot cpack -G DEB
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

test-fast() {
    local __res=0
    for _ in 1
    do
        ctest -j`nproc` ${CTEST_EXTRA_ARGS}
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

test() {
    local __res=0
    for _ in 1
    do
        build
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        test-fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

main() {
    local __res=0
    for _ in 1
    do
        arguments_parse "$@"
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            if [ ${__res} -eq 2 ]
            then
                # in this case there was no error
                # We still want to exit the script
                __res=0
            fi
            break
        fi
        echo "Num commands ${#commands[@]}"
        for command in "${commands[@]}"
        do
            echo "Command: ${command}"
            case ${command} in
                configure )
                    configure
                    __res=$?
                    ;;
                build )
                    build
                    __res=$?
                    ;;
                build/fast )
                    build_fast
                    __res=$?
                    ;;
                package )
                    package
                    __res=$?
                    ;;
                package/fast )
                    package_fast
                    __res=$?
                    ;;
                rpm )
                    rpm
                    __res=$?
                    ;;
                deb )
                    deb
                    __res=$?
                    ;;
                test-fast)
                    test-fast
                    __res=$?
                    ;;
                test)
                    test
                    __res=$?
                    ;;
                * )
                    echo "ERROR: Command not found ($command)"
                    __res=1
                    ;;
            esac
            if [ ${__res} -ne 0 ]
            then
                break
            fi
        done
    done
    return ${__res}
}

main "$@"
@@ -1,3 +0,0 @@
FROM centos:6

RUN yum install -y yum-utils upstart initscripts
@@ -1,3 +0,0 @@
FROM centos:7

RUN yum install -y yum-utils systemd sysvinit-tools
@@ -1,3 +0,0 @@
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y systemd python
@@ -1,65 +0,0 @@
version: "3"

services:

  common: &common
    image: foundationdb-build:0.0.4
    build:
      context: .
      dockerfile: Dockerfile

  build-setup: &build-setup
    <<: *common
    depends_on: [common]
    #debuginfo builds need the build path to be longer than
    #the path where debuginfo sources are places. Crazy, yes,
    #see the manual for CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX.
    volumes:
      - ../..:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/foundationdb
      - ${BUILDDIR}:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
    working_dir: /foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build

  configure: &configure
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh configure

  build: &build
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh build

  build-fast: &build-fast
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh build/fast

  rpm: &rpm
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh rpm

  deb: &deb
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh deb

  package: &package
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package

  package-fast: &package-fast
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package/fast

  test-fast: &test-fast
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test-fast

  test: &test
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test

  snowflake-ci: &snowflake-ci
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package test-fast

  shell:
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash
@@ -1,17 +0,0 @@
[centos6]
name = fdb-centos6
location = centos6-test
packages = ^.*el6((?!debuginfo).)*\.rpm$
format = rpm

[centos7]
name = fdb-centos7
location = centos7-test
packages = ^.*el7((?!debuginfo).)*\.rpm$
format = rpm

[ubuntu_16_04]
name = fdb-debian
location = debian-test
packages = ^.*\.deb$
format = deb
@@ -1,32 +0,0 @@
#!/usr/bin/env bash

source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/deb.sh
source ${source_dir}/modules/tests.sh
source ${source_dir}/modules/test_args.sh

main() {
    local __res=0
    enterfun
    for _ in 1
    do
        test_args_parse "$@"
        __res=$?
        if [ ${__res} -eq 2 ]
        then
            __res=0
            break
        elif [ ${__res} -ne 0 ]
        then
            break
        fi
        tests_main
    done
    exitfun
    return ${__res}
}

main "$@"
@@ -1,5 +0,0 @@
cmake_minimum_required(VERSION 2.8.0)
project(fdb_c_app C)
find_package(FoundationDB-Client REQUIRED)
add_executable(app app.c)
target_link_libraries(app PRIVATE fdb_c)
@@ -1,7 +0,0 @@
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

int main(int argc, char* argv[]) {
    fdb_select_api_version(710);
    return 0;
}
@@ -1,114 +0,0 @@
#!/usr/bin/env bash

if [ -z ${arguments_sh_included+x} ]
then
    arguments_sh_included=1

    source ${source_dir}/modules/util.sh

    arguments_usage() {
        cat <<EOF
usage: test_packages.sh [-h] [commands]
       -h: print this help message and
           abort execution
       -b DIR: point set the fdb build directory
           (this is a required argument).
       -s DIR: Path to fdb source directory.
       -p STR: Colon-separated list of package
           file names (without path) to
           test.
       -c PATH: Path to a ini-file with the docker
           configuration
       -t TEST: One of DEB, RPM, ALL
       -n TESTS: Colon separated list of test names
           to run (will run all if this option
           is not set)
       -j NUM  Number of threads the tester should
           run in parallel.
       -P STR  Pruning strategy for docker container
           (Can be ALL|FAILED|SUCCEEDED|NONE)
           Defaults to "SUCCEEDED"

Will execute the passed commands
in the order they were passed
EOF
    }

    arguments_parse() {
        local __res=0
        run_deb_tests=1
        run_rpm_tests=1
        docker_parallelism=1
        pruning_strategy=SUCCEEDED
        while getopts ":hb:s:p:c:t:n:j:P:" opt
        do
            case ${opt} in
                h )
                    arguments_usage
                    __res=2
                    break
                    ;;
                b )
                    fdb_build="${OPTARG}"
                    ;;
                s )
                    fdb_source="${OPTARG}"
                    ;;
                p )
                    fdb_packages="${OPTARG}"
                    ;;
                c )
                    docker_ini="${OPTARG}"
                    ;;
                t )
                    if [ "${OPTARG}" = "DEB" ]
                    then
                        run_rpm_tests=0
                    elif [ "${OPTARG}" = "RPM" ]
                    then
                        run_deb_tests=0
                    elif [ "${OPTARG}" != "ALL" ]
                    then
                        echo -e "${RED}No such test: ${OPTARG}${NC}"
                        echo "Note: Currently known tests are: RPM, DEB, and ALL"
                        exit 1
                    fi
                    ;;
                n )
                    tests_to_run="${OPTARG}"
                    ;;
                j )
                    docker_parallelism="${OPTARG}"
                    if [[ $docker_parallelism =~ "^[0-9]+$" ]]
                    then
                        echo -e "${RED}Error: -j expects a number, ${OPTARG}, is not a number" >&2
                        __res=1
                        break
                    elif [ $docker_parallelism -lt 1 ]
                    then
                        echo -e "${RED}Error: -j ${OPTARG} makes no sense" >&2
                        __res=1
                        break
                    fi
                    ;;
                P )
                    pruning_strategy="${OPTARG}"
                    if ! [[ "${pruning_strategy}" =~ ^(ALL|FAILED|SUCCEEDED|NONE)$ ]]
                    then
                        fail "Unknown pruning strategy ${pruning_strategy}"
                    fi
                    ;;
                \? )
                    curr_index="$((OPTIND-1))"
                    echo "Unknown option ${@:${curr_index}:1}"
                    arguments_usage
                    __res=1
                    break
                    ;;
            esac
        done
        shift $((OPTIND -1))
        commands=("$@")
        return ${__res}
    }
fi
@@ -1,123 +0,0 @@
#!/usr/bin/env bash

if [ -z ${config_sh_included+x} ]
then
    config_sh_included=1

    source ${source_dir}/modules/util.sh

    config_load_vms() {
        local __res=0
        enterfun
        for _ in 1
        do
            if [ -z "${docker_ini+x}"]
            then
                docker_file="${source_dir}/../docker.ini"
            fi
            # parse the ini file and read it into an
            # associative array
            eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) printf "ini_%s%s=\47%s\47\n", $1, section, $2 }' ${docker_file})"
            vms=( "${!ini_name[@]}" )
            if [ $? -ne 0 ]
            then
                echo "ERROR: Could not parse config-file ${docker_file}"
                __res=1
                break
            fi
        done
        exitfun
        return ${__res}
    }

    config_find_packages() {
        local __res=0
        enterfun
        for _ in 1
        do
            cd ${fdb_build}
            while read f
            do
                if [[ "${f}" =~ .*"clients".* || "${f}" =~ .*"server".* ]]
                then
                    if [ -z ${fdb_packages+x} ]
                    then
                        fdb_packages="${f}"
                    else
                        fdb_packages="${fdb_packages}:${f}"
                    fi
                fi
            done <<< "$(ls *.deb *.rpm)"
            if [ $? -ne 0 ]
            then
                __res=1
                break
            fi
        done
        exitfun
        return ${__res}
    }

    get_fdb_source() {
        local __res=0
        enterfun
        cd ${source_dir}
        while true
        do
            if [ -d .git ]
            then
                # we found the root
                pwd
                break
            fi
            if [ `pwd` = "/" ]
            then
                __res=1
                break
            fi
            cd ..
        done
        exitfun
        return ${__res}
    }

    fdb_build=0

    config_verify() {
        local __res=0
        enterfun
        for _ in 1
        do
            if [ -z ${fdb_source+x} ]
            then
                fdb_source=`get_fdb_source`
            fi
            if [ ! -d "${fdb_build}" ]
            then
                __res=1
                echo "ERROR: Could not find fdb build dir: ${fdb_build}"
                echo "       Either set the environment variable fdb_build or"
                echo "       pass it with -b <PATH_TO_BUILD>"
            fi
            if [ ! -f "${fdb_source}/flow/Net2.actor.cpp" ]
            then
                __res=1
                echo "ERROR: ${fdb_source} does not appear to be a fdb source"
                echo "       directory. Either pass it with -s or set"
                echo "       the environment variable fdb_source."
            fi
            if [ ${__res} -ne 0 ]
            then
                break
            fi
            config_load_vms
            __res=$?
            if [ ${__res} -ne 0 ]
            then
                break
            fi
        done
        exitfun
        return ${__res}
    }
fi
@@ -1,40 +0,0 @@
#!/usr/bin/env bash

if [ -z "${deb_sh_included}" ]
then
    deb_sh_included=1

    source ${source_dir}/modules/util.sh

    install_build_tools() {
        apt-get -y install cmake gcc
    }

    install() {
        local __res=0
        enterfun
        echo "Install FoundationDB"
        cd /build/packages
        package_names=()
        for f in "${package_files[@]}"
        do
            package_name="$(dpkg -I ${f} | grep Package | sed 's/.*://')"
            package_names+=( "${package_name}" )
        done
        dpkg -i ${package_files[@]}
        apt-get -yf -o Dpkg::Options::="--force-confold" install
        __res=$?
        sleep 5
        exitfun
        return ${__res}
    }

    uninstall() {
        local __res=0
        enterfun
        apt-get -y remove ${package_names[@]}
        __res=$?
        exitfun
        return ${__res}
    }
fi
@ -1,186 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${docker_sh_included+x}" ]
|
||||
then
|
||||
docker_sh_included=1
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/config.sh
|
||||
source ${source_dir}/modules/tests.sh
|
||||
|
||||
failed_tests=()
|
||||
|
||||
docker_ids=()
|
||||
docker_threads=()
|
||||
docker_logs=()
|
||||
docker_error_logs=()
|
||||
|
||||
docker_wait_any() {
|
||||
local __res=0
|
||||
enterfun
|
||||
while [ "${#docker_threads[@]}" -gt 0 ]
|
||||
do
|
||||
IFS=";" read -ra res <${pipe_file}
|
||||
docker_id=${res[0]}
|
||||
result=${res[1]}
|
||||
i=0
|
||||
for (( idx=0; idx<${#docker_ids[@]}; idx++ ))
|
||||
do
|
||||
if [ "${docker_id}" = "${docker_ids[idx]}" ]
|
||||
then
|
||||
i=idx
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "${result}" -eq 0 ]
|
||||
then
|
||||
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]}"
|
||||
echo -e "\tDocker-ID: ${docker_ids[$i]} "
|
||||
echo -e "\tLog-File: ${docker_logs[$i]}"
|
||||
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
|
||||
else
|
||||
echo -e "${RED}Test FAILED: ${docker_threads[$i]}"
|
||||
echo -e "\tDocker-ID: ${docker_ids[$i]} "
|
||||
echo -e "\tLog-File: ${docker_logs[$i]}"
|
||||
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
|
||||
failed_tests+=( "${docker_threads[$i]}" )
|
||||
fi
|
||||
n=$((i+1))
|
||||
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
|
||||
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
|
||||
docker_logs=( "${docker_logs[@]:0:$i}" "${docker_logs[@]:$n}" )
|
||||
docker_error_logs=( "${docker_error_logs[@]:0:$i}" "${docker_error_logs[@]:$n}" )
|
||||
break
|
||||
done
|
||||
exitfun
|
||||
return "${__res}"
|
||||
}
|
||||
|
||||
docker_wait_all() {
|
||||
    local __res=0
    while [ "${#docker_threads[@]}" -gt 0 ]
    do
        docker_wait_any
        if [ "$?" -ne 0 ]
        then
            __res=1
        fi
    done
    return ${__res}
}

docker_run() {
    local __res=0
    enterfun
    for _ in 1
    do
        echo "Testing the following:"
        echo "======================"
        for K in "${vms[@]}"
        do
            curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${K}]} ) )
            echo "Will test the following ${#curr_packages[@]} packages in docker-image ${K}:"
            for p in "${curr_packages[@]}"
            do
                echo "  ${p}"
            done
            echo
        done
        log_dir="${fdb_build}/pkg_tester"
        pipe_file="${fdb_build}/pkg_tester.pipe"
        lock_file="${fdb_build}/pkg_tester.lock"
        if [ -p "${pipe_file}" ]
        then
            rm "${pipe_file}"
            successOr "Could not delete old pipe file"
        fi
        if [ -f "${lock_file}" ]
        then
            rm "${lock_file}"
            successOr "Could not delete old lock file"
        fi
        touch "${lock_file}"
        successOr "Could not create lock file"
        mkfifo "${pipe_file}"
        successOr "Could not create pipe file"
        mkdir -p "${log_dir}"
        # setup the containers
        # TODO: shall we make this parallel as well?
        for vm in "${vms[@]}"
        do
            curr_name="${ini_name[$vm]}"
            curr_location="${ini_location[$vm]}"
            if [[ "$curr_location" = /* ]]
            then
                cd "${curr_location}"
            else
                cd ${source_dir}/../${curr_location}
            fi
            docker_build_logs="${log_dir}/docker_build_${curr_name}"
            docker build . -t ${curr_name} 1> "${docker_build_logs}.log" 2> "${docker_build_logs}.err"
            successOr "Building Docker image ${curr_name} failed - see ${docker_build_logs}.log and ${docker_build_logs}.err"
        done
        if [ ! -z "${tests_to_run+x}" ]
        then
            tests=()
            IFS=';' read -ra tests <<< "${tests_to_run}"
        fi
        for vm in "${vms[@]}"
        do
            curr_name="${ini_name[$vm]}"
            curr_format="${ini_format[$vm]}"
            curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${vm}]} ) )
            for curr_test in "${tests[@]}"
            do
                if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
                then
                    docker_wait_any
                fi
                log_file="${log_dir}/${curr_name}_${curr_test}.log"
                err_file="${log_dir}/${curr_name}_${curr_test}.err"
                docker_id=$( docker run -d -v "${fdb_source}:/foundationdb" \
                    -v "${fdb_build}:/build" \
                    ${curr_name} /sbin/init )
                echo "Starting Test ${curr_name}/${curr_test} Docker-ID: ${docker_id}"
                {
                    docker exec "${docker_id}" bash \
                        /foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]} \
                        2> ${err_file} 1> ${log_file}
                    res=$?
                    if [ "${pruning_strategy}" = "ALL" ]
                    then
                        docker kill "${docker_id}" > /dev/null
                    elif [ "${res}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
                    then
                        docker kill "${docker_id}" > /dev/null
                    elif [ "${res}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
                    then
                        docker kill "${docker_id}" > /dev/null
                    fi
                    flock "${lock_file}" echo "${docker_id};${res}" >> "${pipe_file}"
                } &
                docker_ids+=( "${docker_id}" )
                docker_threads+=( "${curr_name}/${curr_test}" )
                docker_logs+=( "${log_file}" )
                docker_error_logs+=( "${err_file}" )
            done
        done
        docker_wait_all
        rm ${pipe_file}
        if [ "${#failed_tests[@]}" -eq 0 ]
        then
            echo -e "${GREEN}SUCCESS${NC}"
        else
            echo -e "${RED}FAILURE"
            echo "The following tests failed:"
            for t in "${failed_tests[@]}"
            do
                echo "  - ${t}"
            done
            echo -e "${NC}"
            __res=1
        fi
    done
    exitfun
    return "${__res}"
}
fi
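A note on the result collection above: each backgrounded test reports "docker_id;exit_code" into the named pipe, with flock serializing the writers, and the docker_wait_* functions consume one record per finished test. A minimal self-contained sketch of the same idiom, with purely illustrative job names (not the module's actual variables):

#!/usr/bin/env bash
# Sketch: parallel jobs report "name;exit_code" through a named pipe.
pipe="/tmp/results.pipe"
lock="/tmp/results.lock"
rm -f "${pipe}" "${lock}"
mkfifo "${pipe}"
touch "${lock}"
exec 3<> "${pipe}"        # keep one read-write fd open so no report is discarded
for job in alpha beta gamma
do
    {
        sleep $(( RANDOM % 2 ))                       # stand-in for the real test
        flock "${lock}" echo "${job};0" >> "${pipe}"  # serialized "name;exit_code" record
    } &
done
for _ in 1 2 3
do
    IFS=';' read -r name code <&3                     # blocks until some job reports
    echo "job ${name} finished with exit code ${code}"
done
exec 3<&-
rm -f "${pipe}" "${lock}"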
|
|
@ -1,23 +0,0 @@
|
|||
#!/usr/bin/env bash

# This module has to be included first and only once.
# This is because of a limitation of older bash versions
# that doesn't allow us to declare associative arrays
# globally.

if [ -z "${global_sh_included+x}" ]
then
    global_sh_included=1
else
    echo "global.sh can only be included once"
    exit 1
fi

declare -A ini_name
declare -A ini_location
declare -A ini_packages
declare -A ini_format
declare -A test_start_state
declare -A test_exit_state
declare -a tests
declare -a vms
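A hypothetical consumer of this module would honor the include-once contract like this (sketch only; the module path follows the other scripts in this directory):

#!/usr/bin/env bash
# globals.sh must be sourced exactly once, before any module that fills the
# associative arrays declared above; a second source would exit with 1.
source "${source_dir}/modules/globals.sh"
ini_name[centos7]="fdb-centos7"   # illustrative entry; real values come from the ini config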
|
|
@ -1,45 +0,0 @@
|
|||
#!/usr/bin/env bash

if [ -z "${rpm_sh_included}" ]
then
    rpm_sh_included=1

    source ${source_dir}/modules/util.sh

    conf_save_extension=".rpmsave"

    install_build_tools() {
        yum -y install cmake gcc
    }

    install() {
        local __res=0
        enterfun
        cd /build/packages
        package_names=()
        for f in "${package_files[@]}"
        do
            package_names+=( "$(rpm -qp ${f})" )
        done
        yum install -y ${package_files[@]}
        __res=$?
        # give the server some time to come up
        sleep 5
        exitfun
        return ${__res}
    }

    uninstall() {
        local __res=0
        enterfun
        if [ "$1" == "purge" ]
        then
            # yum has no --purge flag; RPM keeps modified config files
            # around as *${conf_save_extension} on remove
            yum remove -y ${package_names[@]}
        else
            yum remove -y ${package_names[@]}
        fi
        __res=$?
        exitfun
        return ${__res}
    }
fi
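The name-capture step in install() matters because yum installs by file but removes by package name. A small illustration of what rpm -qp yields (file names below are hypothetical):

# rpm -qp prints name-version-release.arch for a package *file*, which is the
# form that `yum remove` expects later in uninstall().
for f in foundationdb-clients-7.1.0-1.el7.x86_64.rpm foundationdb-server-7.1.0-1.el7.x86_64.rpm
do
    rpm -qp "${f}"   # e.g. foundationdb-clients-7.1.0-1.el7.x86_64
done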
|
|
@ -1,49 +0,0 @@
|
|||
#!/usr/bin/env bash

if [ -z ${test_args_sh_included+x} ]
then
    test_args_sh_included=1

    source ${source_dir}/modules/util.sh

    test_args_usage() {
        me=`basename "$0"`
        echo "usage: ${me} [-h] files..."
        cat <<EOF
    -n TEST: The name of the test to run

    Will execute the passed commands
    in the order they were passed
EOF
    }

    test_args_parse() {
        local __res=0
        run_deb_tests=1
        run_rpm_tests=1
        while getopts ":hn:" opt
        do
            case ${opt} in
                h )
                    test_args_usage
                    __res=2
                    break
                    ;;
                n )
                    echo "test_name=${OPTARG}"
                    test_name="${OPTARG}"
                    ;;
                \? )
                    curr_index="$((OPTIND-1))"
                    echo "Unknown option ${@:${curr_index}:1}"
                    test_args_usage
                    __res=1
                    break
                    ;;
            esac
        done
        shift $((OPTIND -1))
        package_files=("$@")
        return ${__res}
    }
fi
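For reference, this parser backs the per-format test scripts that docker_run invokes inside each container; a hypothetical direct invocation would look like this (package file names are illustrative):

# Run the single test `fresh_install` against two package files.
bash rpm_tests.sh -n fresh_install \
    foundationdb-clients-7.1.0-1.el7.x86_64.rpm \
    foundationdb-server-7.1.0-1.el7.x86_64.rpm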
|
|
@ -1,64 +0,0 @@
|
|||
#!/usr/bin/env bash

if [ -z "${testing_sh_included+x}" ]
then
    testing_sh_included=1

    source ${source_dir}/modules/util.sh

    desired_state() {
        case $1 in
            CLEAN )
                :
                ;;
            INSTALLED )
                install
                ;;
        esac
    }

    tests_healthy() {
        enterfun
        local __res=0
        for _ in 1
        do
            cd /
            fdbcli --exec status
            if [ $? -ne 0 ]
            then
                __res=1
                break
            fi
            healthy="$(fdbcli --exec status | grep HEALTHY | wc -l)"
            if [ "${healthy}" -eq 0 ]
            then
                __res=1
                break
            fi
        done
        exitfun
        return ${__res}
    }

    tests_clean() {
        uninstall purge
        successOr "FoundationDB was not uninstalled correctly"
        # systemd/initd are not running, so we have to kill manually here
        pidof fdbmonitor | xargs kill
        tests_clean_nouninstall
        rm -rf /etc/foundationdb
        rm -rf /var/lib/foundationdb
        rm -rf /var/log/foundationdb
    }

    tests_main() {
        new_state="${test_start_state[${test_name}]}"
        echo "Setting desired state ${new_state} for ${test_name}"
        desired_state "${new_state}"
        ${test_name}
        successOr "${test_name} Failed"
        echo "======================================================================="
        echo "Test ${test_name} successfully finished"
        echo "======================================================================="
        current_state="${test_exit_state[${test_name}]}"
    }
fi
|
|
@ -1,143 +0,0 @@
|
|||
#!/usr/bin/env bash

# In this file the tests are formulated which
# should run in the docker container to test
# whether the RPM and DEB packages work properly.
#
# In order to add a test, a user first has to
# add the name of the test to the `tests` array
# which is defined in this file.
#
# Then, she must define the state this test
# expects the container to be in. To do that,
# a value for the test has to be added to the
# associative array `test_start_state`. Valid
# values are:
#
# - INSTALLED: In this case, the test will be
#   started with a freshly installed FDB, but
#   no other changes were made to the container.
# - CLEAN: This simply means that the container
#   will run a minimal version of the OS (as defined
#   in the corresponding Dockerfile)
#
# A test is then simply a bash function with the
# same name as the test. It can use the predefined
# bash functions `install` and `uninstall` to either
# install or uninstall FDB on the container. The FDB
# build directory can be found in `/build`, the
# source code will be located in `/foundationdb`

if [ -z "${tests_sh_included+x}" ]
then
    tests_sh_included=1

    source ${source_dir}/modules/util.sh
    source ${source_dir}/modules/testing.sh

    tests=( "fresh_install" "keep_config" )
    test_start_state=([fresh_install]=INSTALLED [keep_config]=CLEAN )

    fresh_install() {
        tests_healthy
        successOr "Fresh installation is not clean"
        # test that we can read from and write to fdb
        cd /
        timeout 2 fdbcli --exec 'writemode on ; set foo bar'
        successOr "Cannot write to database"
        getresult="$(timeout 2 fdbcli --exec 'get foo')"
        successOr "Get on database failed"
        if [ "${getresult}" != "\`foo' is \`bar'" ]
        then
            fail "value was not set correctly"
        fi
        timeout 2 fdbcli --exec 'writemode on ; clear foo'
        successOr "Deleting value failed"
        getresult="$(timeout 2 fdbcli --exec 'get foo')"
        successOr "Get on database failed"
        if [ "${getresult}" != "\`foo': not found" ]
        then
            fail "value was not cleared correctly"
        fi
        PYTHON_TARGZ_NAME="$(ls /build/packages | grep 'foundationdb-[0-9.]*\.tar\.gz' | sed 's/\.tar\.gz$//')"
        tar -C /tmp -xvf /build/packages/${PYTHON_TARGZ_NAME}.tar.gz
        pushd /tmp/${PYTHON_TARGZ_NAME}
        python setup.py install
        successOr "Installing python bindings failed"
        popd
        python -c 'import fdb; fdb.api_version(710)'
        successOr "Loading python bindings failed"

        # Test cmake and pkg-config integration: https://github.com/apple/foundationdb/issues/1483
        install_build_tools
        rm -rf build-fdb_c_app
        mkdir build-fdb_c_app
        pushd build-fdb_c_app
        cmake /foundationdb/build/cmake/package_tester/fdb_c_app && make
        successOr "FoundationDB-Client cmake integration failed"
        cc /foundationdb/build/cmake/package_tester/fdb_c_app/app.c `pkg-config --libs --cflags foundationdb-client`
        successOr "FoundationDB-Client pkg-config integration failed"
        popd
    }

    keep_config() {
        mkdir /etc/foundationdb
        description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8)
        random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8)
        successOr "Could not create secret"
        echo $description:$random_str@127.0.0.1:4500 > /tmp/fdb.cluster
        successOr "Could not create fdb.cluster file"
        sed '/\[fdbserver.4500\]/a \[fdbserver.4501\]' /foundationdb/packaging/foundationdb.conf > /tmp/foundationdb.conf
        successOr "Could not change foundationdb.conf file"
        # we need to keep these files around for testing that the install didn't change them
        cp /tmp/fdb.cluster /etc/foundationdb/fdb.cluster
        cp /tmp/foundationdb.conf /etc/foundationdb/foundationdb.conf

        install
        successOr "FoundationDB install failed"
        # make sure we are not in build directory as there is a fdb.cluster file there
        echo "Configure new database - Install isn't supposed to do this for us"
        echo "as there was an existing configuration"
        cd /
        timeout 2 fdbcli --exec 'configure new single ssd'
        successOr "Couldn't configure new database"
        tests_healthy
        num_processes="$(timeout 2 fdbcli --exec 'status' | grep "FoundationDB processes" | sed -e 's/.*- //')"
        if [ "${num_processes}" -ne 2 ]
        then
            fail "Number of processes incorrect after config change"
        fi

        differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
        if [ -n "${differences}" ]
        then
            fail "Install changed configuration files"
        fi
        differences="$(diff /tmp/foundationdb.conf /etc/foundationdb/foundationdb.conf)"
        if [ -n "${differences}" ]
        then
            fail "Install changed configuration files"
        fi

        uninstall
        # make sure config didn't get deleted
        # RPM, however, renames the file on remove, so we need to check for this
        conffile="/etc/foundationdb/foundationdb.conf${conf_save_extension}"
        if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f "${conffile}" ]
        then
            fail "Uninstall removed configuration"
        fi
        differences="$(diff /tmp/foundationdb.conf ${conffile})"
        if [ -n "${differences}" ]
        then
            fail "${conffile} changed during remove"
        fi
        differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
        if [ -n "${differences}" ]
        then
            fail "/etc/foundationdb/fdb.cluster changed during remove"
        fi

        return 0
    }
fi
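Following the recipe in the header comment, adding a third test would look like the sketch below; uninstall_leaves_no_processes is hypothetical and not part of the suite:

# Hedged sketch: register the test, declare its start state, define it.
tests=( "fresh_install" "keep_config" "uninstall_leaves_no_processes" )
test_start_state=([fresh_install]=INSTALLED [keep_config]=CLEAN [uninstall_leaves_no_processes]=INSTALLED )

uninstall_leaves_no_processes() {
    uninstall
    successOr "FoundationDB was not uninstalled correctly"
    if pidof fdbserver > /dev/null
    then
        fail "fdbserver still running after uninstall"
    fi
}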
|
|
@ -1,40 +0,0 @@
|
|||
#!/usr/bin/env bash

if [ -z ${util_sh_included+x} ]
then
    util_sh_included=1

    # for colored output
    RED='\033[0;31m'
    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    NC='\033[0m' # No Color


    enterfun() {
        pushd . > /dev/null
    }

    exitfun() {
        popd > /dev/null
    }

    fail() {
        false
        successOr "${@:1}"
    }

    successOr() {
        local __res=$?
        if [ ${__res} -ne 0 ]
        then
            if [ "$#" -gt 0 ]
            then
                >&2 echo -e "${RED}${@:1} ${NC}"
            fi
            exit ${__res}
        fi
        return 0
    }

fi
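Usage note: successOr reads the exit status of the command immediately before it, so it must directly follow the command it guards. A short sketch (the cp command is hypothetical):

cp /tmp/fdb.cluster /etc/foundationdb/fdb.cluster
successOr "Could not copy cluster file"
# fail is just a forced failure routed through the same mechanism:
# fail "message"  is equivalent to  false; successOr "message"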
|
|
@ -1,32 +0,0 @@
|
|||
#!/usr/bin/env bash

source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/rpm.sh
source ${source_dir}/modules/tests.sh
source ${source_dir}/modules/test_args.sh

main() {
    local __res=0
    enterfun
    for _ in 1
    do
        test_args_parse "$@"
        __res=$?
        if [ ${__res} -eq 2 ]
        then
            __res=0
            break
        elif [ ${__res} -ne 0 ]
        then
            break
        fi
        tests_main
    done
    exitfun
    return ${__res}
}

main "$@"
|
|
@ -1,35 +0,0 @@
|
|||
#!/usr/bin/env bash

source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/config.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/arguments.sh
source ${source_dir}/modules/docker.sh

main() {
    local __res=0
    enterfun
    for _ in 1
    do
        arguments_parse "$@"
        if [ $? -ne 0 ]
        then
            __res=1
            break
        fi
        config_verify
        if [ $? -ne 0 ]
        then
            __res=1
            break
        fi
        docker_run
        __res=$?
    done
    exitfun
    return ${__res}
}

main "$@"
|
|
@ -1,105 +0,0 @@
|
|||
version: "3"

services:
  common: &common
    image: foundationdb/foundationdb-build:0.1.24

  build-setup: &build-setup
    <<: *common
    depends_on: [common]
    volumes:
      - ..:/__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb
    working_dir: /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb
    environment:
      - MAKEJOBS=1
      - USE_CCACHE=1
      - BUILD_DIR=./work

  release-setup: &release-setup
    <<: *build-setup
    environment:
      - MAKEJOBS=1
      - USE_CCACHE=1
      - RELEASE=true
      - BUILD_DIR=./work

  snapshot-setup: &snapshot-setup
    <<: *build-setup

  build-docs:
    <<: *build-setup
    volumes:
      - ..:/foundationdb
    working_dir: /foundationdb
    command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" docpackage'


  release-packages: &release-packages
    <<: *release-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'

  snapshot-packages: &snapshot-packages
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'

  prb-packages:
    <<: *snapshot-packages


  release-bindings: &release-bindings
    <<: *release-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'

  snapshot-bindings: &snapshot-bindings
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'

  prb-bindings:
    <<: *snapshot-bindings


  snapshot-cmake: &snapshot-cmake
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" "packages" "strip_targets" && cpack'

  prb-cmake:
    <<: *snapshot-cmake


  snapshot-bindings-cmake: &snapshot-bindings-cmake
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" "bindings/all"'

  prb-bindings-cmake:
    <<: *snapshot-bindings-cmake


  snapshot-testpackages: &snapshot-testpackages
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}"'

  prb-testpackages:
    <<: *snapshot-testpackages


  snapshot-ctest: &snapshot-ctest
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=1 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'

  prb-ctest:
    <<: *snapshot-ctest


  snapshot-correctness: &snapshot-correctness
    <<: *build-setup
    command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=1 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'

  prb-correctness:
    <<: *snapshot-correctness


  shell:
    <<: *build-setup
    volumes:
      - ..:/foundationdb
    entrypoint: /bin/bash
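Each top-level key above is a compose service, so a single CI variant can be run by name. A hedged example invocation, assuming a standard docker-compose CLI:

# Run one variant; the prb-* services are aliases of their snapshot-* anchors.
docker-compose -f docker-compose.yaml run --rm snapshot-ctest
docker-compose -f docker-compose.yaml run --rm prb-ctest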
|
|
@ -1,290 +0,0 @@
|
|||
FROM centos:6

WORKDIR /tmp

RUN sed -i -e '/enabled/d' /etc/yum.repos.d/CentOS-Base.repo && \
    sed -i -e '/gpgcheck=1/a enabled=0' /etc/yum.repos.d/CentOS-Base.repo && \
    sed -i -n '/6.1/q;p' /etc/yum.repos.d/CentOS-Vault.repo && \
    sed -i -e "s/6\.0/$(cut -d\  -f3 /etc/redhat-release)/g" /etc/yum.repos.d/CentOS-Vault.repo && \
    sed -i -e 's/enabled=0/enabled=1/g' /etc/yum.repos.d/CentOS-Vault.repo && \
    yum install -y \
        centos-release-scl-rh \
        epel-release \
        scl-utils \
        yum-utils && \
    yum-config-manager --enable rhel-server-rhscl-7-rpms && \
    sed -i -e 's/#baseurl=/baseurl=/g' \
        -e 's/mirror.centos.org/vault.centos.org/g' \
        -e 's/mirrorlist=/#mirrorlist=/g' \
        /etc/yum.repos.d/CentOS-SCLo-scl-rh.repo && \
    yum install -y \
        binutils-devel \
        curl \
        debbuild \
        devtoolset-8 \
        devtoolset-8-libasan-devel \
        devtoolset-8-libtsan-devel \
        devtoolset-8-libubsan-devel \
        devtoolset-8-valgrind-devel \
        dos2unix \
        dpkg \
        gettext-devel \
        git \
        golang \
        java-1.8.0-openjdk-devel \
        libcurl-devel \
        libuuid-devel \
        libxslt \
        lz4 \
        lz4-devel \
        lz4-static \
        mono-devel \
        redhat-lsb-core \
        rpm-build \
        tcl-devel \
        unzip \
        wget \
        rh-python36 \
        rh-python36-python-devel \
        rh-ruby24 && \
    yum clean all && \
    rm -rf /var/cache/yum

# build/install autoconf -- same version installed by yum in centos7
RUN curl -Ls http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz -o autoconf.tar.gz && \
    echo "954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969  autoconf.tar.gz" > autoconf-sha.txt && \
    sha256sum -c autoconf-sha.txt && \
    mkdir autoconf && \
    tar --strip-components 1 --no-same-owner --directory autoconf -xf autoconf.tar.gz && \
    cd autoconf && \
    ./configure && \
    make && \
    make install && \
    cd ../ && \
    rm -rf /tmp/*

# build/install automake -- same version installed by yum in centos7
RUN curl -Ls http://ftp.gnu.org/gnu/automake/automake-1.13.4.tar.gz -o automake.tar.gz && \
    echo "4c93abc0bff54b296f41f92dd3aa1e73e554265a6f719df465574983ef6f878c  automake.tar.gz" > automake-sha.txt && \
    sha256sum -c automake-sha.txt && \
    mkdir automake && \
    tar --strip-components 1 --no-same-owner --directory automake -xf automake.tar.gz && \
    cd automake && \
    ./configure && \
    make && \
    make install && \
    cd ../ && \
    rm -rf /tmp/*

# build/install git
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/git/git/archive/v2.30.0.tar.gz -o git.tar.gz && \
    echo "8db4edd1a0a74ebf4b78aed3f9e25c8f2a7db3c00b1aaee94d1e9834fae24e61  git.tar.gz" > git-sha.txt && \
    sha256sum -c git-sha.txt && \
    mkdir git && \
    tar --strip-components 1 --no-same-owner --directory git -xf git.tar.gz && \
    cd git && \
    make configure && \
    ./configure && \
    make && \
    make install && \
    cd ../ && \
    rm -rf /tmp/*

# build/install ninja
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ninja-build/ninja/archive/v1.9.0.zip -o ninja.zip && \
    echo "8e2e654a418373f10c22e4cc9bdbe9baeca8527ace8d572e0b421e9d9b85b7ef  ninja.zip" > ninja-sha.txt && \
    sha256sum -c ninja-sha.txt && \
    unzip ninja.zip && \
    cd ninja-1.9.0 && \
    ./configure.py --bootstrap && \
    cp ninja /usr/bin && \
    cd .. && \
    rm -rf /tmp/*

# install cmake
RUN curl -Ls https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz -o cmake.tar.gz && \
    echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76  cmake.tar.gz" > cmake-sha.txt && \
    sha256sum -c cmake-sha.txt && \
    mkdir cmake && \
    tar --strip-components 1 --no-same-owner --directory cmake -xf cmake.tar.gz && \
    cp -r cmake/* /usr/local/ && \
    rm -rf /tmp/*

# build/install LLVM
RUN source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    curl -Ls https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/llvm-project-10.0.0.tar.xz -o llvm.tar.xz && \
    echo "6287a85f4a6aeb07dbffe27847117fe311ada48005f2b00241b523fe7b60716e  llvm.tar.xz" > llvm-sha.txt && \
    sha256sum -c llvm-sha.txt && \
    mkdir llvm-project && \
    tar --strip-components 1 --no-same-owner --directory llvm-project -xf llvm.tar.xz && \
    mkdir -p llvm-project/build && \
    cd llvm-project/build && \
    cmake \
        -DCMAKE_BUILD_TYPE=Release \
        -G Ninja \
        -DLLVM_INCLUDE_EXAMPLES=OFF \
        -DLLVM_INCLUDE_TESTS=OFF \
        -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;compiler-rt;libcxx;libcxxabi;libunwind;lld;lldb" \
        -DLLVM_STATIC_LINK_CXX_STDLIB=ON \
        ../llvm && \
    cmake --build . && \
    cmake --build . --target install && \
    cd ../.. && \
    rm -rf /tmp/*

# build/install openssl
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://www.openssl.org/source/openssl-1.1.1h.tar.gz -o openssl.tar.gz && \
    echo "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9  openssl.tar.gz" > openssl-sha.txt && \
    sha256sum -c openssl-sha.txt && \
    mkdir openssl && \
    tar --strip-components 1 --no-same-owner --directory openssl -xf openssl.tar.gz && \
    cd openssl && \
    ./config CFLAGS="-fPIC -O3" --prefix=/usr/local && \
    make -j`nproc` && \
    make -j1 install && \
    ln -sv /usr/local/lib64/lib*.so.1.1 /usr/lib64/ && \
    cd .. && \
    rm -rf /tmp/*

# install rocksdb to /opt
RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocksdb.tar.gz && \
    echo "d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee  rocksdb.tar.gz" > rocksdb-sha.txt && \
    sha256sum -c rocksdb-sha.txt && \
    tar --directory /opt -xf rocksdb.tar.gz && \
    rm -rf /tmp/*

# install boost 1.67 to /opt
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
    echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba  boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
    sha256sum -c boost-sha-67.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
    rm -rf /opt/boost_1_67_0/libs && \
    rm -rf /tmp/*

# install boost 1.72 to /opt
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
    echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722  boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
    sha256sum -c boost-sha-72.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \
    cd /opt/boost_1_72_0 && \
    ./bootstrap.sh --with-libraries=context && \
    ./b2 link=static cxxflags=-std=c++14 --prefix=/opt/boost_1_72_0 install && \
    rm -rf /opt/boost_1_72_0/libs && \
    rm -rf /tmp/*

# jemalloc (needed for FDB after 6.3)
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2 -o jemalloc-5.2.1.tar.bz2 && \
    echo "34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6  jemalloc-5.2.1.tar.bz2" > jemalloc-sha.txt && \
    sha256sum -c jemalloc-sha.txt && \
    mkdir jemalloc && \
    tar --strip-components 1 --no-same-owner --no-same-permissions --directory jemalloc -xjf jemalloc-5.2.1.tar.bz2 && \
    cd jemalloc && \
    ./configure --enable-static --disable-cxx && \
    make && \
    make install && \
    cd .. && \
    rm -rf /tmp/*

# Install CCACHE
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ccache/ccache/releases/download/v4.0/ccache-4.0.tar.gz -o ccache.tar.gz && \
    echo "ac97af86679028ebc8555c99318352588ff50f515fc3a7f8ed21a8ad367e3d45  ccache.tar.gz" > ccache-sha256.txt && \
    sha256sum -c ccache-sha256.txt && \
    mkdir ccache && \
    tar --strip-components 1 --no-same-owner --directory ccache -xf ccache.tar.gz && \
    mkdir build && \
    cd build && \
    cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DZSTD_FROM_INTERNET=ON ../ccache && \
    cmake --build . --target install && \
    cd .. && \
    rm -rf /tmp/*

# build/install toml
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz -o toml.tar.gz && \
    echo "bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d  toml.tar.gz" > toml-sha256.txt && \
    sha256sum -c toml-sha256.txt && \
    mkdir toml && \
    tar --strip-components 1 --no-same-owner --directory toml -xf toml.tar.gz && \
    mkdir build && \
    cd build && \
    cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -Dtoml11_BUILD_TEST=OFF ../toml && \
    cmake --build . --target install && \
    cd .. && \
    rm -rf /tmp/*

# download old fdbserver binaries
ARG FDB_VERSION="6.2.29"
RUN mkdir -p /opt/foundationdb/old && \
    curl -Ls https://www.foundationdb.org/downloads/misc/fdbservers-${FDB_VERSION}.tar.gz | \
        tar --no-same-owner --directory /opt/foundationdb/old -xz && \
    chmod +x /opt/foundationdb/old/* && \
    ln -sf /opt/foundationdb/old/fdbserver-${FDB_VERSION} /opt/foundationdb/old/fdbserver

# build/install distcc
RUN source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    curl -Ls https://github.com/distcc/distcc/archive/v3.3.5.tar.gz -o distcc.tar.gz && \
    echo "13a4b3ce49dfc853a3de550f6ccac583413946b3a2fa778ddf503a9edc8059b0  distcc.tar.gz" > distcc-sha256.txt && \
    sha256sum -c distcc-sha256.txt && \
    mkdir distcc && \
    tar --strip-components 1 --no-same-owner --directory distcc -xf distcc.tar.gz && \
    cd distcc && \
    ./autogen.sh && \
    ./configure && \
    make && \
    make install && \
    cd .. && \
    rm -rf /tmp/*

RUN curl -Ls https://github.com/manticoresoftware/manticoresearch/raw/master/misc/junit/ctest2junit.xsl -o /opt/ctest2junit.xsl

# # Setting this environment variable switches from OpenSSL to BoringSSL
# ENV OPENSSL_ROOT_DIR=/opt/boringssl
#
# # install BoringSSL: TODO: They don't seem to have releases(?) I picked today's master SHA.
# RUN cd /opt && \
#     git clone https://boringssl.googlesource.com/boringssl && \
#     cd boringssl && \
#     git checkout e796cc65025982ed1fb9ef41b3f74e8115092816 && \
#     mkdir build
#
# # ninja doesn't respect CXXFLAGS, and the boringssl CMakeLists doesn't expose an option to define __STDC_FORMAT_MACROS
# # also, enable -fPIC.
# # this is moderately uglier than creating a patchfile, but easier to maintain.
# RUN cd /opt/boringssl && \
#     for f in crypto/fipsmodule/rand/fork_detect_test.cc \
#         include/openssl/bn.h \
#         ssl/test/bssl_shim.cc ; do \
#     perl -p -i -e 's/#include <inttypes.h>/#define __STDC_FORMAT_MACROS 1\n#include <inttypes.h>/g;' $f ; \
#     done && \
#     perl -p -i -e 's/-Werror/-Werror -fPIC/' CMakeLists.txt && \
#     git diff
#
# RUN cd /opt/boringssl/build && \
#     scl enable devtoolset-8 rh-python36 rh-ruby24 -- cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. && \
#     scl enable devtoolset-8 rh-python36 rh-ruby24 -- ninja && \
#     ./ssl/ssl_test && \
#     mkdir -p ../lib && cp crypto/libcrypto.a ssl/libssl.a ../lib
#
# # Localize time zone
# ARG TIMEZONEINFO=America/Los_Angeles
# RUN rm -f /etc/localtime && ln -s /usr/share/zoneinfo/${TIMEZONEINFO} /etc/localtime
#
# LABEL version=${IMAGE_TAG}
# ENV DOCKER_IMAGEVER=${IMAGE_TAG}
# ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
# ENV CC=/opt/rh/devtoolset-8/root/usr/bin/gcc
# ENV CXX=/opt/rh/devtoolset-8/root/usr/bin/g++
#
# ENV CCACHE_NOHASHDIR=true
# ENV CCACHE_UMASK=0000
# ENV CCACHE_SLOPPINESS="file_macro,time_macros,include_file_mtime,include_file_ctime,file_stat_matches"
#
# CMD scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash
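Every third-party dependency in this image follows the same download, verify, build idiom. Isolated as plain shell it reads as below; the URL and checksum are placeholders, not a real artifact:

# The recurring idiom from the RUN layers above, as standalone shell.
curl -Ls https://example.com/tool-1.0.0.tar.gz -o tool.tar.gz
echo "<expected-sha256>  tool.tar.gz" > tool-sha.txt   # two spaces: the format sha256sum -c expects
sha256sum -c tool-sha.txt              # a mismatch fails the && chain and aborts the docker build
mkdir tool
tar --strip-components 1 --no-same-owner --directory tool -xf tool.tar.gz
cd tool && ./configure && make && make install
cd .. && rm -rf /tmp/*                 # clean up so the layer stays small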
|
|
@ -1,84 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
ARG VERSION=centos6-latest
FROM ${REPOSITORY}:${VERSION}

# add vscode server
RUN yum repolist && \
    yum -y install \
        bash-completion \
        byobu \
        cgdb \
        emacs-nox \
        jq \
        the_silver_searcher \
        tmux \
        tree \
        vim \
        zsh && \
    yum clean all && \
    rm -rf /var/cache/yum

WORKDIR /tmp
RUN source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    pip3 install \
        lxml \
        psutil \
        python-dateutil \
        subprocess32 && \
    mkdir fdb-joshua && \
    cd fdb-joshua && \
    git clone https://github.com/FoundationDB/fdb-joshua . && \
    pip3 install /tmp/fdb-joshua && \
    cd /tmp && \
    curl -Ls https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.9/2020-11-02/bin/linux/amd64/kubectl -o kubectl && \
    echo "3dbe69e6deb35fbd6fec95b13d20ac1527544867ae56e3dae17e8c4d638b25b9  kubectl" > kubectl.txt && \
    sha256sum -c kubectl.txt && \
    mv kubectl /usr/local/bin/kubectl && \
    chmod 755 /usr/local/bin/kubectl && \
    curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.0.30.zip -o "awscliv2.zip" && \
    echo "7ee475f22c1b35cc9e53affbf96a9ffce91706e154a9441d0d39cbf8366b718e  awscliv2.zip" > awscliv2.txt && \
    sha256sum -c awscliv2.txt && \
    unzip -qq awscliv2.zip && \
    ./aws/install && \
    rm -rf /tmp/*

ARG FDB_VERSION="6.2.29"
RUN mkdir -p /usr/lib/foundationdb/plugins && \
    curl -Ls https://www.foundationdb.org/downloads/misc/joshua_tls_library.tar.gz | \
        tar --strip-components=1 --no-same-owner --directory /usr/lib/foundationdb/plugins -xz && \
    ln -sf /usr/lib/foundationdb/plugins/FDBGnuTLS.so /usr/lib/foundationdb/plugins/fdb-libressl-plugin.so && \
    curl -Ls https://www.foundationdb.org/downloads/${FDB_VERSION}/linux/libfdb_c_${FDB_VERSION}.so -o /usr/lib64/libfdb_c_${FDB_VERSION}.so && \
    ln -sf /usr/lib64/libfdb_c_${FDB_VERSION}.so /usr/lib64/libfdb_c.so

WORKDIR /root
RUN rm -f /root/anaconda-ks.cfg && \
    printf '%s\n' \
        'source /opt/rh/devtoolset-8/enable' \
        'source /opt/rh/rh-python36/enable' \
        'source /opt/rh/rh-ruby26/enable' \
        '' \
        'function cmk_ci() {' \
        '    cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
        '    ninja -v -C ${HOME}/build_output -j 84 all packages strip_targets' \
        '}' \
        'function cmk() {' \
        '    cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
        '    ninja -C ${HOME}/build_output -j 84' \
        '}' \
        'function ct() {' \
        '    cd ${HOME}/build_output && ctest -j 32 --no-compress-output -T test --output-on-failure' \
        '}' \
        'function j() {' \
        '    python3 -m joshua.joshua "${@}"' \
        '}' \
        'function jsd() {' \
        '    j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
        '}' \
        '' \
        'USER_BASHRC="$HOME/src/.bashrc.local"' \
        'if test -f "$USER_BASHRC"; then' \
        '    source $USER_BASHRC' \
        'fi' \
        '' \
        >> .bashrc
|
|
@ -1,24 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
ARG VERSION=centos6-latest
FROM ${REPOSITORY}:${VERSION}

RUN useradd distcc && \
    source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    update-distcc-symlinks

EXPOSE 3632
EXPOSE 3633
USER distcc
ENV ALLOW 0.0.0.0/0

ENTRYPOINT distccd \
    --daemon \
    --enable-tcp-insecure \
    --no-detach \
    --port 3632 \
    --log-stderr \
    --log-level info \
    --listen 0.0.0.0 \
    --allow ${ALLOW} \
    --jobs `nproc`
|
|
@ -1,247 +0,0 @@
|
|||
FROM centos:7

WORKDIR /tmp
COPY mono-project.com.rpmkey.pgp ./
RUN rpmkeys --import mono-project.com.rpmkey.pgp && \
    curl -Ls https://download.mono-project.com/repo/centos7-stable.repo -o /etc/yum.repos.d/mono-centos7-stable.repo && \
    yum repolist && \
    yum install -y \
        centos-release-scl-rh \
        epel-release \
        scl-utils \
        yum-utils && \
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && \
    yum install -y \
        autoconf \
        automake \
        binutils-devel \
        curl \
        debbuild \
        devtoolset-8 \
        devtoolset-8-libasan-devel \
        devtoolset-8-libtsan-devel \
        devtoolset-8-libubsan-devel \
        devtoolset-8-systemtap-sdt-devel \
        docker-ce \
        dos2unix \
        dpkg \
        gettext-devel \
        git \
        golang \
        java-11-openjdk-devel \
        libcurl-devel \
        libuuid-devel \
        libxslt \
        lz4 \
        lz4-devel \
        lz4-static \
        mono-devel \
        redhat-lsb-core \
        rpm-build \
        tcl-devel \
        unzip \
        wget && \
    if [ "$(uname -p)" == "aarch64" ]; then \
        yum install -y \
            rh-python38 \
            rh-python38-python-devel \
            rh-ruby27; \
    else \
        yum install -y \
            rh-python36 \
            rh-python36-python-devel \
            rh-ruby26; \
    fi && \
    yum clean all && \
    rm -rf /var/cache/yum

# build/install git
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/git/git/archive/v2.30.0.tar.gz -o git.tar.gz && \
    echo "8db4edd1a0a74ebf4b78aed3f9e25c8f2a7db3c00b1aaee94d1e9834fae24e61  git.tar.gz" > git-sha.txt && \
    sha256sum -c git-sha.txt && \
    mkdir git && \
    tar --strip-components 1 --no-same-owner --directory git -xf git.tar.gz && \
    cd git && \
    make configure && \
    ./configure && \
    make && \
    make install && \
    cd ../ && \
    rm -rf /tmp/*

# build/install ninja
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ninja-build/ninja/archive/v1.9.0.zip -o ninja.zip && \
    echo "8e2e654a418373f10c22e4cc9bdbe9baeca8527ace8d572e0b421e9d9b85b7ef  ninja.zip" > ninja-sha.txt && \
    sha256sum -c ninja-sha.txt && \
    unzip ninja.zip && \
    cd ninja-1.9.0 && \
    ./configure.py --bootstrap && \
    cp ninja /usr/bin && \
    cd .. && \
    rm -rf /tmp/*

# install cmake
RUN if [ "$(uname -p)" == "aarch64" ]; then \
        curl -Ls https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-aarch64.tar.gz -o cmake.tar.gz; \
        echo "69ec045c6993907a4f4a77349d0a0668f1bd3ce8bc5f6fbab6dc7a7e2ffc4f80  cmake.tar.gz" > cmake-sha.txt; \
    else \
        curl -Ls https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz -o cmake.tar.gz; \
        echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76  cmake.tar.gz" > cmake-sha.txt; \
    fi && \
    sha256sum -c cmake-sha.txt && \
    mkdir cmake && \
    tar --strip-components 1 --no-same-owner --directory cmake -xf cmake.tar.gz && \
    cp -r cmake/* /usr/local/ && \
    rm -rf /tmp/*

# build/install LLVM
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.0/llvm-project-11.0.0.tar.xz -o llvm.tar.xz && \
    echo "b7b639fc675fa1c86dd6d0bc32267be9eb34451748d2efd03f674b773000e92b  llvm.tar.xz" > llvm-sha.txt && \
    sha256sum -c llvm-sha.txt && \
    mkdir llvm-project && \
    tar --strip-components 1 --no-same-owner --directory llvm-project -xf llvm.tar.xz && \
    mkdir -p llvm-project/build && \
    cd llvm-project/build && \
    cmake \
        -DCMAKE_BUILD_TYPE=Release \
        -G Ninja \
        -DLLVM_INCLUDE_EXAMPLES=OFF \
        -DLLVM_INCLUDE_TESTS=OFF \
        -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;compiler-rt;libcxx;libcxxabi;libunwind;lld;lldb" \
        -DLLVM_STATIC_LINK_CXX_STDLIB=ON \
        ../llvm && \
    cmake --build . && \
    cmake --build . --target install && \
    cd ../.. && \
    rm -rf /tmp/*

# build/install openssl
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://www.openssl.org/source/openssl-1.1.1h.tar.gz -o openssl.tar.gz && \
    echo "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9  openssl.tar.gz" > openssl-sha.txt && \
    sha256sum -c openssl-sha.txt && \
    mkdir openssl && \
    tar --strip-components 1 --no-same-owner --directory openssl -xf openssl.tar.gz && \
    cd openssl && \
    ./config CFLAGS="-fPIC -O3" --prefix=/usr/local && \
    make -j`nproc` && \
    make -j1 install && \
    ln -sv /usr/local/lib64/lib*.so.1.1 /usr/lib64/ && \
    cd .. && \
    rm -rf /tmp/*

# install rocksdb to /opt
RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocksdb.tar.gz && \
    echo "d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee  rocksdb.tar.gz" > rocksdb-sha.txt && \
    sha256sum -c rocksdb-sha.txt && \
    tar --directory /opt -xf rocksdb.tar.gz && \
    rm -rf /tmp/*

# install boost 1.67 to /opt
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
    echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba  boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
    sha256sum -c boost-sha-67.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
    rm -rf /opt/boost_1_67_0/libs && \
    rm -rf /tmp/*

# install boost 1.72 to /opt
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
    echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722  boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
    sha256sum -c boost-sha-72.txt && \
    tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \
    cd /opt/boost_1_72_0 && \
    ./bootstrap.sh --with-libraries=context && \
    ./b2 link=static cxxflags=-std=c++14 --prefix=/opt/boost_1_72_0 install && \
    rm -rf /opt/boost_1_72_0/libs && \
    rm -rf /tmp/*

# jemalloc (needed for FDB after 6.3)
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2 -o jemalloc-5.2.1.tar.bz2 && \
    echo "34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6  jemalloc-5.2.1.tar.bz2" > jemalloc-sha.txt && \
    sha256sum -c jemalloc-sha.txt && \
    mkdir jemalloc && \
    tar --strip-components 1 --no-same-owner --no-same-permissions --directory jemalloc -xjf jemalloc-5.2.1.tar.bz2 && \
    cd jemalloc && \
    ./configure --enable-static --disable-cxx && \
    make && \
    make install && \
    cd .. && \
    rm -rf /tmp/*

# Install CCACHE
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ccache/ccache/releases/download/v4.0/ccache-4.0.tar.gz -o ccache.tar.gz && \
    echo "ac97af86679028ebc8555c99318352588ff50f515fc3a7f8ed21a8ad367e3d45  ccache.tar.gz" > ccache-sha256.txt && \
    sha256sum -c ccache-sha256.txt && \
    mkdir ccache && \
    tar --strip-components 1 --no-same-owner --directory ccache -xf ccache.tar.gz && \
    mkdir build && \
    cd build && \
    cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DZSTD_FROM_INTERNET=ON ../ccache && \
    cmake --build . --target install && \
    cd .. && \
    rm -rf /tmp/*

# build/install toml
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz -o toml.tar.gz && \
    echo "bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d  toml.tar.gz" > toml-sha256.txt && \
    sha256sum -c toml-sha256.txt && \
    mkdir toml && \
    tar --strip-components 1 --no-same-owner --directory toml -xf toml.tar.gz && \
    mkdir build && \
    cd build && \
    cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -Dtoml11_BUILD_TEST=OFF ../toml && \
    cmake --build . --target install && \
    cd .. && \
    rm -rf /tmp/*

# download old fdbserver binaries
ARG FDB_VERSION="6.2.29"
RUN mkdir -p /opt/foundationdb/old && \
    curl -Ls https://www.foundationdb.org/downloads/misc/fdbservers-${FDB_VERSION}.tar.gz | \
        tar --no-same-owner --directory /opt/foundationdb/old -xz && \
    chmod +x /opt/foundationdb/old/* && \
    ln -sf /opt/foundationdb/old/fdbserver-${FDB_VERSION} /opt/foundationdb/old/fdbserver

# build/install distcc
RUN source /opt/rh/devtoolset-8/enable && \
    if [ "$(uname -p)" == "aarch64" ]; then \
        source /opt/rh/rh-python38/enable; \
    else \
        source /opt/rh/rh-python36/enable; \
    fi && \
    curl -Ls https://github.com/distcc/distcc/archive/v3.3.5.tar.gz -o distcc.tar.gz && \
    echo "13a4b3ce49dfc853a3de550f6ccac583413946b3a2fa778ddf503a9edc8059b0  distcc.tar.gz" > distcc-sha256.txt && \
    sha256sum -c distcc-sha256.txt && \
    mkdir distcc && \
    tar --strip-components 1 --no-same-owner --directory distcc -xf distcc.tar.gz && \
    cd distcc && \
    ./autogen.sh && \
    ./configure && \
    make && \
    make install && \
    cd .. && \
    rm -rf /tmp/*

# valgrind
RUN source /opt/rh/devtoolset-8/enable && \
    curl -Ls https://sourceware.org/pub/valgrind/valgrind-3.17.0.tar.bz2 -o valgrind-3.17.0.tar.bz2 && \
    echo "ad3aec668e813e40f238995f60796d9590eee64a16dff88421430630e69285a2  valgrind-3.17.0.tar.bz2" > valgrind-sha.txt && \
    sha256sum -c valgrind-sha.txt && \
    mkdir valgrind && \
    tar --strip-components 1 --no-same-owner --no-same-permissions --directory valgrind -xjf valgrind-3.17.0.tar.bz2 && \
    cd valgrind && \
    ./configure && \
    make && \
    make install && \
    cd .. && \
    rm -rf /tmp/*

RUN curl -Ls https://github.com/manticoresoftware/manticoresearch/raw/master/misc/junit/ctest2junit.xsl -o /opt/ctest2junit.xsl
|
|
@ -1,40 +0,0 @@
|
|||
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: sks.pod01.fleetstreetops.com

mQENBFPfqCcBCADctOzyTxfWvf40Nlb+AMkcJyb505WSbzhWU8yPmBNAJOnbwueMsTkNMHEO
u8fGRNxRWj5o/Db1N7EoSQtK3OgFnBef8xquUyrzA1nJ2aPfUWX+bhTG1TwyrtLaOssFRz6z
/h/ChUIFvt2VZCw+Yx4BiKi+tvgwrHTYB/Yf2J9+R/1O6949n6veFFRBfgPOL0djhvRqXzhv
FjJkh4xhTaGVeOnRR3+YQkblmti2n6KYl0n2kNB40ujSqpTloSfnR5tmJpz00WoOA9MJBdvH
txTTn8l6rVzXbm4mW9ZmB1kht/BgWaNLaIisW5AZSkQKer35wOWf0G7Gw+cWHq+I7W9pABEB
AAG0OlhhbWFyaW4gUHVibGljIEplbmtpbnMgKGF1dG8tc2lnbmluZykgPHJlbGVuZ0B4YW1h
cmluLmNvbT6JARwEEAECAAYFAlQIhKQACgkQyQ+cuQ4frQyc1wf+MCusJK4ANLWikbgiSSx1
qMBveBlLKLEdCxYY+B9rc/pRDw448iBdd+nuSVdbRoqLgoN8gHbClboP+i22yw+mga0KASD7
b1mpdYB0npR3H73zbYArn3qTV8s/yUXkIAEFUtj0yoEuv8KjO8P7nZJh8OuqqAupUVN0s3Kj
ONqXqi6Ro3fvVEZWOUFZl/FmY5KmXlpcw+YwE5CaNhJ2WunrjFTDqynRU/LeoPEKuwyYvfo9
37zJFCrpAUMTr/9QpEKmV61H7fEHA9oHq97FBwWfjOU0l2mrXt1zJ97xVd2DXxrZodlkiY6B
76rhaT4ZhltY1E7WB2Z9WPfTe1Y6jz4fZ4kBHAQQAQgABgUCWEyoiAAKCRABFQplW72BAn/P
CAC0GkRBR3JTmG8WGeQMLb/o6Gon9cxpLnKv1GgFbHSM7XYMe7ySh5zxORwFuECuJ5+qcA6c
Ve/kJAV8rewLULL9yvHK3oK7R8zoVGbFVm+lyoxiaXpkkWg21Mb8IubiO+tA/dJc7hKQSpoI
0+dmJNaNrTVwqj0tQ8e0OL9KvBOYwFbSe06bocSNPVmKCt0EOvpGcQfzFw5UEjJVkqFn/moU
rSxj0YsJpwRXB1pOsBaQC6r9oCgUvxPf4H77U07+ImXzxRWInVPYFSXSiBA7p+hzvsikmZEl
iIAia8mTteUF1GeK4kafUk6iZZUfBlCIb9sV4O9Vvv8W0VjK4Vg6O2UAiQE4BBMBAgAiBQJT
36gnAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCmoZs409gx75DoB/9h5p8u1cUS
y6Mp2PjjW398LJZaqWwaa2W/lcLEKN7oWTC5Yf5BEuVsO9270pVln9Cv7hiqcbC8kywk+sZv
RsYO3uoTRwsmImc/7uaK382hey1A2hvkH5fYHmY/5Z/Z0bm/A0k0chhG2ycjWjZXYLZ96I0V
U3ZBQBHoh3qRtgWq4yWTsCJBX+FKPBdmkIpgcPXQw+hak0mj2sILqjScRZT1Oe+WJsMNMaLa
8dSdw+pPm8NM/VGLmO9iTTDApuAsRixpCYLdJY+ThGNrKe6xDswQo8gr3gbBkJi0wLRDP2Rz
q7rD0TC2PxOaWOZ7hmyz+EhjLcjZhHNJTaa+NV0k8YAwuQENBFPfqCcBCACtc7HssC9S3PxJ
m1youvGfYLhm+KzMO+gIoy7R32VXIZNxrkMYzaeerqSsMwxdhEjyOscT+rJbRGZ+9iPOGeh4
AqZlzzOuxQ/Lg5h+2mGVXe0Avb+A2zC56mLSQCL3W8NjABUZdknnc1YIf9Dz05fy4jPEttNS
y+Rzte0ITLH1Hy/PKBrlF5n+G1/86f3L5n1ZZXmV3vi+rXT/OyEh9xRS4usmR6kVh4o2XGlI
zUrUjhZvb4lxrHfWgzKlWFoUSydaZDk7eikTKF692RiSSpLbDLW2sNOdzT2eqv2B8CJRF5sL
bD6BB3dAbH7KfqKiCT3xcCZhNEZw+M+GcRO/HNbnABEBAAGJAR8EGAECAAkFAlPfqCcCGwwA
CgkQpqGbONPYMe+sNQgAwjm9PJ45t7NBNTXn1zadoQQbPqz9qAlWiII0k+zzJCTTVqgyIXJY
I6zdNiB/Oh1Xajs/T9z9tL54+LLqgtZKa0lzDOmcxn6Iujf3a1MFdYxKgaQtT2ADxAimuBoz
3Y1ohxXgAs2+VISWYoPBI+UWhYqg11zq3uwpFIYQBRgkVydCxefCxY19okNp9FPC7KJPpJkO
NgDAK693Y9mOZXSq+XeGhjy3Sxesl0PYLIfV33z+vCpc2o1dDA5wuycgfqupNQITkQm6gPOH
1jLu8Vttm4fdEtVMcqkn8dJFomo3JW3qxI7IWwjbVRg10G8LGAuBbD6CA0dGSf8PkHFYv2Xs
dQ==
=MWcF
-----END PGP PUBLIC KEY BLOCK-----
|
|
@ -1,113 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
ARG VERSION=centos7-latest
FROM ${REPOSITORY}:${VERSION}

# add vscode server
RUN yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo && \
    yum repolist && \
    yum -y install \
        bash-completion \
        byobu \
        cgdb \
        emacs-nox \
        fish \
        jq \
        ripgrep \
        the_silver_searcher \
        tmux \
        tree \
        vim \
        zsh && \
    yum clean all && \
    rm -rf /var/cache/yum

WORKDIR /tmp
RUN source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    pip3 install \
        lxml \
        psutil \
        python-dateutil \
        subprocess32 && \
    mkdir fdb-joshua && \
    cd fdb-joshua && \
    git clone https://github.com/FoundationDB/fdb-joshua . && \
    pip3 install /tmp/fdb-joshua && \
    cd /tmp && \
    curl -Ls https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.9/2020-11-02/bin/linux/amd64/kubectl -o kubectl && \
    echo "3dbe69e6deb35fbd6fec95b13d20ac1527544867ae56e3dae17e8c4d638b25b9  kubectl" > kubectl.txt && \
    sha256sum -c kubectl.txt && \
    mv kubectl /usr/local/bin/kubectl && \
    chmod 755 /usr/local/bin/kubectl && \
    curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.0.30.zip -o "awscliv2.zip" && \
    echo "7ee475f22c1b35cc9e53affbf96a9ffce91706e154a9441d0d39cbf8366b718e  awscliv2.zip" > awscliv2.txt && \
    sha256sum -c awscliv2.txt && \
    unzip -qq awscliv2.zip && \
    ./aws/install && \
    rm -rf /tmp/*

ARG FDB_VERSION="6.2.29"
RUN mkdir -p /usr/lib/foundationdb/plugins && \
    curl -Ls https://www.foundationdb.org/downloads/misc/joshua_tls_library.tar.gz | \
        tar --strip-components=1 --no-same-owner --directory /usr/lib/foundationdb/plugins -xz && \
    ln -sf /usr/lib/foundationdb/plugins/FDBGnuTLS.so /usr/lib/foundationdb/plugins/fdb-libressl-plugin.so && \
    curl -Ls https://www.foundationdb.org/downloads/${FDB_VERSION}/linux/libfdb_c_${FDB_VERSION}.so -o /usr/lib64/libfdb_c_${FDB_VERSION}.so && \
    ln -sf /usr/lib64/libfdb_c_${FDB_VERSION}.so /usr/lib64/libfdb_c.so

WORKDIR /root
RUN curl -Ls https://update.code.visualstudio.com/latest/server-linux-x64/stable -o /tmp/vscode-server-linux-x64.tar.gz && \
    mkdir -p .vscode-server/bin/latest && \
    tar --strip-components 1 --no-same-owner --directory .vscode-server/bin/latest -xf /tmp/vscode-server-linux-x64.tar.gz && \
    touch .vscode-server/bin/latest/0 && \
    rm -rf /tmp/*
RUN rm -f /root/anaconda-ks.cfg && \
    printf '%s\n' \
        '#!/usr/bin/env bash' \
        'set -Eeuo pipefail' \
        '' \
        'mkdir -p ~/.docker' \
        'cat > ~/.docker/config.json << EOF' \
        '{' \
        '  "proxies":' \
        '  {' \
        '    "default":' \
        '    {' \
        '      "httpProxy": "${HTTP_PROXY}",' \
        '      "httpsProxy": "${HTTPS_PROXY}",' \
        '      "noProxy": "${NO_PROXY}"' \
        '    }' \
        '  }' \
        '}' \
        'EOF' \
        > docker_proxy.sh && \
    chmod 755 docker_proxy.sh && \
    printf '%s\n' \
        'source /opt/rh/devtoolset-8/enable' \
        'source /opt/rh/rh-python36/enable' \
        'source /opt/rh/rh-ruby26/enable' \
        '' \
        'function cmk_ci() {' \
        '    cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
        '    ninja -v -C ${HOME}/build_output -j 84 all packages strip_targets' \
        '}' \
        'function cmk() {' \
        '    cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
        '    ninja -C ${HOME}/build_output -j 84' \
        '}' \
        'function ct() {' \
        '    cd ${HOME}/build_output && ctest -j 32 --no-compress-output -T test --output-on-failure' \
        '}' \
        'function j() {' \
        '    python3 -m joshua.joshua "${@}"' \
        '}' \
        'function jsd() {' \
        '    j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
        '}' \
        '' \
        'USER_BASHRC="$HOME/src/.bashrc.local"' \
        'if test -f "$USER_BASHRC"; then' \
        '    source $USER_BASHRC' \
        'fi' \
        '' \
        'bash ${HOME}/docker_proxy.sh' \
        >> .bashrc
|
|
@ -1,24 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
ARG VERSION=centos7-latest
FROM ${REPOSITORY}:${VERSION}

RUN useradd distcc && \
    source /opt/rh/devtoolset-8/enable && \
    source /opt/rh/rh-python36/enable && \
    update-distcc-symlinks

EXPOSE 3632
EXPOSE 3633
USER distcc
ENV ALLOW 0.0.0.0/0

ENTRYPOINT distccd \
    --daemon \
    --enable-tcp-insecure \
    --no-detach \
    --port 3632 \
    --log-stderr \
    --log-level info \
    --listen 0.0.0.0 \
    --allow ${ALLOW} \
    --jobs `nproc`
|
|
@ -1,20 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
ARG VERSION=centos7-latest
FROM ${REPOSITORY}:${VERSION}

ENV YCSB_VERSION=ycsb-foundationdb-binding-0.17.0 \
    PATH=${PATH}:/usr/bin

RUN cd /opt \
    && eval curl "-Ls https://github.com/brianfrankcooper/YCSB/releases/download/0.17.0/ycsb-foundationdb-binding-0.17.0.tar.gz" \
    | tar -xzvf -

RUN rm -Rf /opt/${YCSB_VERSION}/lib/fdb-java-5.2.5.jar

# COPY the appropriate fdb-java-*.jar from packages
# COPY binary RPM for foundationdb
# Install binary

WORKDIR "/opt/${YCSB_VERSION}"

ENTRYPOINT ["bin/ycsb.sh"]
|
|
@ -1,99 +0,0 @@
|
|||
#!/usr/bin/env bash

set -e

# we first check whether the user is in the group docker
user=$(id -un)
DIR_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
group=$(id -gn)
uid=$(id -u)
gid=$(id -g)
gids=( $(id -G) )
groups=( $(id -Gn) )
tmpdir="/tmp/fdb-docker-${DIR_UUID}"
image=fdb-dev

pushd .
mkdir ${tmpdir}
cd ${tmpdir}

echo

cat <<EOF >> Dockerfile
FROM foundationdb/foundationdb-dev:0.11.1
RUN yum install -y sudo
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
RUN groupadd -g 1100 sudo
EOF

num_groups=${#gids[@]}
additional_groups="-G sudo"
for ((i=0;i<num_groups;i++))
do
    echo "RUN groupadd -g ${gids[$i]} ${groups[$i]} || true" >> Dockerfile
    if [ ${gids[i]} -ne ${gid} ]
    then
        additional_groups="${additional_groups},${gids[$i]}"
    fi
done

cat <<EOF >> Dockerfile
RUN useradd -u ${uid} -g ${gid} ${additional_groups} -m ${user}

USER ${user}
CMD scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash

EOF

echo "Created ${tmpdir}"
echo "Building Docker container ${image}"
sudo docker build -t ${image} .

popd

echo "Writing startup script"
mkdir -p $HOME/bin
cat <<EOF > $HOME/bin/fdb-dev
#!/usr/bin/bash

if [ -d "\${CCACHE_DIR}" ]
then
    args="-v \${CCACHE_DIR}:\${CCACHE_DIR}"
    args="\${args} -e CCACHE_DIR=\${CCACHE_DIR}"
    args="\${args} -e CCACHE_UMASK=\${CCACHE_UMASK}"
    ccache_args=\$args
fi

if [ -t 1 ] ; then
    TERMINAL_ARGS=-it `# Run in interactive mode and simulate a TTY`
else
    TERMINAL_ARGS=-i `# Run in interactive mode`
fi

sudo docker run --rm `# delete (temporary) image after return` \\
    \${TERMINAL_ARGS} \\
    --privileged=true `# Run in privileged mode ` \\
    --cap-add=SYS_PTRACE \\
    --security-opt seccomp=unconfined \\
    -v "${HOME}:${HOME}" `# Mount home directory` \\
    -w="\$(pwd)" \\
    \${ccache_args} \\
    ${image} "\$@"
EOF

cat <<EOF > $HOME/bin/clangd
#!/usr/bin/bash

fdb-dev scl enable devtoolset-8 rh-python36 rh-ruby24 -- clangd
EOF

if [[ ":$PATH:" != *":$HOME/bin:"* ]]
then
    echo "WARNING: $HOME/bin is not in your PATH!"
    echo -e "\tThis can cause problems with some scripts (like fdb-clangd)"
fi
chmod +x $HOME/bin/fdb-dev
chmod +x $HOME/bin/clangd
echo "To start the dev docker image run $HOME/bin/fdb-dev"
echo "$HOME/bin/clangd can be used for IDE integration"
echo "You can edit these files but be aware that this script will overwrite your changes if you rerun it"
|
|
@ -1,3 +0,0 @@
#!/usr/bin/env bash

cat $1 | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'

@ -1,4 +0,0 @@
#!/usr/bin/env bash

cat $1 | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
@ -1,63 +0,0 @@
#!/usr/bin/env python
import sys


def main():
    if len(sys.argv) != 2:
        print("Usage: txt-to-toml.py [src.txt]")
        return 1

    filename = sys.argv[1]

    indent = "    "
    in_workload = False
    first_test = False
    keys_before_test = False

    for line in open(filename):
        k = ""
        v = ""

        if line.strip().startswith(";"):
            print((indent if in_workload else "") + line.strip().replace(";", "#"))
            continue

        if "=" in line:
            (k, v) = line.strip().split("=")
            (k, v) = (k.strip(), v.strip())

        if k == "testTitle":
            first_test = True
            if in_workload:
                print("")
            in_workload = False
            if keys_before_test:
                print("")
                keys_before_test = False
            print("[[test]]")

        if k == "testName":
            in_workload = True
            print("")
            print(indent + "[[test.workload]]")

        if not first_test:
            keys_before_test = True

        if v.startswith("."):
            v = "0" + v

        if any(c.isalpha() or c in ["/", "!"] for c in v):
            if v != "true" and v != "false":
                v = "'" + v + "'"

        if k == "buggify":
            print("buggify = " + ("true" if v == "'on'" else "false"))
        elif k:
            print((indent if in_workload else "") + k + " = " + v)

    return 0


if __name__ == "__main__":
    sys.exit(main())
@ -136,6 +136,7 @@ function(add_fdb_test)
      ${VALGRIND_OPTION}
      ${ADD_FDB_TEST_TEST_FILES}
      WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
  set_tests_properties("${test_name}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
  get_filename_component(test_dir_full ${first_file} DIRECTORY)
  if(NOT ${test_dir_full} STREQUAL "")
    get_filename_component(test_dir ${test_dir_full} NAME)

@ -394,9 +395,10 @@ function(package_bindingtester)
  add_dependencies(bindingtester copy_bindingtester_binaries)
endfunction()

# Creates a single cluster before running the specified command (usually a ctest test)
function(add_fdbclient_test)
  set(options DISABLED ENABLED)
  set(oneValueArgs NAME)
  set(oneValueArgs NAME PROCESS_NUMBER TEST_TIMEOUT)
  set(multiValueArgs COMMAND)
  cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  if(OPEN_FOR_IDE)

@ -412,12 +414,57 @@ function(add_fdbclient_test)
    message(FATAL_ERROR "COMMAND is a required argument for add_fdbclient_test")
  endif()
  message(STATUS "Adding Client test ${T_NAME}")
  add_test(NAME "${T_NAME}"
  if (T_PROCESS_NUMBER)
    add_test(NAME "${T_NAME}"
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_cluster.py
              --build-dir ${CMAKE_BINARY_DIR}
              --process-number ${T_PROCESS_NUMBER}
              --
              ${T_COMMAND})
  else()
    add_test(NAME "${T_NAME}"
      COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_cluster.py
              --build-dir ${CMAKE_BINARY_DIR}
              --
              ${T_COMMAND})
    set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
  endif()
  if (T_TEST_TIMEOUT)
    set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT ${T_TEST_TIMEOUT})
  else()
    # default timeout
    set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
  endif()
  set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
endfunction()

# Creates 3 distinct clusters before running the specified command.
# This is useful for testing features that require multiple clusters (like the
# multi-cluster FDB client)
function(add_multi_fdbclient_test)
  set(options DISABLED ENABLED)
  set(oneValueArgs NAME)
  set(multiValueArgs COMMAND)
  cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  if(OPEN_FOR_IDE)
    return()
  endif()
  if(NOT T_ENABLED AND T_DISABLED)
    return()
  endif()
  if(NOT T_NAME)
    message(FATAL_ERROR "NAME is a required argument for add_multi_fdbclient_test")
  endif()
  if(NOT T_COMMAND)
    message(FATAL_ERROR "COMMAND is a required argument for add_multi_fdbclient_test")
  endif()
  message(STATUS "Adding Client test ${T_NAME}")
  add_test(NAME "${T_NAME}"
    COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_multi_cluster.py
            --build-dir ${CMAKE_BINARY_DIR}
            --clusters 3
            --
            ${T_COMMAND})
  set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
endfunction()

function(add_java_test)
@ -100,8 +100,7 @@ if(WIN32)
  endif()
  add_compile_options(/W0 /EHsc /bigobj $<$<CONFIG:Release>:/Zi> /MP /FC /Gm-)
  add_compile_definitions(NOMINMAX)
  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
  set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
else()
  set(GCC NO)
  set(CLANG NO)
@ -352,7 +352,7 @@ API for random reads to the DiskQueue. That ability is now required for
peeking, and thus, `IDiskQueue`'s API has been enhanced correspondingly:

``` CPP
enum class CheckHashes { NO, YES };
BOOLEAN_PARAM(CheckHashes);

class IDiskQueue {
	// ...

@ -369,9 +369,9 @@ and not `(start, length)`.
Spilled data, when using spill-by-value, was resistant to bitrot via data being
checksummed internally within SQLite's B-tree. Now that reads can be done
directly, the responsibility for verifying data integrity falls upon the
DiskQueue. `CheckHashes::YES` will cause the DiskQueue to use the checksum in
DiskQueue. `CheckHashes::TRUE` will cause the DiskQueue to use the checksum in
each DiskQueue page to verify data integrity. If an externally maintained
checksum exists to verify the returned data, then `CheckHashes::NO` can be
checksum exists to verify the returned data, then `CheckHashes::FALSE` can be
used to elide the checksumming. A page failing its checksum will cause the
transaction log to die with an `io_error()`.
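A note on the `BOOLEAN_PARAM` change above: replacing a two-value enum with a named boolean type keeps the parameter's name visible at every call site while also blocking implicit conversions from `bool`. The following is a minimal standalone sketch of the idea, assuming only standard C++; `CheckHashes` here is a simplified stand-in, not FDB's exact macro expansion, and `readPage` is an invented method used purely for illustration:

``` CPP
#include <cstdio>

// Simplified stand-in for what a BOOLEAN_PARAM-style macro might generate.
class CheckHashes {
public:
	explicit constexpr CheckHashes(bool v) : value(v) {}
	constexpr operator bool() const { return value; }
	static const CheckHashes True;
	static const CheckHashes False;

private:
	bool value;
};
const CheckHashes CheckHashes::True{ true };
const CheckHashes CheckHashes::False{ false };

// Hypothetical read method in the spirit of the IDiskQueue change above.
void readPage(long offset, int length, CheckHashes checkHashes) {
	std::printf("read %d bytes at %ld, verify checksum: %s\n", length, offset, checkHashes ? "yes" : "no");
}

int main() {
	readPage(0, 4096, CheckHashes::True); // intent is visible at the call site
	// readPage(0, 4096, true);           // would not compile: constructor is explicit
	return 0;
}
```

The explicit constructor is the key design choice: a bare `true` or `false` can no longer be passed by accident, so the call site must spell out what the flag means.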
@ -1,4 +1,8 @@
add_subdirectory(tutorial)
if(WIN32)
  return()
endif()

# build a virtualenv
set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
@ -598,7 +598,7 @@ int main(int argc, char** argv) {
	Error::init();

	StringRef url(param.container_url);
	setupNetwork(0, true);
	setupNetwork(0, UseMetrics::True);

	TraceEvent::setNetworkThread();
	openTraceFile(NetworkAddress(), 10 << 20, 10 << 20, param.log_dir, "convert", param.trace_log_group);

@ -579,7 +579,7 @@ int main(int argc, char** argv) {
	Error::init();

	StringRef url(param.container_url);
	setupNetwork(0, true);
	setupNetwork(0, UseMetrics::True);

	TraceEvent::setNetworkThread();
	openTraceFile(NetworkAddress(), 10 << 20, 10 << 20, param.log_dir, "decode", param.trace_log_group);

@ -133,6 +133,7 @@ enum {
	OPT_WAITFORDONE,
	OPT_BACKUPKEYS_FILTER,
	OPT_INCREMENTALONLY,
	OPT_ENCRYPTION_KEY_FILE,

	// Backup Modify
	OPT_MOD_ACTIVE_INTERVAL,

@ -259,6 +260,7 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
	{ OPT_KNOB, "--knob_", SO_REQ_SEP },
	{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
	{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
	{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif

@ -697,6 +699,7 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
	{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
	{ OPT_RESTORE_BEGIN_VERSION, "--begin_version", SO_REQ_SEP },
	{ OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY, "--inconsistent_snapshot_only", SO_NONE },
	{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
#ifndef TLS_DISABLED
	TLS_OPTION_FLAGS
#endif

@ -1089,6 +1092,8 @@ static void printBackupUsage(bool devhelp) {
	       "  Performs incremental backup without the base backup.\n"
	       "  This option indicates to the backup agent that it will only need to record the log files, "
	       "and ignore the range files.\n");
	printf("  --encryption_key_file"
	       "  The AES-128-GCM key in the provided file is used for encrypting backup files.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif

@ -1162,6 +1167,8 @@ static void printRestoreUsage(bool devhelp) {
	       "  To be used in conjunction with incremental restore.\n"
	       "  Indicates to the backup agent to only begin replaying log files from a certain version, "
	       "instead of the entire set.\n");
	printf("  --encryption_key_file"
	       "  The AES-128-GCM key in the provided file is used for decrypting backup files.\n");
#ifndef TLS_DISABLED
	printf(TLS_HELP);
#endif
@ -1463,7 +1470,7 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
                                         std::string id,
                                         ProgramExe exe,
                                         Database dest,
                                         bool snapshot = false) {
                                         Snapshot snapshot = Snapshot::False) {
	// This process will write a document that looks like this:
	// { backup : { $expires : {<subdoc>}, version: <version from approximately 30 seconds from now> }
	// so that the value under 'backup' will eventually expire to null and thus be ignored by

@ -1639,7 +1646,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
                                 std::string name,
                                 std::string id,
                                 int limit = 1) {
	state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
	state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, Snapshot::True));
	state bool readMore = false;
	state int i;
	for (i = 0; i < docs.size(); ++i) {

@ -1668,7 +1675,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
	}
	if (readMore) {
		limit = 10000;
		RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
		RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, Snapshot::True));
		docs = std::move(docs2);
		readMore = false;
	}

@ -1705,7 +1712,10 @@ ACTOR Future<json_spirit::mObject> getLayerStatus(Database src, std::string root

// Read layer status for this layer and get the total count of agent processes (instances) then adjust the poll delay
// based on that and BACKUP_AGGREGATE_POLL_RATE
ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::string name, double* pollDelay) {
ACTOR Future<Void> updateAgentPollRate(Database src,
                                       std::string rootKey,
                                       std::string name,
                                       std::shared_ptr<double> pollDelay) {
	loop {
		try {
			json_spirit::mObject status = wait(getLayerStatus(src, rootKey));

@ -1727,7 +1737,7 @@ ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::s
ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
                                     std::string name,
                                     ProgramExe exe,
                                     double* pollDelay,
                                     std::shared_ptr<double> pollDelay,
                                     Database taskDest = Database(),
                                     std::string id = nondeterministicRandom()->randomUniqueID().toString()) {
	state std::string metaKey = layerStatusMetaPrefixRange.begin.toString() + "json/" + name;

@ -1757,7 +1767,8 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
		try {
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
			state Future<std::string> futureStatusDoc = getLayerStatus(tr, name, id, exe, taskDest, true);
			state Future<std::string> futureStatusDoc =
			    getLayerStatus(tr, name, id, exe, taskDest, Snapshot::True);
			wait(cleanupStatus(tr, rootKey, name, id));
			std::string statusdoc = wait(futureStatusDoc);
			tr->set(instanceKey, statusdoc);

@ -1774,7 +1785,7 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,

			// Now that status was written at least once by this process (and hopefully others), start the poll rate
			// control updater if it wasn't started yet
			if (!pollRateUpdater.isValid() && pollDelay != nullptr)
			if (!pollRateUpdater.isValid())
				pollRateUpdater = updateAgentPollRate(statusUpdateDest, rootKey, name, pollDelay);
		} catch (Error& e) {
			TraceEvent(SevWarnAlways, "UnableToWriteStatus").error(e);

@ -1784,17 +1795,17 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
}

ACTOR Future<Void> runDBAgent(Database src, Database dest) {
	state double pollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;
	state std::shared_ptr<double> pollDelay = std::make_shared<double>(1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE);
	std::string id = nondeterministicRandom()->randomUniqueID().toString();
	state Future<Void> status = statusUpdateActor(src, "dr_backup", ProgramExe::DR_AGENT, &pollDelay, dest, id);
	state Future<Void> status = statusUpdateActor(src, "dr_backup", ProgramExe::DR_AGENT, pollDelay, dest, id);
	state Future<Void> status_other =
	    statusUpdateActor(dest, "dr_backup_dest", ProgramExe::DR_AGENT, &pollDelay, dest, id);
	    statusUpdateActor(dest, "dr_backup_dest", ProgramExe::DR_AGENT, pollDelay, dest, id);

	state DatabaseBackupAgent backupAgent(src);

	loop {
		try {
			wait(backupAgent.run(dest, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
			wait(backupAgent.run(dest, pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
			break;
		} catch (Error& e) {
			if (e.code() == error_code_operation_cancelled)

@ -1811,14 +1822,14 @@ ACTOR Future<Void> runDBAgent(Database src, Database dest) {
}

ACTOR Future<Void> runAgent(Database db) {
	state double pollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;
	state Future<Void> status = statusUpdateActor(db, "backup", ProgramExe::AGENT, &pollDelay);
	state std::shared_ptr<double> pollDelay = std::make_shared<double>(1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE);
	state Future<Void> status = statusUpdateActor(db, "backup", ProgramExe::AGENT, pollDelay);

	state FileBackupAgent backupAgent;

	loop {
		try {
			wait(backupAgent.run(db, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
			wait(backupAgent.run(db, pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
			break;
		} catch (Error& e) {
			if (e.code() == error_code_operation_cancelled)
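The hunks above change `pollDelay` from a stack-allocated `double` shared by raw pointer into a `std::shared_ptr<double>`. The motivation, as the surrounding code suggests, is that several long-running actors read (and one retunes) the same delay, and shared ownership removes any question of the pointee outliving its frame. A minimal sketch of the pattern in plain C++; `updatePollRate` and `agentIteration` are invented stand-ins, not FDB functions:

``` CPP
#include <cstdio>
#include <memory>

// A controller retunes the delay; workers read it. With a raw double* the
// pointee lives in some caller's frame; a shared_ptr keeps the value alive
// for as long as any task still holds a reference.
void updatePollRate(std::shared_ptr<double> pollDelay, int agentCount) {
	*pollDelay = 1.0 / agentCount; // every holder observes the new delay
}

void agentIteration(std::shared_ptr<double> pollDelay) {
	std::printf("sleeping %.3f s between polls\n", *pollDelay);
}

int main() {
	auto pollDelay = std::make_shared<double>(1.0);
	updatePollRate(pollDelay, 20);
	agentIteration(pollDelay); // prints 0.050
	return 0;
}
```

This also explains the removed `pollDelay != nullptr` check above: a `shared_ptr` handed in by value documents that the value always exists.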
@ -1846,7 +1857,8 @@ ACTOR Future<Void> submitDBBackup(Database src,
		backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
	}

	wait(backupAgent.submitBackup(dest, KeyRef(tagName), backupRanges, false, StringRef(), StringRef(), true));
	wait(backupAgent.submitBackup(
	    dest, KeyRef(tagName), backupRanges, StopWhenDone::False, StringRef(), StringRef(), LockDB::True));

	// Check if a backup agent is running
	bool agentRunning = wait(backupAgent.checkActive(dest));

@ -1890,10 +1902,10 @@ ACTOR Future<Void> submitBackup(Database db,
                                Standalone<VectorRef<KeyRangeRef>> backupRanges,
                                std::string tagName,
                                bool dryRun,
                                bool waitForCompletion,
                                bool stopWhenDone,
                                bool usePartitionedLog,
                                bool incrementalBackupOnly) {
                                WaitForComplete waitForCompletion,
                                StopWhenDone stopWhenDone,
                                UsePartitionedLog usePartitionedLog,
                                IncrementalBackupOnly incrementalBackupOnly) {
	try {
		state FileBackupAgent backupAgent;

@ -1996,7 +2008,7 @@ ACTOR Future<Void> switchDBBackup(Database src,
                                  Database dest,
                                  Standalone<VectorRef<KeyRangeRef>> backupRanges,
                                  std::string tagName,
                                  bool forceAction) {
                                  ForceAction forceAction) {
	try {
		state DatabaseBackupAgent backupAgent(src);

@ -2046,7 +2058,7 @@ ACTOR Future<Void> statusDBBackup(Database src, Database dest, std::string tagNa
	return Void();
}

ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showErrors, bool json) {
ACTOR Future<Void> statusBackup(Database db, std::string tagName, ShowErrors showErrors, bool json) {
	try {
		state FileBackupAgent backupAgent;

@ -2063,11 +2075,15 @@ ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showError
	return Void();
}

ACTOR Future<Void> abortDBBackup(Database src, Database dest, std::string tagName, bool partial, bool dstOnly) {
ACTOR Future<Void> abortDBBackup(Database src,
                                 Database dest,
                                 std::string tagName,
                                 PartialBackup partial,
                                 DstOnly dstOnly) {
	try {
		state DatabaseBackupAgent backupAgent(src);

		wait(backupAgent.abortBackup(dest, Key(tagName), partial, false, dstOnly));
		wait(backupAgent.abortBackup(dest, Key(tagName), partial, AbortOldBackup::False, dstOnly));
		wait(backupAgent.unlockBackup(dest, Key(tagName)));

		printf("The DR on tag `%s' was successfully aborted.\n", printable(StringRef(tagName)).c_str());

@ -2118,7 +2134,7 @@ ACTOR Future<Void> abortBackup(Database db, std::string tagName) {
	return Void();
}

ACTOR Future<Void> cleanupMutations(Database db, bool deleteData) {
ACTOR Future<Void> cleanupMutations(Database db, DeleteData deleteData) {
	try {
		wait(cleanupBackup(db, deleteData));
	} catch (Error& e) {

@ -2131,7 +2147,7 @@ ACTOR Future<Void> cleanupMutations(Database db, bool deleteData) {
	return Void();
}

ACTOR Future<Void> waitBackup(Database db, std::string tagName, bool stopWhenDone) {
ACTOR Future<Void> waitBackup(Database db, std::string tagName, StopWhenDone stopWhenDone) {
	try {
		state FileBackupAgent backupAgent;

@ -2150,7 +2166,7 @@ ACTOR Future<Void> waitBackup(Database db, std::string tagName, bool stopWhenDon
	return Void();
}

ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, bool waitForCompletion) {
ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, WaitForComplete waitForCompletion) {
	try {
		state FileBackupAgent backupAgent;

@ -2220,7 +2236,9 @@ ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause
	return Void();
}

Reference<IBackupContainer> openBackupContainer(const char* name, std::string destinationContainer) {
Reference<IBackupContainer> openBackupContainer(const char* name,
                                                std::string destinationContainer,
                                                Optional<std::string> const& encryptionKeyFile = {}) {
	// Error, if no dest container was specified
	if (destinationContainer.empty()) {
		fprintf(stderr, "ERROR: No backup destination was specified.\n");

@ -2230,7 +2248,7 @@ Reference<IBackupContainer> openBackupContainer(const char* name, std::string de

	Reference<IBackupContainer> c;
	try {
		c = IBackupContainer::openContainer(destinationContainer);
		c = IBackupContainer::openContainer(destinationContainer, encryptionKeyFile);
	} catch (Error& e) {
		std::string msg = format("ERROR: '%s' on URL '%s'", e.what(), destinationContainer.c_str());
		if (e.code() == error_code_backup_invalid_url && !IBackupContainer::lastOpenError.empty()) {
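The two hunks above thread an optional encryption key file through `openBackupContainer` as a defaulted parameter, which keeps every existing call site source-compatible. A sketch of that signature-evolution pattern using `std::optional` as an analogue for FDB's own `Optional<>`; `openContainer` here is an invented example, not the real API:

``` CPP
#include <cstdio>
#include <optional>
#include <string>

// Appending a defaulted optional parameter preserves source compatibility
// for every existing caller while letting new callers opt in.
void openContainer(const std::string& url, const std::optional<std::string>& encryptionKeyFile = {}) {
	std::printf("open %s, key file: %s\n", url.c_str(), encryptionKeyFile ? encryptionKeyFile->c_str() : "<none>");
}

int main() {
	openContainer("file:///backups/b1");            // old call sites unchanged
	openContainer("file:///backups/b1", "key.bin"); // new capability opted into
	return 0;
}
```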
@ -2255,12 +2273,13 @@ ACTOR Future<Void> runRestore(Database db,
                              Version targetVersion,
                              std::string targetTimestamp,
                              bool performRestore,
                              bool verbose,
                              bool waitForDone,
                              Verbose verbose,
                              WaitForComplete waitForDone,
                              std::string addPrefix,
                              std::string removePrefix,
                              bool onlyAppyMutationLogs,
                              bool inconsistentSnapshotOnly) {
                              OnlyApplyMutationLogs onlyApplyMutationLogs,
                              InconsistentSnapshotOnly inconsistentSnapshotOnly,
                              Optional<std::string> encryptionKeyFile) {
	if (ranges.empty()) {
		ranges.push_back_deep(ranges.arena(), normalKeys);
	}

@ -2296,7 +2315,8 @@ ACTOR Future<Void> runRestore(Database db,
	try {
		state FileBackupAgent backupAgent;

		state Reference<IBackupContainer> bc = openBackupContainer(exeRestore.toString().c_str(), container);
		state Reference<IBackupContainer> bc =
		    openBackupContainer(exeRestore.toString().c_str(), container, encryptionKeyFile);

		// If targetVersion is unset then use the maximum restorable version from the backup description
		if (targetVersion == invalidVersion) {

@ -2306,7 +2326,7 @@ ACTOR Future<Void> runRestore(Database db,

		BackupDescription desc = wait(bc->describeBackup());

		if (onlyAppyMutationLogs && desc.contiguousLogEnd.present()) {
		if (onlyApplyMutationLogs && desc.contiguousLogEnd.present()) {
			targetVersion = desc.contiguousLogEnd.get() - 1;
		} else if (desc.maxRestorableVersion.present()) {
			targetVersion = desc.maxRestorableVersion.get();

@ -2330,10 +2350,11 @@ ACTOR Future<Void> runRestore(Database db,
		                                         verbose,
		                                         KeyRef(addPrefix),
		                                         KeyRef(removePrefix),
		                                         true,
		                                         onlyAppyMutationLogs,
		                                         LockDB::True,
		                                         onlyApplyMutationLogs,
		                                         inconsistentSnapshotOnly,
		                                         beginVersion));
		                                         beginVersion,
		                                         encryptionKeyFile));

		if (waitForDone && verbose) {
			// If restore is now complete then report version restored

@ -2369,8 +2390,8 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
                                      Standalone<VectorRef<KeyRangeRef>> ranges,
                                      Version dbVersion,
                                      bool performRestore,
                                      bool verbose,
                                      bool waitForDone) {
                                      Verbose verbose,
                                      WaitForComplete waitForDone) {
	try {
		state FileBackupAgent backupAgent;
		state Version restoreVersion = invalidVersion;

@ -2413,7 +2434,7 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
		                                  ranges,
		                                  KeyRef(container),
		                                  dbVersion,
		                                  true,
		                                  LockDB::True,
		                                  randomUID,
		                                  LiteralStringRef(""),
		                                  LiteralStringRef("")));

@ -2512,7 +2533,8 @@ ACTOR Future<Void> expireBackupData(const char* name,
                                    Database db,
                                    bool force,
                                    Version restorableAfterVersion,
                                    std::string restorableAfterDatetime) {
                                    std::string restorableAfterDatetime,
                                    Optional<std::string> encryptionKeyFile) {
	if (!endDatetime.empty()) {
		Version v = wait(timeKeeperVersionFromDatetime(endDatetime, db));
		endVersion = v;

@ -2531,7 +2553,7 @@ ACTOR Future<Void> expireBackupData(const char* name,
	}

	try {
		Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
		Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer, encryptionKeyFile);

		state IBackupContainer::ExpireProgress progress;
		state std::string lastProgress;

@ -2613,9 +2635,10 @@ ACTOR Future<Void> describeBackup(const char* name,
                                  std::string destinationContainer,
                                  bool deep,
                                  Optional<Database> cx,
                                  bool json) {
                                  bool json,
                                  Optional<std::string> encryptionKeyFile) {
	try {
		Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
		Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer, encryptionKeyFile);
		state BackupDescription desc = wait(c->describeBackup(deep));
		if (cx.present())
			wait(desc.resolveVersionTimes(cx.get()));

@ -2645,7 +2668,7 @@ ACTOR Future<Void> queryBackup(const char* name,
                               Version restoreVersion,
                               std::string originalClusterFile,
                               std::string restoreTimestamp,
                               bool verbose) {
                               Verbose verbose) {
	state UID operationId = deterministicRandom()->randomUniqueID();
	state JsonBuilderObject result;
	state std::string errorMessage;

@ -2838,7 +2861,7 @@ ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOp
	}

	state BackupConfig config(uidFlag.get().first);
	EBackupState s = wait(config.stateEnum().getOrThrow(tr, false, backup_invalid_info()));
	EBackupState s = wait(config.stateEnum().getOrThrow(tr, Snapshot::False, backup_invalid_info()));
	if (!FileBackupAgent::isRunnable(s)) {
		fprintf(stderr, "Backup on tag '%s' is not runnable.\n", tagName.c_str());
		throw backup_error();

@ -2858,7 +2881,7 @@ ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOp
	}

	if (options.activeSnapshotIntervalSeconds.present()) {
		Version begin = wait(config.snapshotBeginVersion().getOrThrow(tr, false, backup_error()));
		Version begin = wait(config.snapshotBeginVersion().getOrThrow(tr, Snapshot::False, backup_error()));
		config.snapshotTargetEndVersion().set(tr,
		                                      begin + ((int64_t)options.activeSnapshotIntervalSeconds.get() *
		                                               CLIENT_KNOBS->CORE_VERSIONSPERSECOND));

@ -3244,13 +3267,13 @@ int main(int argc, char* argv[]) {
	Version beginVersion = invalidVersion;
	Version restoreVersion = invalidVersion;
	std::string restoreTimestamp;
	bool waitForDone = false;
	bool stopWhenDone = true;
	bool usePartitionedLog = false; // Set to true to use new backup system
	bool incrementalBackupOnly = false;
	bool onlyAppyMutationLogs = false;
	bool inconsistentSnapshotOnly = false;
	bool forceAction = false;
	WaitForComplete waitForDone{ false };
	StopWhenDone stopWhenDone{ true };
	UsePartitionedLog usePartitionedLog{ false }; // Set to true to use new backup system
	IncrementalBackupOnly incrementalBackupOnly{ false };
	OnlyApplyMutationLogs onlyApplyMutationLogs{ false };
	InconsistentSnapshotOnly inconsistentSnapshotOnly{ false };
	ForceAction forceAction{ false };
	bool trace = false;
	bool quietDisplay = false;
	bool dryRun = false;
@ -3260,8 +3283,8 @@ int main(int argc, char* argv[]) {
	uint64_t traceRollSize = TRACE_DEFAULT_ROLL_SIZE;
	uint64_t traceMaxLogsSize = TRACE_DEFAULT_MAX_LOGS_SIZE;
	ESOError lastError;
	bool partial = true;
	bool dstOnly = false;
	PartialBackup partial{ true };
	DstOnly dstOnly{ false };
	LocalityData localities;
	uint64_t memLimit = 8LL << 30;
	Optional<uint64_t> ti;

@ -3271,7 +3294,8 @@ int main(int argc, char* argv[]) {
	std::string restoreClusterFileDest;
	std::string restoreClusterFileOrig;
	bool jsonOutput = false;
	bool deleteData = false;
	DeleteData deleteData{ false };
	Optional<std::string> encryptionKeyFile;

	BackupModifyOptions modifyOptions;

@ -3355,13 +3379,13 @@ int main(int argc, char* argv[]) {
			dryRun = true;
			break;
		case OPT_DELETE_DATA:
			deleteData = true;
			deleteData.set(true);
			break;
		case OPT_MIN_CLEANUP_SECONDS:
			knobs.emplace_back("min_cleanup_seconds", args->OptionArg());
			break;
		case OPT_FORCE:
			forceAction = true;
			forceAction.set(true);
			break;
		case OPT_TRACE:
			trace = true;

@ -3441,10 +3465,10 @@ int main(int argc, char* argv[]) {
			sourceClusterFile = args->OptionArg();
			break;
		case OPT_CLEANUP:
			partial = false;
			partial.set(false);
			break;
		case OPT_DSTONLY:
			dstOnly = true;
			dstOnly.set(true);
			break;
		case OPT_KNOB: {
			std::string syn = args->OptionSyntax();

@ -3503,17 +3527,20 @@ int main(int argc, char* argv[]) {
			modifyOptions.verifyUID = args->OptionArg();
			break;
		case OPT_WAITFORDONE:
			waitForDone = true;
			waitForDone.set(true);
			break;
		case OPT_NOSTOPWHENDONE:
			stopWhenDone = false;
			stopWhenDone.set(false);
			break;
		case OPT_USE_PARTITIONED_LOG:
			usePartitionedLog = true;
			usePartitionedLog.set(true);
			break;
		case OPT_INCREMENTALONLY:
			incrementalBackupOnly = true;
			onlyAppyMutationLogs = true;
			incrementalBackupOnly.set(true);
			onlyApplyMutationLogs.set(true);
			break;
		case OPT_ENCRYPTION_KEY_FILE:
			encryptionKeyFile = args->OptionArg();
			break;
		case OPT_RESTORECONTAINER:
			restoreContainer = args->OptionArg();

@ -3565,7 +3592,7 @@ int main(int argc, char* argv[]) {
			break;
		}
		case OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY: {
			inconsistentSnapshotOnly = true;
			inconsistentSnapshotOnly.set(true);
			break;
		}
#ifdef _WIN32

@ -3704,7 +3731,7 @@ int main(int argc, char* argv[]) {
		}
	}

	IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::NO, IsSimulated::NO);
	IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
	auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
	for (const auto& [knobName, knobValueString] : knobs) {
		try {

@ -3731,7 +3758,7 @@ int main(int argc, char* argv[]) {
	}

	// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
	g_knobs.initialize(Randomize::NO, IsSimulated::NO);
	g_knobs.initialize(Randomize::False, IsSimulated::False);

	if (trace) {
		if (!traceLogGroup.empty())

@ -3769,7 +3796,7 @@ int main(int argc, char* argv[]) {
	Reference<IBackupContainer> c;

	try {
		setupNetwork(0, true);
		setupNetwork(0, UseMetrics::True);
	} catch (Error& e) {
		fprintf(stderr, "ERROR: %s\n", e.what());
		return FDB_EXIT_ERROR;

@ -3813,7 +3840,7 @@ int main(int argc, char* argv[]) {
	}

	try {
		db = Database::createDatabase(ccf, -1, true, localities);
		db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
	} catch (Error& e) {
		fprintf(stderr, "ERROR: %s\n", e.what());
		fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());

@ -3833,7 +3860,7 @@ int main(int argc, char* argv[]) {
	}

	try {
		sourceDb = Database::createDatabase(sourceCcf, -1, true, localities);
		sourceDb = Database::createDatabase(sourceCcf, -1, IsInternal::True, localities);
	} catch (Error& e) {
		fprintf(stderr, "ERROR: %s\n", e.what());
		fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", sourceCcf->getFilename().c_str());

@ -3853,7 +3880,7 @@ int main(int argc, char* argv[]) {
		if (!initCluster())
			return FDB_EXIT_ERROR;
		// Test out the backup url to make sure it parses.  Doesn't test to make sure it's actually writeable.
		openBackupContainer(argv[0], destinationContainer);
		openBackupContainer(argv[0], destinationContainer, encryptionKeyFile);
		f = stopAfter(submitBackup(db,
		                           destinationContainer,
		                           initialSnapshotIntervalSeconds,

@ -3879,7 +3906,7 @@ int main(int argc, char* argv[]) {
	case BackupType::STATUS:
		if (!initCluster())
			return FDB_EXIT_ERROR;
		f = stopAfter(statusBackup(db, tagName, true, jsonOutput));
		f = stopAfter(statusBackup(db, tagName, ShowErrors::True, jsonOutput));
		break;

	case BackupType::ABORT:

@ -3932,7 +3959,8 @@ int main(int argc, char* argv[]) {
		                           db,
		                           forceAction,
		                           expireRestorableAfterVersion,
		                           expireRestorableAfterDatetime));
		                           expireRestorableAfterDatetime,
		                           encryptionKeyFile));
		break;

	case BackupType::DELETE_BACKUP:

@ -3952,7 +3980,8 @@ int main(int argc, char* argv[]) {
		                             destinationContainer,
		                             describeDeep,
		                             describeTimestamps ? Optional<Database>(db) : Optional<Database>(),
		                             jsonOutput));
		                             jsonOutput,
		                             encryptionKeyFile));
		break;

	case BackupType::LIST:

@ -3968,7 +3997,7 @@ int main(int argc, char* argv[]) {
		                          restoreVersion,
		                          restoreClusterFileOrig,
		                          restoreTimestamp,
		                          !quietDisplay));
		                          Verbose{ !quietDisplay }));
		break;

	case BackupType::DUMP:

@ -4029,15 +4058,16 @@ int main(int argc, char* argv[]) {
			                        restoreVersion,
			                        restoreTimestamp,
			                        !dryRun,
			                        !quietDisplay,
			                        Verbose{ !quietDisplay },
			                        waitForDone,
			                        addPrefix,
			                        removePrefix,
			                        onlyAppyMutationLogs,
			                        inconsistentSnapshotOnly));
			                        onlyApplyMutationLogs,
			                        inconsistentSnapshotOnly,
			                        encryptionKeyFile));
			break;
		case RestoreType::WAIT:
			f = stopAfter(success(ba.waitRestore(db, KeyRef(tagName), true)));
			f = stopAfter(success(ba.waitRestore(db, KeyRef(tagName), Verbose::True)));
			break;
		case RestoreType::ABORT:
			f = stopAfter(

@ -4097,8 +4127,14 @@ int main(int argc, char* argv[]) {
		// TODO: We have not implemented the code commented out in this case
		switch (restoreType) {
		case RestoreType::START:
			f = stopAfter(runFastRestoreTool(
			    db, tagName, restoreContainer, backupKeys, restoreVersion, !dryRun, !quietDisplay, waitForDone));
			f = stopAfter(runFastRestoreTool(db,
			                                 tagName,
			                                 restoreContainer,
			                                 backupKeys,
			                                 restoreVersion,
			                                 !dryRun,
			                                 Verbose{ !quietDisplay },
			                                 waitForDone));
			break;
		case RestoreType::WAIT:
			printf("[TODO][ERROR] FastRestore does not support RESTORE_WAIT yet!\n");
@ -3151,7 +3151,7 @@ struct CLIOptions {
	}

	// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
	g_knobs.initialize(Randomize::NO, IsSimulated::NO);
	g_knobs.initialize(Randomize::False, IsSimulated::False);
}

int processArg(CSimpleOpt& args) {

@ -3322,7 +3322,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
	TraceEvent::setNetworkThread();

	try {
		db = Database::createDatabase(ccf, -1, false);
		db = Database::createDatabase(ccf, -1, IsInternal::False);
		if (!opt.exec.present()) {
			printf("Using cluster file `%s'.\n", ccf->getFilename().c_str());
		}

@ -4924,7 +4924,7 @@ int main(int argc, char** argv) {

	registerCrashHandler();

	IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::NO, IsSimulated::NO);
	IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);

#ifdef __unixish__
	struct sigaction act;
@ -256,7 +256,7 @@ public:
	    m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {

		// Add first part
		m_parts.push_back(Reference<Part>(new Part(1, m_bstore->knobs.multipart_min_part_size)));
		m_parts.push_back(makeReference<Part>(1, m_bstore->knobs.multipart_min_part_size));
	}
};
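The hunk above swaps an explicit `new` wrapped in a `Reference<Part>` for the `makeReference<Part>(...)` factory. The same idea exists in standard C++ as `std::make_shared`, which the following sketch uses as an analogue; the `Part` fields here are invented for illustration:

``` CPP
#include <memory>

// Invented Part with the same shape as a two-argument constructor.
struct Part {
	int number;
	long minSize;
	Part(int number, long minSize) : number(number), minSize(minSize) {}
};

int main() {
	// Old style: a raw `new` escapes into the smart-pointer constructor.
	std::shared_ptr<Part> a(new Part(1, 5 << 20));
	// Factory style: no raw new at the call site, less repetition.
	auto b = std::make_shared<Part>(1, 5 << 20);
	return (a->number == b->number) ? 0 : 1;
}
```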
@ -83,6 +83,6 @@ TEST_CASE("/asynctaskthread/add") {
		clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, 100));
	}
	wait(waitForAll(clients));
	ASSERT(sum == 1000);
	ASSERT_EQ(sum, 1000);
	return Void();
}
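The point of `ASSERT_EQ(a, b)` over `ASSERT(a == b)` is that the macro can report both operand values when the comparison fails. A self-contained stand-in that illustrates the mechanism; FDB's real macro differs in details such as trace-log integration:

``` CPP
#include <cstdio>
#include <cstdlib>

// Minimal stand-in: on failure, print both operand expressions and values.
#define ASSERT_EQ(a, b)                                                                            \
	do {                                                                                           \
		auto _a = (a);                                                                             \
		auto _b = (b);                                                                             \
		if (!(_a == _b)) {                                                                         \
			std::fprintf(stderr, "%s != %s (%lld vs %lld)\n", #a, #b, (long long)_a, (long long)_b); \
			std::abort();                                                                          \
		}                                                                                          \
	} while (0)

int main() {
	int sum = 1000;
	ASSERT_EQ(sum, 1000); // passes; on failure both values would be printed
	return 0;
}
```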
@ -36,6 +36,26 @@
#include "fdbclient/BackupContainer.h"
#include "flow/actorcompiler.h" // has to be last include

FDB_DECLARE_BOOLEAN_PARAM(LockDB);
FDB_DECLARE_BOOLEAN_PARAM(UnlockDB);
FDB_DECLARE_BOOLEAN_PARAM(StopWhenDone);
FDB_DECLARE_BOOLEAN_PARAM(Verbose);
FDB_DECLARE_BOOLEAN_PARAM(WaitForComplete);
FDB_DECLARE_BOOLEAN_PARAM(ForceAction);
FDB_DECLARE_BOOLEAN_PARAM(Terminator);
FDB_DECLARE_BOOLEAN_PARAM(IncrementalBackupOnly);
FDB_DECLARE_BOOLEAN_PARAM(UsePartitionedLog);
FDB_DECLARE_BOOLEAN_PARAM(OnlyApplyMutationLogs);
FDB_DECLARE_BOOLEAN_PARAM(InconsistentSnapshotOnly);
FDB_DECLARE_BOOLEAN_PARAM(ShowErrors);
FDB_DECLARE_BOOLEAN_PARAM(AbortOldBackup);
FDB_DECLARE_BOOLEAN_PARAM(DstOnly); // TODO: More descriptive name?
FDB_DECLARE_BOOLEAN_PARAM(WaitForDestUID);
FDB_DECLARE_BOOLEAN_PARAM(CheckBackupUID);
FDB_DECLARE_BOOLEAN_PARAM(DeleteData);
FDB_DECLARE_BOOLEAN_PARAM(SetValidation);
FDB_DECLARE_BOOLEAN_PARAM(PartialBackup);

class BackupAgentBase : NonCopyable {
public:
	// Time formatter for anything backup or restore related
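With this many boolean knobs in one API, the main hazard of plain `bool` parameters is silent argument transposition. Distinct parameter types turn that mistake into a compile error. A sketch using simplified stand-ins for two of the declarations above (FDB's macro-generated classes are richer, but the type-safety argument is the same):

``` CPP
// Simplified stand-ins for the macro-generated PartialBackup and DstOnly types.
struct PartialBackup {
	explicit PartialBackup(bool v) : value(v) {}
	bool value;
};
struct DstOnly {
	explicit DstOnly(bool v) : value(v) {}
	bool value;
};

// With `bool partial, bool dstOnly` the two arguments below could be swapped
// without any diagnostic; with distinct types the swap does not compile.
void abortBackup(PartialBackup partial, DstOnly dstOnly) {}

int main() {
	abortBackup(PartialBackup(false), DstOnly(true)); // OK
	// abortBackup(DstOnly(true), PartialBackup(false)); // compile error
	return 0;
}
```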
@ -65,6 +85,7 @@ public:
	static const Key keyConfigStopWhenDoneKey;
	static const Key keyStateStatus;
	static const Key keyStateStop;
	static const Key keyStateLogBeginVersion;
	static const Key keyLastUid;
	static const Key keyBeginKey;
	static const Key keyEndKey;
@ -82,151 +103,26 @@ public:
	static const Key keySourceStates;
	static const Key keySourceTagName;

	static const int logHeaderSize;
	static constexpr int logHeaderSize = 12;

	// Convert the status text to an enumerated value
	static EnumState getState(std::string stateText) {
		auto enState = EnumState::STATE_ERRORED;

		if (stateText.empty()) {
			enState = EnumState::STATE_NEVERRAN;
		}

		else if (!stateText.compare("has been submitted")) {
			enState = EnumState::STATE_SUBMITTED;
		}

		else if (!stateText.compare("has been started")) {
			enState = EnumState::STATE_RUNNING;
		}

		else if (!stateText.compare("is differential")) {
			enState = EnumState::STATE_RUNNING_DIFFERENTIAL;
		}

		else if (!stateText.compare("has been completed")) {
			enState = EnumState::STATE_COMPLETED;
		}

		else if (!stateText.compare("has been aborted")) {
			enState = EnumState::STATE_ABORTED;
		}

		else if (!stateText.compare("has been partially aborted")) {
			enState = EnumState::STATE_PARTIALLY_ABORTED;
		}

		return enState;
	}
	static EnumState getState(std::string const& stateText);

	// Convert the status enum to a text description
	static const char* getStateText(EnumState enState) {
		const char* stateText;

		switch (enState) {
		case EnumState::STATE_ERRORED:
			stateText = "has errored";
			break;
		case EnumState::STATE_NEVERRAN:
			stateText = "has never been started";
			break;
		case EnumState::STATE_SUBMITTED:
			stateText = "has been submitted";
			break;
		case EnumState::STATE_RUNNING:
			stateText = "has been started";
			break;
		case EnumState::STATE_RUNNING_DIFFERENTIAL:
			stateText = "is differential";
			break;
		case EnumState::STATE_COMPLETED:
			stateText = "has been completed";
			break;
		case EnumState::STATE_ABORTED:
			stateText = "has been aborted";
			break;
		case EnumState::STATE_PARTIALLY_ABORTED:
			stateText = "has been partially aborted";
			break;
		default:
			stateText = "<undefined>";
			break;
		}

		return stateText;
	}
	static const char* getStateText(EnumState enState);

	// Convert the status enum to a name
	static const char* getStateName(EnumState enState) {
		const char* s;

		switch (enState) {
		case EnumState::STATE_ERRORED:
			s = "Errored";
			break;
		case EnumState::STATE_NEVERRAN:
			s = "NeverRan";
			break;
		case EnumState::STATE_SUBMITTED:
			s = "Submitted";
			break;
		case EnumState::STATE_RUNNING:
			s = "Running";
			break;
		case EnumState::STATE_RUNNING_DIFFERENTIAL:
			s = "RunningDifferentially";
			break;
		case EnumState::STATE_COMPLETED:
			s = "Completed";
			break;
		case EnumState::STATE_ABORTED:
			s = "Aborted";
			break;
		case EnumState::STATE_PARTIALLY_ABORTED:
			s = "Aborting";
			break;
		default:
			s = "<undefined>";
			break;
		}

		return s;
	}
	static const char* getStateName(EnumState enState);

	// Determine if the specified state is runnable
	static bool isRunnable(EnumState enState) {
		bool isRunnable = false;
	static bool isRunnable(EnumState enState);

		switch (enState) {
		case EnumState::STATE_SUBMITTED:
		case EnumState::STATE_RUNNING:
		case EnumState::STATE_RUNNING_DIFFERENTIAL:
		case EnumState::STATE_PARTIALLY_ABORTED:
			isRunnable = true;
			break;
		default:
			break;
		}
	static KeyRef getDefaultTag() { return StringRef(defaultTagName); }

		return isRunnable;
	}

	static const KeyRef getDefaultTag() { return StringRef(defaultTagName); }

	static const std::string getDefaultTagName() { return defaultTagName; }
	static std::string getDefaultTagName() { return defaultTagName; }

	// This is only used for automatic backup name generation
	static Standalone<StringRef> getCurrentTime() {
		double t = now();
		time_t curTime = t;
		char buffer[128];
		struct tm* timeinfo;
		timeinfo = localtime(&curTime);
		strftime(buffer, 128, "%Y-%m-%d-%H-%M-%S", timeinfo);

		std::string time(buffer);
		return StringRef(time + format(".%06d", (int)(1e6 * (t - curTime))));
	}
	static Standalone<StringRef> getCurrentTime();

protected:
	static const std::string defaultTagName;
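Besides moving the function bodies out of the header, the hunk above changes `logHeaderSize` from `static const int` to `static constexpr int` with an in-class initializer. The difference matters for odr-use, as this sketch shows (names simplified; the value 12 is taken from the hunk above):

``` CPP
#include <cstdio>

// `static const int` declared in the class still needs an out-of-class
// definition in some .cpp before it can be odr-used, while `static constexpr`
// with an in-class initializer is self-contained (and implicitly inline in C++17).
struct OldStyle {
	static const int logHeaderSize; // definition must live in a .cpp
};
const int OldStyle::logHeaderSize = 12;

struct NewStyle {
	static constexpr int logHeaderSize = 12; // self-contained
};

int main() {
	std::printf("%d %d\n", OldStyle::logHeaderSize, NewStyle::logHeaderSize);
	return 0;
}
```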
@ -249,7 +145,11 @@ public:

	KeyBackedProperty<Key> lastBackupTimestamp() { return config.pack(LiteralStringRef(__FUNCTION__)); }

	Future<Void> run(Database cx, double* pollDelay, int maxConcurrentTasks) {
	Future<Void> run(Database cx, double pollDelay, int maxConcurrentTasks) {
		return taskBucket->run(cx, futureBucket, std::make_shared<double const>(pollDelay), maxConcurrentTasks);
	}

	Future<Void> run(Database cx, std::shared_ptr<double const> pollDelay, int maxConcurrentTasks) {
		return taskBucket->run(cx, futureBucket, pollDelay, maxConcurrentTasks);
	}

@ -260,13 +160,13 @@ public:
	static Key getPauseKey();

	// parallel restore
	Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true);
	Future<Void> parallelRestoreFinish(Database cx, UID randomUID, UnlockDB = UnlockDB::True);
	Future<Void> submitParallelRestore(Database cx,
	                                   Key backupTag,
	                                   Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                                   Key bcUrl,
	                                   Version targetVersion,
	                                   bool lockDB,
	                                   LockDB lockDB,
	                                   UID randomUID,
	                                   Key addPrefix,
	                                   Key removePrefix);

@ -288,29 +188,31 @@ public:
	                        Key tagName,
	                        Key url,
	                        Standalone<VectorRef<KeyRangeRef>> ranges,
	                        bool waitForComplete = true,
	                        Version targetVersion = -1,
	                        bool verbose = true,
	                        WaitForComplete = WaitForComplete::True,
	                        Version targetVersion = ::invalidVersion,
	                        Verbose = Verbose::True,
	                        Key addPrefix = Key(),
	                        Key removePrefix = Key(),
	                        bool lockDB = true,
	                        bool onlyAppyMutationLogs = false,
	                        bool inconsistentSnapshotOnly = false,
	                        Version beginVersion = -1);
	                        LockDB = LockDB::True,
	                        OnlyApplyMutationLogs = OnlyApplyMutationLogs::False,
	                        InconsistentSnapshotOnly = InconsistentSnapshotOnly::False,
	                        Version beginVersion = ::invalidVersion,
	                        Optional<std::string> const& encryptionKeyFileName = {});
	Future<Version> restore(Database cx,
	                        Optional<Database> cxOrig,
	                        Key tagName,
	                        Key url,
	                        bool waitForComplete = true,
	                        Version targetVersion = -1,
	                        bool verbose = true,
	                        WaitForComplete waitForComplete = WaitForComplete::True,
	                        Version targetVersion = ::invalidVersion,
	                        Verbose verbose = Verbose::True,
	                        KeyRange range = normalKeys,
	                        Key addPrefix = Key(),
	                        Key removePrefix = Key(),
	                        bool lockDB = true,
	                        bool onlyAppyMutationLogs = false,
	                        bool inconsistentSnapshotOnly = false,
	                        Version beginVersion = -1) {
	                        LockDB lockDB = LockDB::True,
	                        OnlyApplyMutationLogs onlyApplyMutationLogs = OnlyApplyMutationLogs::False,
	                        InconsistentSnapshotOnly inconsistentSnapshotOnly = InconsistentSnapshotOnly::False,
	                        Version beginVersion = ::invalidVersion,
	                        Optional<std::string> const& encryptionKeyFileName = {}) {
		Standalone<VectorRef<KeyRangeRef>> rangeRef;
		rangeRef.push_back_deep(rangeRef.arena(), range);
		return restore(cx,
@ -324,9 +226,10 @@ public:
		               addPrefix,
		               removePrefix,
		               lockDB,
		               onlyAppyMutationLogs,
		               onlyApplyMutationLogs,
		               inconsistentSnapshotOnly,
		               beginVersion);
		               beginVersion,
		               encryptionKeyFileName);
	}
	Future<Version> atomicRestore(Database cx,
	                              Key tagName,

@ -347,7 +250,7 @@ public:
	Future<ERestoreState> abortRestore(Database cx, Key tagName);

	// Waits for a restore tag to reach a final (stable) state.
	Future<ERestoreState> waitRestore(Database cx, Key tagName, bool verbose);
	Future<ERestoreState> waitRestore(Database cx, Key tagName, Verbose);

	// Get a string describing the status of a tag
	Future<std::string> restoreStatus(Reference<ReadYourWritesTransaction> tr, Key tagName);

@ -362,20 +265,22 @@ public:
	                          Key outContainer,
	                          int initialSnapshotIntervalSeconds,
	                          int snapshotIntervalSeconds,
	                          std::string tagName,
	                          std::string const& tagName,
	                          Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                          bool stopWhenDone = true,
	                          bool partitionedLog = false,
	                          bool incrementalBackupOnly = false);
	                          StopWhenDone = StopWhenDone::True,
	                          UsePartitionedLog = UsePartitionedLog::False,
	                          IncrementalBackupOnly = IncrementalBackupOnly::False,
	                          Optional<std::string> const& encryptionKeyFileName = {});
	Future<Void> submitBackup(Database cx,
	                          Key outContainer,
	                          int initialSnapshotIntervalSeconds,
	                          int snapshotIntervalSeconds,
	                          std::string tagName,
	                          std::string const& tagName,
	                          Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                          bool stopWhenDone = true,
	                          bool partitionedLog = false,
	                          bool incrementalBackupOnly = false) {
	                          StopWhenDone stopWhenDone = StopWhenDone::True,
	                          UsePartitionedLog partitionedLog = UsePartitionedLog::False,
	                          IncrementalBackupOnly incrementalBackupOnly = IncrementalBackupOnly::False,
	                          Optional<std::string> const& encryptionKeyFileName = {}) {
		return runRYWTransactionFailIfLocked(cx, [=](Reference<ReadYourWritesTransaction> tr) {
			return submitBackup(tr,
			                    outContainer,

@ -385,7 +290,8 @@ public:
			                    backupRanges,
			                    stopWhenDone,
			                    partitionedLog,
			                    incrementalBackupOnly);
			                    incrementalBackupOnly,
			                    encryptionKeyFileName);
		});
	}

@ -407,19 +313,19 @@ public:
		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return abortBackup(tr, tagName); });
	}

	Future<std::string> getStatus(Database cx, bool showErrors, std::string tagName);
	Future<std::string> getStatus(Database cx, ShowErrors, std::string tagName);
	Future<std::string> getStatusJSON(Database cx, std::string tagName);

	Future<Optional<Version>> getLastRestorable(Reference<ReadYourWritesTransaction> tr,
	                                            Key tagName,
	                                            bool snapshot = false);
	                                            Snapshot = Snapshot::False);
	void setLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName, Version version);

	// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
	// will return when the backup directory is restorable.
	Future<EnumState> waitBackup(Database cx,
	                             std::string tagName,
	                             bool stopWhenDone = true,
	                             StopWhenDone = StopWhenDone::True,
	                             Reference<IBackupContainer>* pContainer = nullptr,
	                             UID* pUID = nullptr);

@ -478,7 +384,11 @@ public:
		sourceTagNames = std::move(r.sourceTagNames);
	}

	Future<Void> run(Database cx, double* pollDelay, int maxConcurrentTasks) {
	Future<Void> run(Database cx, double pollDelay, int maxConcurrentTasks) {
		return taskBucket->run(cx, futureBucket, std::make_shared<double const>(pollDelay), maxConcurrentTasks);
	}

	Future<Void> run(Database cx, std::shared_ptr<double const> pollDelay, int maxConcurrentTasks) {
		return taskBucket->run(cx, futureBucket, pollDelay, maxConcurrentTasks);
	}

@ -487,7 +397,7 @@ public:
	                        Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                        Key addPrefix,
	                        Key removePrefix,
	                        bool forceAction = false);
	                        ForceAction = ForceAction::False);

	Future<Void> unlockBackup(Reference<ReadYourWritesTransaction> tr, Key tagName);
	Future<Void> unlockBackup(Database cx, Key tagName) {

@ -506,18 +416,18 @@ public:
	Future<Void> submitBackup(Reference<ReadYourWritesTransaction> tr,
	                          Key tagName,
	                          Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                          bool stopWhenDone = true,
	                          StopWhenDone = StopWhenDone::True,
	                          Key addPrefix = StringRef(),
	                          Key removePrefix = StringRef(),
	                          bool lockDatabase = false,
	                          LockDB lockDatabase = LockDB::False,
	                          PreBackupAction backupAction = PreBackupAction::VERIFY);
	Future<Void> submitBackup(Database cx,
	                          Key tagName,
	                          Standalone<VectorRef<KeyRangeRef>> backupRanges,
	                          bool stopWhenDone = true,
	                          StopWhenDone stopWhenDone = StopWhenDone::True,
	                          Key addPrefix = StringRef(),
	                          Key removePrefix = StringRef(),
	                          bool lockDatabase = false,
	                          LockDB lockDatabase = LockDB::False,
	                          PreBackupAction backupAction = PreBackupAction::VERIFY) {
		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
			return submitBackup(
@ -533,35 +443,36 @@ public:

	Future<Void> abortBackup(Database cx,
	                         Key tagName,
	                         bool partial = false,
	                         bool abortOldBackup = false,
	                         bool dstOnly = false,
	                         bool waitForDestUID = false);
	                         PartialBackup = PartialBackup::False,
	                         AbortOldBackup = AbortOldBackup::False,
	                         DstOnly = DstOnly::False,
	                         WaitForDestUID = WaitForDestUID::False);

	Future<std::string> getStatus(Database cx, int errorLimit, Key tagName);

	Future<EnumState> getStateValue(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
	Future<EnumState> getStateValue(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
	Future<EnumState> getStateValue(Database cx, UID logUid) {
		return runRYWTransaction(cx,
		                         [=](Reference<ReadYourWritesTransaction> tr) { return getStateValue(tr, logUid); });
	}

	Future<UID> getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
	Future<UID> getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
	Future<UID> getDestUid(Database cx, UID logUid) {
		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return getDestUid(tr, logUid); });
	}

	Future<UID> getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, bool snapshot = false);
	Future<UID> getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, Snapshot = Snapshot::False);
	Future<UID> getLogUid(Database cx, Key tagName) {
		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return getLogUid(tr, tagName); });
	}

	Future<int64_t> getRangeBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
	Future<int64_t> getLogBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);

	Future<int64_t> getRangeBytesWritten(Reference<ReadYourWritesTransaction> tr,
	                                     UID logUid,
	                                     Snapshot = Snapshot::False);
	Future<int64_t> getLogBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
	// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
	// will return when the backup directory is restorable.
	Future<EnumState> waitBackup(Database cx, Key tagName, bool stopWhenDone = true);
	Future<EnumState> waitBackup(Database cx, Key tagName, StopWhenDone = StopWhenDone::True);
	Future<EnumState> waitSubmitted(Database cx, Key tagName);
	Future<Void> waitUpgradeToLatestDrVersion(Database cx, Key tagName);

@ -619,7 +530,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
                          Key logUidValue,
                          Key destUidValue,
                          Optional<Version> endVersion = Optional<Version>(),
                          bool checkBackupUid = false,
                          CheckBackupUID = CheckBackupUID::False,
                          Version backupUid = 0);
Key getApplyKey(Version version, Key backupUid);
Version getLogKeyVersion(Key key);

@ -631,18 +542,18 @@ ACTOR Future<Void> readCommitted(Database cx,
                                 PromiseStream<RangeResultWithVersion> results,
                                 Reference<FlowLock> lock,
                                 KeyRangeRef range,
                                 bool terminator = true,
                                 bool systemAccess = false,
                                 bool lockAware = false);
                                 Terminator terminator = Terminator::True,
                                 AccessSystemKeys systemAccess = AccessSystemKeys::False,
                                 LockAware lockAware = LockAware::False);
ACTOR Future<Void> readCommitted(Database cx,
                                 PromiseStream<RCGroup> results,
                                 Future<Void> active,
                                 Reference<FlowLock> lock,
                                 KeyRangeRef range,
                                 std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy,
                                 bool terminator = true,
                                 bool systemAccess = false,
                                 bool lockAware = false);
                                 Terminator terminator = Terminator::True,
                                 AccessSystemKeys systemAccess = AccessSystemKeys::False,
                                 LockAware lockAware = LockAware::False);
ACTOR Future<Void> applyMutations(Database cx,
                                  Key uid,
                                  Key addPrefix,
@ -652,7 +563,7 @@ ACTOR Future<Void> applyMutations(Database cx,
|
|||
RequestStream<CommitTransactionRequest> commit,
|
||||
NotifiedVersion* committedVersion,
|
||||
Reference<KeyRangeMap<Version>> keyVersion);
|
||||
ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData);
|
||||
ACTOR Future<Void> cleanupBackup(Database cx, DeleteData deleteData);
|
||||
|
||||
using EBackupState = BackupAgentBase::EnumState;
|
||||
template <>
|
||||
|
@ -695,14 +606,15 @@ public:
|
|||
typedef KeyBackedMap<std::string, UidAndAbortedFlagT> TagMap;
|
||||
// Map of tagName to {UID, aborted_flag} located in the fileRestorePrefixRange keyspace.
|
||||
class TagUidMap : public KeyBackedMap<std::string, UidAndAbortedFlagT> {
|
||||
ACTOR static Future<std::vector<KeyBackedTag>> getAll_impl(TagUidMap* tagsMap,
|
||||
Reference<ReadYourWritesTransaction> tr,
|
||||
Snapshot snapshot);
|
||||
|
||||
public:
|
||||
TagUidMap(const StringRef& prefix) : TagMap(LiteralStringRef("tag->uid/").withPrefix(prefix)), prefix(prefix) {}
|
||||
|
||||
ACTOR static Future<std::vector<KeyBackedTag>> getAll_impl(TagUidMap* tagsMap,
|
||||
Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot);
|
||||
|
||||
Future<std::vector<KeyBackedTag>> getAll(Reference<ReadYourWritesTransaction> tr, bool snapshot = false) {
|
||||
Future<std::vector<KeyBackedTag>> getAll(Reference<ReadYourWritesTransaction> tr,
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return getAll_impl(this, tr, snapshot);
|
||||
}
|
||||
|
||||
|
@ -718,12 +630,12 @@ static inline KeyBackedTag makeBackupTag(std::string tagName) {
|
|||
}
|
||||
|
||||
static inline Future<std::vector<KeyBackedTag>> getAllRestoreTags(Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot = false) {
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return TagUidMap(fileRestorePrefixRange.begin).getAll(tr, snapshot);
|
||||
}
|
||||
|
||||
static inline Future<std::vector<KeyBackedTag>> getAllBackupTags(Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot = false) {
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return TagUidMap(fileBackupPrefixRange.begin).getAll(tr, snapshot);
|
||||
}
|
||||
|
||||
|
@ -738,7 +650,9 @@ public:
|
|||
|
||||
KeyBackedConfig(StringRef prefix, Reference<Task> task) : KeyBackedConfig(prefix, TaskParams.uid().get(task)) {}
|
||||
|
||||
Future<Void> toTask(Reference<ReadYourWritesTransaction> tr, Reference<Task> task, bool setValidation = true) {
|
||||
Future<Void> toTask(Reference<ReadYourWritesTransaction> tr,
|
||||
Reference<Task> task,
|
||||
SetValidation setValidation = SetValidation::True) {
|
||||
// Set the uid task parameter
|
||||
TaskParams.uid().set(task, uid);
|
||||
|
||||
|
|
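
// A caller sketch for the waitBackup overload above (hypothetical helper; the
// agent, database, and tag name are assumptions for illustration). With
// StopWhenDone::False the returned future becomes ready as soon as the backup
// directory is restorable; with StopWhenDone::True it waits until the backup
// is stopped.
ACTOR Future<Void> exampleWaitRestorable(Database cx, DatabaseBackupAgent* agent) {
	EBackupState s = wait(agent->waitBackup(cx, "default"_sr, StopWhenDone::False));
	TraceEvent("ExampleDRState").detail("State", BackupAgentBase::getStateName(s));
	return Void();
}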

@@ -26,6 +26,24 @@
#include "flow/ActorCollection.h"
#include "flow/actorcompiler.h" // has to be last include

FDB_DEFINE_BOOLEAN_PARAM(LockDB);
FDB_DEFINE_BOOLEAN_PARAM(UnlockDB);
FDB_DEFINE_BOOLEAN_PARAM(StopWhenDone);
FDB_DEFINE_BOOLEAN_PARAM(Verbose);
FDB_DEFINE_BOOLEAN_PARAM(WaitForComplete);
FDB_DEFINE_BOOLEAN_PARAM(ForceAction);
FDB_DEFINE_BOOLEAN_PARAM(Terminator);
FDB_DEFINE_BOOLEAN_PARAM(UsePartitionedLog);
FDB_DEFINE_BOOLEAN_PARAM(InconsistentSnapshotOnly);
FDB_DEFINE_BOOLEAN_PARAM(ShowErrors);
FDB_DEFINE_BOOLEAN_PARAM(AbortOldBackup);
FDB_DEFINE_BOOLEAN_PARAM(DstOnly);
FDB_DEFINE_BOOLEAN_PARAM(WaitForDestUID);
FDB_DEFINE_BOOLEAN_PARAM(CheckBackupUID);
FDB_DEFINE_BOOLEAN_PARAM(DeleteData);
FDB_DEFINE_BOOLEAN_PARAM(SetValidation);
FDB_DEFINE_BOOLEAN_PARAM(PartialBackup);
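
// A hand-rolled equivalent of what an FDB_DECLARE_BOOLEAN_PARAM /
// FDB_DEFINE_BOOLEAN_PARAM pair generates (simplified sketch; the real macros
// live in flow/BooleanParam.h):
class DeleteDataSketch {
	bool value;

public:
	explicit constexpr DeleteDataSketch(bool v) : value(v) {}
	constexpr operator bool() const { return value; }
	static const DeleteDataSketch True;
	static const DeleteDataSketch False;
};
inline const DeleteDataSketch DeleteDataSketch::True{ true };
inline const DeleteDataSketch DeleteDataSketch::False{ false };
// The payoff is at call sites: cleanupBackup(cx, DeleteData::True) documents
// itself, and because the constructor is explicit, two adjacent flags of
// different parameter types can no longer be silently swapped.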

std::string BackupAgentBase::formatTime(int64_t epochs) {
time_t curTime = (time_t)epochs;
char buffer[30];
@@ -95,32 +113,33 @@ int64_t BackupAgentBase::parseTime(std::string timestamp) {
return ts;
}

const Key BackupAgentBase::keyFolderId = LiteralStringRef("config_folderid");
const Key BackupAgentBase::keyBeginVersion = LiteralStringRef("beginVersion");
const Key BackupAgentBase::keyEndVersion = LiteralStringRef("endVersion");
const Key BackupAgentBase::keyPrevBeginVersion = LiteralStringRef("prevBeginVersion");
const Key BackupAgentBase::keyConfigBackupTag = LiteralStringRef("config_backup_tag");
const Key BackupAgentBase::keyConfigLogUid = LiteralStringRef("config_log_uid");
const Key BackupAgentBase::keyConfigBackupRanges = LiteralStringRef("config_backup_ranges");
const Key BackupAgentBase::keyConfigStopWhenDoneKey = LiteralStringRef("config_stop_when_done");
const Key BackupAgentBase::keyStateStop = LiteralStringRef("state_stop");
const Key BackupAgentBase::keyStateStatus = LiteralStringRef("state_status");
const Key BackupAgentBase::keyLastUid = LiteralStringRef("last_uid");
const Key BackupAgentBase::keyBeginKey = LiteralStringRef("beginKey");
const Key BackupAgentBase::keyEndKey = LiteralStringRef("endKey");
const Key BackupAgentBase::keyDrVersion = LiteralStringRef("drVersion");
const Key BackupAgentBase::destUid = LiteralStringRef("destUid");
const Key BackupAgentBase::backupStartVersion = LiteralStringRef("backupStartVersion");
const Key BackupAgentBase::keyFolderId = "config_folderid"_sr;
const Key BackupAgentBase::keyBeginVersion = "beginVersion"_sr;
const Key BackupAgentBase::keyEndVersion = "endVersion"_sr;
const Key BackupAgentBase::keyPrevBeginVersion = "prevBeginVersion"_sr;
const Key BackupAgentBase::keyConfigBackupTag = "config_backup_tag"_sr;
const Key BackupAgentBase::keyConfigLogUid = "config_log_uid"_sr;
const Key BackupAgentBase::keyConfigBackupRanges = "config_backup_ranges"_sr;
const Key BackupAgentBase::keyConfigStopWhenDoneKey = "config_stop_when_done"_sr;
const Key BackupAgentBase::keyStateStop = "state_stop"_sr;
const Key BackupAgentBase::keyStateStatus = "state_status"_sr;
const Key BackupAgentBase::keyStateLogBeginVersion = "last_begin_version"_sr;
const Key BackupAgentBase::keyLastUid = "last_uid"_sr;
const Key BackupAgentBase::keyBeginKey = "beginKey"_sr;
const Key BackupAgentBase::keyEndKey = "endKey"_sr;
const Key BackupAgentBase::keyDrVersion = "drVersion"_sr;
const Key BackupAgentBase::destUid = "destUid"_sr;
const Key BackupAgentBase::backupStartVersion = "backupStartVersion"_sr;

const Key BackupAgentBase::keyTagName = LiteralStringRef("tagname");
const Key BackupAgentBase::keyStates = LiteralStringRef("state");
const Key BackupAgentBase::keyConfig = LiteralStringRef("config");
const Key BackupAgentBase::keyErrors = LiteralStringRef("errors");
const Key BackupAgentBase::keyRanges = LiteralStringRef("ranges");
const Key BackupAgentBase::keyTasks = LiteralStringRef("tasks");
const Key BackupAgentBase::keyFutures = LiteralStringRef("futures");
const Key BackupAgentBase::keySourceStates = LiteralStringRef("source_states");
const Key BackupAgentBase::keySourceTagName = LiteralStringRef("source_tagname");
const Key BackupAgentBase::keyTagName = "tagname"_sr;
const Key BackupAgentBase::keyStates = "state"_sr;
const Key BackupAgentBase::keyConfig = "config"_sr;
const Key BackupAgentBase::keyErrors = "errors"_sr;
const Key BackupAgentBase::keyRanges = "ranges"_sr;
const Key BackupAgentBase::keyTasks = "tasks"_sr;
const Key BackupAgentBase::keyFutures = "futures"_sr;
const Key BackupAgentBase::keySourceStates = "source_states"_sr;
const Key BackupAgentBase::keySourceTagName = "source_tagname"_sr;
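
// "..."_sr is the literal-suffix spelling of LiteralStringRef: both produce a
// StringRef over the literal's bytes with no allocation, so the rewrite above
// is purely cosmetic. For example, these two constants are identical:
//
//   const Key k1 = LiteralStringRef("tagname");
//   const Key k2 = "tagname"_sr;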

bool copyParameter(Reference<Task> source, Reference<Task> dest, Key key) {
if (source) {
@@ -374,9 +393,9 @@ ACTOR Future<Void> readCommitted(Database cx,
PromiseStream<RangeResultWithVersion> results,
Reference<FlowLock> lock,
KeyRangeRef range,
bool terminator,
bool systemAccess,
bool lockAware) {
Terminator terminator,
AccessSystemKeys systemAccess,
LockAware lockAware) {
state KeySelector begin = firstGreaterOrEqual(range.begin);
state KeySelector end = firstGreaterOrEqual(range.end);
state Transaction tr(cx);
@@ -450,9 +469,9 @@ ACTOR Future<Void> readCommitted(Database cx,
Reference<FlowLock> lock,
KeyRangeRef range,
std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy,
bool terminator,
bool systemAccess,
bool lockAware) {
Terminator terminator,
AccessSystemKeys systemAccess,
LockAware lockAware) {
state KeySelector nextKey = firstGreaterOrEqual(range.begin);
state KeySelector end = firstGreaterOrEqual(range.end);

@@ -559,7 +578,8 @@ Future<Void> readCommitted(Database cx,
Reference<FlowLock> lock,
KeyRangeRef range,
std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy) {
return readCommitted(cx, results, Void(), lock, range, groupBy, true, true, true);
return readCommitted(
cx, results, Void(), lock, range, groupBy, Terminator::True, AccessSystemKeys::True, LockAware::True);
}

ACTOR Future<int> dumpData(Database cx,
@@ -770,7 +790,7 @@ ACTOR static Future<Void> _eraseLogData(Reference<ReadYourWritesTransaction> tr,
Key logUidValue,
Key destUidValue,
Optional<Version> endVersion,
bool checkBackupUid,
CheckBackupUID checkBackupUid,
Version backupUid) {
state Key backupLatestVersionsPath = destUidValue.withPrefix(backupLatestVersionsPrefix);
state Key backupLatestVersionsKey = logUidValue.withPrefix(backupLatestVersionsPath);
@@ -898,7 +918,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
Key logUidValue,
Key destUidValue,
Optional<Version> endVersion,
bool checkBackupUid,
CheckBackupUID checkBackupUid,
Version backupUid) {
return _eraseLogData(tr, logUidValue, destUidValue, endVersion, checkBackupUid, backupUid);
}
@@ -995,7 +1015,7 @@ ACTOR Future<Void> cleanupLogMutations(Database cx, Value destUidValue, bool del
}
}

ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData) {
ACTOR Future<Void> cleanupBackup(Database cx, DeleteData deleteData) {
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop {
try {
@@ -1014,3 +1034,124 @@ ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData) {
}
}
}

// Convert the status text to an enumerated value
BackupAgentBase::EnumState BackupAgentBase::getState(std::string const& stateText) {
auto enState = EnumState::STATE_ERRORED;

if (stateText.empty()) {
enState = EnumState::STATE_NEVERRAN;
}

else if (!stateText.compare("has been submitted")) {
enState = EnumState::STATE_SUBMITTED;
}

else if (!stateText.compare("has been started")) {
enState = EnumState::STATE_RUNNING;
}

else if (!stateText.compare("is differential")) {
enState = EnumState::STATE_RUNNING_DIFFERENTIAL;
}

else if (!stateText.compare("has been completed")) {
enState = EnumState::STATE_COMPLETED;
}

else if (!stateText.compare("has been aborted")) {
enState = EnumState::STATE_ABORTED;
}

else if (!stateText.compare("has been partially aborted")) {
enState = EnumState::STATE_PARTIALLY_ABORTED;
}

return enState;
}

const char* BackupAgentBase::getStateText(EnumState enState) {
const char* stateText;

switch (enState) {
case EnumState::STATE_ERRORED:
stateText = "has errored";
break;
case EnumState::STATE_NEVERRAN:
stateText = "has never been started";
break;
case EnumState::STATE_SUBMITTED:
stateText = "has been submitted";
break;
case EnumState::STATE_RUNNING:
stateText = "has been started";
break;
case EnumState::STATE_RUNNING_DIFFERENTIAL:
stateText = "is differential";
break;
case EnumState::STATE_COMPLETED:
stateText = "has been completed";
break;
case EnumState::STATE_ABORTED:
stateText = "has been aborted";
break;
case EnumState::STATE_PARTIALLY_ABORTED:
stateText = "has been partially aborted";
break;
default:
stateText = "<undefined>";
break;
}

return stateText;
}
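
// getState() and getStateText() are inverses over the reachable states, which
// a small check can exercise (illustrative sketch, not part of this file; it
// skips STATE_NEVERRAN, whose text does not round-trip by design):
void exampleStateTextRoundTrip() {
	for (auto s : { BackupAgentBase::EnumState::STATE_SUBMITTED,
	                BackupAgentBase::EnumState::STATE_RUNNING,
	                BackupAgentBase::EnumState::STATE_RUNNING_DIFFERENTIAL,
	                BackupAgentBase::EnumState::STATE_COMPLETED,
	                BackupAgentBase::EnumState::STATE_ABORTED,
	                BackupAgentBase::EnumState::STATE_PARTIALLY_ABORTED }) {
		ASSERT(BackupAgentBase::getState(BackupAgentBase::getStateText(s)) == s);
	}
}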

const char* BackupAgentBase::getStateName(EnumState enState) {
switch (enState) {
case EnumState::STATE_ERRORED:
return "Errored";
case EnumState::STATE_NEVERRAN:
return "NeverRan";
case EnumState::STATE_SUBMITTED:
return "Submitted";
break;
case EnumState::STATE_RUNNING:
return "Running";
case EnumState::STATE_RUNNING_DIFFERENTIAL:
return "RunningDifferentially";
case EnumState::STATE_COMPLETED:
return "Completed";
case EnumState::STATE_ABORTED:
return "Aborted";
case EnumState::STATE_PARTIALLY_ABORTED:
return "Aborting";
default:
return "<undefined>";
}
}

bool BackupAgentBase::isRunnable(EnumState enState) {
switch (enState) {
case EnumState::STATE_SUBMITTED:
case EnumState::STATE_RUNNING:
case EnumState::STATE_RUNNING_DIFFERENTIAL:
case EnumState::STATE_PARTIALLY_ABORTED:
return true;
default:
return false;
}
}

Standalone<StringRef> BackupAgentBase::getCurrentTime() {
double t = now();
time_t curTime = t;
char buffer[128];
struct tm* timeinfo;
timeinfo = localtime(&curTime);
strftime(buffer, 128, "%Y-%m-%d-%H-%M-%S", timeinfo);

std::string time(buffer);
return StringRef(time + format(".%06d", (int)(1e6 * (t - curTime))));
}

std::string const BackupAgentBase::defaultTagName = "default";

@@ -58,6 +58,7 @@ ACTOR Future<Void> appendStringRefWithLen(Reference<IBackupFile> file, Standalon
wait(file->append(s.begin(), s.size()));
return Void();
}

} // namespace IBackupFile_impl

Future<Void> IBackupFile::appendStringRefWithLen(Standalone<StringRef> s) {
@@ -253,7 +254,8 @@ std::vector<std::string> IBackupContainer::getURLFormats() {
}

// Get an IBackupContainer based on a container URL string
Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& url) {
Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& url,
Optional<std::string> const& encryptionKeyFileName) {
static std::map<std::string, Reference<IBackupContainer>> m_cache;

Reference<IBackupContainer>& r = m_cache[url];
@@ -262,9 +264,9 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u

try {
StringRef u(url);
if (u.startsWith(LiteralStringRef("file://"))) {
r = Reference<IBackupContainer>(new BackupContainerLocalDirectory(url));
} else if (u.startsWith(LiteralStringRef("blobstore://"))) {
if (u.startsWith("file://"_sr)) {
r = makeReference<BackupContainerLocalDirectory>(url, encryptionKeyFileName);
} else if (u.startsWith("blobstore://"_sr)) {
std::string resource;

// The URL parameters contain blobstore endpoint tunables as well as possible backup-specific options.
@@ -277,15 +279,16 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u
for (auto c : resource)
if (!isalnum(c) && c != '_' && c != '-' && c != '.' && c != '/')
throw backup_invalid_url();
r = Reference<IBackupContainer>(new BackupContainerS3BlobStore(bstore, resource, backupParams));
r = makeReference<BackupContainerS3BlobStore>(bstore, resource, backupParams, encryptionKeyFileName);
}
#ifdef BUILD_AZURE_BACKUP
else if (u.startsWith(LiteralStringRef("azure://"))) {
u.eat(LiteralStringRef("azure://"));
auto address = NetworkAddress::parse(u.eat(LiteralStringRef("/")).toString());
auto containerName = u.eat(LiteralStringRef("/")).toString();
auto accountName = u.eat(LiteralStringRef("/")).toString();
r = Reference<IBackupContainer>(new BackupContainerAzureBlobStore(address, containerName, accountName));
else if (u.startsWith("azure://"_sr)) {
u.eat("azure://"_sr);
auto address = NetworkAddress::parse(u.eat("/"_sr).toString());
auto containerName = u.eat("/"_sr).toString();
auto accountName = u.eat("/"_sr).toString();
r = makeReference<BackupContainerAzureBlobStore>(
address, containerName, accountName, encryptionKeyFileName);
}
#endif
else {
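
// Typical call shape for the new overload (sketch; the URL and key path are
// placeholders chosen for illustration):
//
//   Reference<IBackupContainer> c =
//       IBackupContainer::openContainer("file:///tmp/fdb_backup", std::string("/tmp/backup.key"));
//
// The key file name is threaded through to whichever concrete container the
// URL selects; omitting it (the parameter defaults to an empty Optional)
// preserves the old unencrypted behavior for existing callers.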

@@ -315,10 +318,10 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u
ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL) {
try {
StringRef u(baseURL);
if (u.startsWith(LiteralStringRef("file://"))) {
if (u.startsWith("file://"_sr)) {
std::vector<std::string> results = wait(BackupContainerLocalDirectory::listURLs(baseURL));
return results;
} else if (u.startsWith(LiteralStringRef("blobstore://"))) {
} else if (u.startsWith("blobstore://"_sr)) {
std::string resource;

S3BlobStoreEndpoint::ParametersT backupParams;
@@ -333,14 +336,14 @@ ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL)
}

// Create a dummy container to parse the backup-specific parameters from the URL and get a final bucket name
BackupContainerS3BlobStore dummy(bstore, "dummy", backupParams);
BackupContainerS3BlobStore dummy(bstore, "dummy", backupParams, {});

std::vector<std::string> results = wait(BackupContainerS3BlobStore::listURLs(bstore, dummy.getBucket()));
return results;
}
// TODO: Enable this when Azure backups are ready
/*
else if (u.startsWith(LiteralStringRef("azure://"))) {
else if (u.startsWith("azure://"_sr)) {
std::vector<std::string> results = wait(BackupContainerAzureBlobStore::listURLs(baseURL));
return results;
}
@@ -386,7 +389,7 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state std::vector<std::pair<int64_t, Version>> results =
wait(versionMap.getRange(tr, 0, time, 1, false, true));
wait(versionMap.getRange(tr, 0, time, 1, Snapshot::False, Reverse::True));
if (results.size() != 1) {
// No key less than time was found in the database
// Look for a key >= time.
@@ -425,7 +428,7 @@ ACTOR Future<Optional<int64_t>> timeKeeperEpochsFromVersion(Version v, Reference

// Find the highest time < mid
state std::vector<std::pair<int64_t, Version>> results =
wait(versionMap.getRange(tr, min, mid, 1, false, true));
wait(versionMap.getRange(tr, min, mid, 1, Snapshot::False, Reverse::True));

if (results.size() != 1) {
if (mid == min) {
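
// The two getRange calls above are predecessor lookups: a reverse range read
// with limit 1 over [begin, end) yields the largest key strictly below `end`,
// or an empty result if no such key exists. Shape of the idiom:
//
//   std::vector<std::pair<int64_t, Version>> r =
//       wait(versionMap.getRange(tr, min, mid, 1, Snapshot::False, Reverse::True));
//   if (r.empty()) { /* nothing below mid */ }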

@@ -293,7 +293,8 @@ public:
Version beginVersion = -1) = 0;

// Get an IBackupContainer based on a container spec string
static Reference<IBackupContainer> openContainer(const std::string& url);
static Reference<IBackupContainer> openContainer(const std::string& url,
const Optional<std::string>& encryptionKeyFileName = {});
static std::vector<std::string> getURLFormats();
static Future<std::vector<std::string>> listContainers(const std::string& baseURL);

@@ -19,6 +19,7 @@
*/

#include "fdbclient/BackupContainerAzureBlobStore.h"
#include "fdbrpc/AsyncFileEncrypted.h"

#include "flow/actorcompiler.h" // This must be the last #include.

@@ -167,8 +168,12 @@ public:
if (!exists) {
throw file_not_found();
}
return Reference<IAsyncFile>(
new ReadFile(self->asyncTaskThread, self->containerName, fileName, self->client.get()));
Reference<IAsyncFile> f =
makeReference<ReadFile>(self->asyncTaskThread, self->containerName, fileName, self->client.get());
if (self->usesEncryption()) {
f = makeReference<AsyncFileEncrypted>(f, false);
}
return f;
}

ACTOR static Future<Reference<IBackupFile>> writeFile(BackupContainerAzureBlobStore* self, std::string fileName) {
@@ -177,10 +182,11 @@ public:
auto outcome = client->create_append_blob(containerName, fileName).get();
return Void();
}));
return Reference<IBackupFile>(
new BackupFile(fileName,
Reference<IAsyncFile>(new WriteFile(
self->asyncTaskThread, self->containerName, fileName, self->client.get()))));
auto f = makeReference<WriteFile>(self->asyncTaskThread, self->containerName, fileName, self->client.get());
if (self->usesEncryption()) {
f = makeReference<AsyncFileEncrypted>(f, true);
}
return makeReference<BackupFile>(fileName, f);
}

static void listFiles(AzureClient* client,
@@ -213,6 +219,16 @@ public:
}
return Void();
}

ACTOR static Future<Void> create(BackupContainerAzureBlobStore* self) {
state Future<Void> f1 =
self->asyncTaskThread.execAsync([containerName = self->containerName, client = self->client.get()] {
client->create_container(containerName).wait();
return Void();
});
state Future<Void> f2 = self->usesEncryption() ? self->encryptionSetupComplete() : Void();
return f1 && f2;
}
};

Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
@@ -225,10 +241,11 @@ Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileNa

BackupContainerAzureBlobStore::BackupContainerAzureBlobStore(const NetworkAddress& address,
const std::string& accountName,
const std::string& containerName)
const std::string& containerName,
const Optional<std::string>& encryptionKeyFileName)
: containerName(containerName) {
setEncryptionKey(encryptionKeyFileName);
std::string accountKey = std::getenv("AZURE_KEY");

auto credential = std::make_shared<azure::storage_lite::shared_key_credential>(accountName, accountKey);
auto storageAccount = std::make_shared<azure::storage_lite::storage_account>(
accountName, credential, false, format("http://%s/%s", address.toString().c_str(), accountName.c_str()));
@@ -244,10 +261,7 @@ void BackupContainerAzureBlobStore::delref() {
}

Future<Void> BackupContainerAzureBlobStore::create() {
return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {
client->create_container(containerName).wait();
return Void();
});
return BackupContainerAzureBlobStoreImpl::create(this);
}
Future<bool> BackupContainerAzureBlobStore::exists() {
return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {

@@ -44,7 +44,8 @@ class BackupContainerAzureBlobStore final : public BackupContainerFileSystem,
public:
BackupContainerAzureBlobStore(const NetworkAddress& address,
const std::string& accountName,
const std::string& containerName);
const std::string& containerName,
const Optional<std::string>& encryptionKeyFileName);

void addref() override;
void delref() override;

@@ -23,6 +23,7 @@
#include "fdbclient/BackupContainerFileSystem.h"
#include "fdbclient/BackupContainerLocalDirectory.h"
#include "fdbclient/JsonBuilder.h"
#include "flow/StreamCipher.h"
#include "flow/UnitTest.h"

#include <algorithm>
@@ -290,13 +291,13 @@ public:

std::map<int, std::vector<int>> tagIndices; // tagId -> indices in files
for (int i = 0; i < logs.size(); i++) {
ASSERT(logs[i].tagId >= 0);
ASSERT(logs[i].tagId < logs[i].totalTags);
ASSERT_GE(logs[i].tagId, 0);
ASSERT_LT(logs[i].tagId, logs[i].totalTags);
auto& indices = tagIndices[logs[i].tagId];
// filter out if indices.back() is subset of files[i] or vice versa
if (!indices.empty()) {
if (logs[indices.back()].isSubset(logs[i])) {
ASSERT(logs[indices.back()].fileSize <= logs[i].fileSize);
ASSERT_LE(logs[indices.back()].fileSize, logs[i].fileSize);
indices.back() = i;
} else if (!logs[i].isSubset(logs[indices.back()])) {
indices.push_back(i);
@@ -864,7 +865,7 @@ public:
int i = 0;
for (int j = 1; j < logs.size(); j++) {
if (logs[j].isSubset(logs[i])) {
ASSERT(logs[j].fileSize <= logs[i].fileSize);
ASSERT_LE(logs[j].fileSize, logs[i].fileSize);
continue;
}

@@ -1032,10 +1033,10 @@ public:
}

static std::string versionFolderString(Version v, int smallestBucket) {
ASSERT(smallestBucket < 14);
ASSERT_LT(smallestBucket, 14);
// Get a 0-padded fixed size representation of v
std::string vFixedPrecision = format("%019lld", v);
ASSERT(vFixedPrecision.size() == 19);
ASSERT_EQ(vFixedPrecision.size(), 19);
// Truncate smallestBucket from the fixed length representation
vFixedPrecision.resize(vFixedPrecision.size() - smallestBucket);

@@ -1126,6 +1127,42 @@ public:
return false;
}

ACTOR static Future<Void> createTestEncryptionKeyFile(std::string filename) {
state Reference<IAsyncFile> keyFile = wait(IAsyncFileSystem::filesystem()->open(
filename,
IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_CREATE,
0600));
StreamCipher::Key::RawKeyType testKey;
generateRandomData(testKey.data(), testKey.size());
keyFile->write(testKey.data(), testKey.size(), 0);
wait(keyFile->sync());
return Void();
}

ACTOR static Future<Void> readEncryptionKey(std::string encryptionKeyFileName) {
state Reference<IAsyncFile> keyFile;
state StreamCipher::Key::RawKeyType key;
try {
Reference<IAsyncFile> _keyFile =
wait(IAsyncFileSystem::filesystem()->open(encryptionKeyFileName, 0x0, 0400));
keyFile = _keyFile;
} catch (Error& e) {
TraceEvent(SevWarnAlways, "FailedToOpenEncryptionKeyFile")
.detail("FileName", encryptionKeyFileName)
.error(e);
throw e;
}
int bytesRead = wait(keyFile->read(key.data(), key.size(), 0));
if (bytesRead != key.size()) {
TraceEvent(SevWarnAlways, "InvalidEncryptionKeyFileSize")
.detail("ExpectedSize", key.size())
.detail("ActualSize", bytesRead);
throw invalid_encryption_key_file();
}
ASSERT_EQ(bytesRead, key.size());
StreamCipher::Key::initializeKey(std::move(key));
return Void();
}
}; // class BackupContainerFileSystemImpl

Future<Reference<IBackupFile>> BackupContainerFileSystem::writeLogFile(Version beginVersion,
@@ -1432,6 +1469,20 @@ BackupContainerFileSystem::VersionProperty BackupContainerFileSystem::unreliable
BackupContainerFileSystem::VersionProperty BackupContainerFileSystem::logType() {
return { Reference<BackupContainerFileSystem>::addRef(this), "mutation_log_type" };
}
bool BackupContainerFileSystem::usesEncryption() const {
return encryptionSetupFuture.isValid();
}
Future<Void> BackupContainerFileSystem::encryptionSetupComplete() const {
return encryptionSetupFuture;
}
void BackupContainerFileSystem::setEncryptionKey(Optional<std::string> const& encryptionKeyFileName) {
if (encryptionKeyFileName.present()) {
encryptionSetupFuture = BackupContainerFileSystemImpl::readEncryptionKey(encryptionKeyFileName.get());
}
}
Future<Void> BackupContainerFileSystem::createTestEncryptionKeyFile(std::string const& filename) {
return BackupContainerFileSystemImpl::createTestEncryptionKeyFile(filename);
}
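
// A minimal sketch of how a concrete container is expected to use the three
// protected hooks above (hypothetical subclass; compare the real containers
// touched by this change):
class ExampleEncryptedContainer : public BackupContainerFileSystem {
public:
	ExampleEncryptedContainer(Optional<std::string> const& encryptionKeyFileName) {
		// Starts readEncryptionKey() in the background when a key file is given.
		setEncryptionKey(encryptionKeyFileName);
	}
	Future<Void> create() override {
		// Creation is only complete once the key has been read and installed.
		return usesEncryption() ? encryptionSetupComplete() : Future<Void>(Void());
	}
};
// Read/write paths then branch on usesEncryption() to wrap files in
// AsyncFileEncrypted or to set IAsyncFile::OPEN_ENCRYPTED.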

namespace backup_test {

@@ -1466,12 +1517,12 @@ ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c, Reference<I

state Reference<IAsyncFile> inputFile = wait(c->readFile(f->getFileName()));
int64_t fileSize = wait(inputFile->size());
ASSERT(size == fileSize);
ASSERT_EQ(size, fileSize);
if (size > 0) {
state Standalone<VectorRef<uint8_t>> buf;
buf.resize(buf.arena(), fileSize);
int b = wait(inputFile->read(buf.begin(), buf.size(), 0));
ASSERT(b == buf.size());
ASSERT_EQ(b, buf.size());
ASSERT(buf == content);
}
return Void();
@@ -1485,7 +1536,7 @@ Version nextVersion(Version v) {

// Write a snapshot file with only begin & end key
ACTOR static Future<Void> testWriteSnapshotFile(Reference<IBackupFile> file, Key begin, Key end, uint32_t blockSize) {
ASSERT(blockSize > 3 * sizeof(uint32_t) + begin.size() + end.size());
ASSERT_GT(blockSize, 3 * sizeof(uint32_t) + begin.size() + end.size());

uint32_t fileVersion = BACKUP_AGENT_SNAPSHOT_FILE_VERSION;
// write Header
@@ -1506,12 +1557,16 @@ ACTOR static Future<Void> testWriteSnapshotFile(Reference<IBackupFile> file, Key
return Void();
}

ACTOR static Future<Void> testBackupContainer(std::string url) {
ACTOR Future<Void> testBackupContainer(std::string url, Optional<std::string> encryptionKeyFileName) {
state FlowLock lock(100e6);

if (encryptionKeyFileName.present()) {
wait(BackupContainerFileSystem::createTestEncryptionKeyFile(encryptionKeyFileName.get()));
}

printf("BackupContainerTest URL %s\n", url.c_str());

state Reference<IBackupContainer> c = IBackupContainer::openContainer(url);
state Reference<IBackupContainer> c = IBackupContainer::openContainer(url, encryptionKeyFileName);

// Make sure container doesn't exist, then create it.
try {
@@ -1597,9 +1652,9 @@ ACTOR static Future<Void> testBackupContainer(std::string url) {
wait(waitForAll(writes));

state BackupFileList listing = wait(c->dumpFileList());
ASSERT(listing.ranges.size() == nRangeFiles);
ASSERT(listing.logs.size() == logs.size());
ASSERT(listing.snapshots.size() == snapshots.size());
ASSERT_EQ(listing.ranges.size(), nRangeFiles);
ASSERT_EQ(listing.logs.size(), logs.size());
ASSERT_EQ(listing.snapshots.size(), snapshots.size());

state BackupDescription desc = wait(c->describeBackup());
printf("\n%s\n", desc.toString().c_str());
@@ -1629,8 +1684,8 @@ ACTOR static Future<Void> testBackupContainer(std::string url) {

// If there is an error, it must be backup_cannot_expire and we have to be on the last snapshot
if (f.isError()) {
ASSERT(f.getError().code() == error_code_backup_cannot_expire);
ASSERT(i == listing.snapshots.size() - 1);
ASSERT_EQ(f.getError().code(), error_code_backup_cannot_expire);
ASSERT_EQ(i, listing.snapshots.size() - 1);
wait(c->expireData(expireVersion, true));
}

@@ -1646,31 +1701,34 @@ ACTOR static Future<Void> testBackupContainer(std::string url) {
ASSERT(d.isError() && d.getError().code() == error_code_backup_does_not_exist);

BackupFileList empty = wait(c->dumpFileList());
ASSERT(empty.ranges.size() == 0);
ASSERT(empty.logs.size() == 0);
ASSERT(empty.snapshots.size() == 0);
ASSERT_EQ(empty.ranges.size(), 0);
ASSERT_EQ(empty.logs.size(), 0);
ASSERT_EQ(empty.snapshots.size(), 0);

printf("BackupContainerTest URL=%s PASSED.\n", url.c_str());

return Void();
}

TEST_CASE("/backup/containers/localdir") {
if (g_network->isSimulated())
wait(testBackupContainer(format("file://simfdb/backups/%llx", timer_int())));
else
wait(testBackupContainer(format("file:///private/tmp/fdb_backups/%llx", timer_int())));
TEST_CASE("/backup/containers/localdir/unencrypted") {
wait(testBackupContainer(format("file://%s/fdb_backups/%llx", params.getDataDir().c_str(), timer_int()), {}));
return Void();
};
}

TEST_CASE("/backup/containers/localdir/encrypted") {
wait(testBackupContainer(format("file://%s/fdb_backups/%llx", params.getDataDir().c_str(), timer_int()),
format("%s/test_encryption_key", params.getDataDir().c_str())));
return Void();
}

TEST_CASE("/backup/containers/url") {
if (!g_network->isSimulated()) {
const char* url = getenv("FDB_TEST_BACKUP_URL");
ASSERT(url != nullptr);
wait(testBackupContainer(url));
wait(testBackupContainer(url, {}));
}
return Void();
};
}

TEST_CASE("/backup/containers_list") {
if (!g_network->isSimulated()) {
@@ -1683,7 +1741,7 @@ TEST_CASE("/backup/containers_list") {
}
}
return Void();
};
}

TEST_CASE("/backup/time") {
// test formatTime()

@@ -153,6 +153,13 @@ public:
bool logsOnly,
Version beginVersion) final;

static Future<Void> createTestEncryptionKeyFile(std::string const& filename);

protected:
bool usesEncryption() const;
void setEncryptionKey(Optional<std::string> const& encryptionKeyFileName);
Future<Void> encryptionSetupComplete() const;

private:
struct VersionProperty {
VersionProperty(Reference<BackupContainerFileSystem> bc, const std::string& name)
@@ -186,6 +193,8 @@ private:
Future<std::vector<RangeFile>> old_listRangeFiles(Version beginVersion, Version endVersion);

friend class BackupContainerFileSystemImpl;

Future<Void> encryptionSetupFuture;
};

#endif

@@ -131,7 +131,10 @@ std::string BackupContainerLocalDirectory::getURLFormat() {
return "file://</path/to/base/dir/>";
}

BackupContainerLocalDirectory::BackupContainerLocalDirectory(const std::string& url) {
BackupContainerLocalDirectory::BackupContainerLocalDirectory(const std::string& url,
const Optional<std::string>& encryptionKeyFileName) {
setEncryptionKey(encryptionKeyFileName);

std::string path;
if (url.find("file://") != 0) {
TraceEvent(SevWarn, "BackupContainerLocalDirectory")
@@ -193,7 +196,10 @@ Future<std::vector<std::string>> BackupContainerLocalDirectory::listURLs(const s
}

Future<Void> BackupContainerLocalDirectory::create() {
// Nothing should be done here because create() can be called by any process working with the container URL,
if (usesEncryption()) {
return encryptionSetupComplete();
}
// No directory should be created here because create() can be called by any process working with the container URL,
// such as fdbbackup. Since "local directory" containers are by definition local to the machine they are
// accessed from, the container's creation (in this case the creation of a directory) must be ensured prior to
// every file creation, which is done in openFile(). Creating the directory here will result in unnecessary
@@ -207,6 +213,9 @@ Future<bool> BackupContainerLocalDirectory::exists() {

Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std::string& path) {
int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED;
if (usesEncryption()) {
flags |= IAsyncFile::OPEN_ENCRYPTED;
}
// Simulation does not properly handle opening the same file from multiple machines using a shared filesystem,
// so create a symbolic link to make each file opening appear to be unique. This could also work in production
// but only if the source directory is writeable which shouldn't be required for a restore.
@@ -258,8 +267,11 @@ Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std:
}

Future<Reference<IBackupFile>> BackupContainerLocalDirectory::writeFile(const std::string& path) {
int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE |
IAsyncFile::OPEN_READWRITE;
int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_CREATE |
IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE;
if (usesEncryption()) {
flags |= IAsyncFile::OPEN_ENCRYPTED;
}
std::string fullPath = joinPath(m_path, path);
platform::createDirectory(parentDirectory(fullPath));
std::string temp = fullPath + "." + deterministicRandom()->randomUniqueID().toString() + ".temp";

@@ -33,7 +33,7 @@ public:

static std::string getURLFormat();

BackupContainerLocalDirectory(const std::string& url);
BackupContainerLocalDirectory(const std::string& url, Optional<std::string> const& encryptionKeyFileName);

static Future<std::vector<std::string>> listURLs(const std::string& url);


@@ -20,6 +20,7 @@

#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/BackupContainerS3BlobStore.h"
#include "fdbrpc/AsyncFileEncrypted.h"
#include "fdbrpc/AsyncFileReadAhead.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

@@ -103,6 +104,10 @@ public:
wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
}

if (bc->usesEncryption()) {
wait(bc->encryptionSetupComplete());
}

return Void();
}

@@ -137,9 +142,10 @@ std::string BackupContainerS3BlobStore::indexEntry() {

BackupContainerS3BlobStore::BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore,
const std::string& name,
const S3BlobStoreEndpoint::ParametersT& params)
const S3BlobStoreEndpoint::ParametersT& params,
const Optional<std::string>& encryptionKeyFileName)
: m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {

setEncryptionKey(encryptionKeyFileName);
// Currently only one parameter is supported, "bucket"
for (const auto& [name, value] : params) {
if (name == "bucket") {
@@ -164,12 +170,16 @@ std::string BackupContainerS3BlobStore::getURLFormat() {
}

Future<Reference<IAsyncFile>> BackupContainerS3BlobStore::readFile(const std::string& path) {
return Reference<IAsyncFile>(new AsyncFileReadAheadCache(
Reference<IAsyncFile>(new AsyncFileS3BlobStoreRead(m_bstore, m_bucket, dataPath(path))),
m_bstore->knobs.read_block_size,
m_bstore->knobs.read_ahead_blocks,
m_bstore->knobs.concurrent_reads_per_file,
m_bstore->knobs.read_cache_blocks_per_file));
Reference<IAsyncFile> f = makeReference<AsyncFileS3BlobStoreRead>(m_bstore, m_bucket, dataPath(path));
if (usesEncryption()) {
f = makeReference<AsyncFileEncrypted>(f, AsyncFileEncrypted::Mode::READ_ONLY);
}
f = makeReference<AsyncFileReadAheadCache>(f,
m_bstore->knobs.read_block_size,
m_bstore->knobs.read_ahead_blocks,
m_bstore->knobs.concurrent_reads_per_file,
m_bstore->knobs.read_cache_blocks_per_file);
return f;
}

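// Layering note for readFile() above: encryption sits *below* the read-ahead
// cache, so the chain is, bottom to top,
//
//   AsyncFileS3BlobStoreRead -> AsyncFileEncrypted(READ_ONLY) -> AsyncFileReadAheadCache
//
// which means cached blocks are plaintext and a cache hit pays no decryption
// cost.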

Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<S3BlobStoreEndpoint> bstore,
@@ -178,8 +188,11 @@ Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<
}

Future<Reference<IBackupFile>> BackupContainerS3BlobStore::writeFile(const std::string& path) {
return Reference<IBackupFile>(new BackupContainerS3BlobStoreImpl::BackupFile(
path, Reference<IAsyncFile>(new AsyncFileS3BlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
Reference<IAsyncFile> f = makeReference<AsyncFileS3BlobStoreWrite>(m_bstore, m_bucket, dataPath(path));
if (usesEncryption()) {
f = makeReference<AsyncFileEncrypted>(f, AsyncFileEncrypted::Mode::APPEND_ONLY);
}
return Future<Reference<IBackupFile>>(makeReference<BackupContainerS3BlobStoreImpl::BackupFile>(path, f));
}

Future<Void> BackupContainerS3BlobStore::deleteFile(const std::string& path) {

@@ -43,7 +43,8 @@ class BackupContainerS3BlobStore final : public BackupContainerFileSystem,
public:
BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore,
const std::string& name,
const S3BlobStoreEndpoint::ParametersT& params);
const S3BlobStoreEndpoint::ParametersT& params,
const Optional<std::string>& encryptionKeyFileName);

void addref() override;
void delref() override;

@@ -15,6 +15,8 @@ set(FDBCLIENT_SRCS
BackupContainerLocalDirectory.h
BackupContainerS3BlobStore.actor.cpp
BackupContainerS3BlobStore.h
ClientBooleanParams.cpp
ClientBooleanParams.h
ClientKnobCollection.cpp
ClientKnobCollection.h
ClientKnobs.cpp
@@ -169,7 +171,7 @@ if(BUILD_AZURE_BACKUP)
endif()

add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
add_dependencies(fdbclient fdboptions)
add_dependencies(fdbclient fdboptions fdb_c_options)
if(BUILD_AZURE_BACKUP)
target_link_libraries(fdbclient PUBLIC fdbrpc PRIVATE curl uuid azure-storage-lite)
else()

@@ -0,0 +1,30 @@
/*
* ClientBooleanParams.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "fdbclient/ClientBooleanParams.h"

FDB_DEFINE_BOOLEAN_PARAM(EnableLocalityLoadBalance);
FDB_DEFINE_BOOLEAN_PARAM(LockAware);
FDB_DEFINE_BOOLEAN_PARAM(Reverse);
FDB_DEFINE_BOOLEAN_PARAM(Snapshot);
FDB_DEFINE_BOOLEAN_PARAM(IsInternal);
FDB_DEFINE_BOOLEAN_PARAM(AddConflictRange);
FDB_DEFINE_BOOLEAN_PARAM(UseMetrics);
FDB_DEFINE_BOOLEAN_PARAM(IsSwitchable);

@@ -0,0 +1,32 @@
/*
* ClientBooleanParams.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include "flow/BooleanParam.h"

FDB_DECLARE_BOOLEAN_PARAM(EnableLocalityLoadBalance);
FDB_DECLARE_BOOLEAN_PARAM(LockAware);
FDB_DECLARE_BOOLEAN_PARAM(Reverse);
FDB_DECLARE_BOOLEAN_PARAM(Snapshot);
FDB_DECLARE_BOOLEAN_PARAM(IsInternal);
FDB_DECLARE_BOOLEAN_PARAM(AddConflictRange);
FDB_DECLARE_BOOLEAN_PARAM(UseMetrics);
FDB_DECLARE_BOOLEAN_PARAM(IsSwitchable);
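
// Effect at a call site (sketch): the trailing getRange arguments used to be
// two adjacent bools whose order was easy to confuse. With the typed
// parameters declared above, both intent and order are compiler-checked:
//
//   tr->getRange(begin, end, limit, true, false);                    // before
//   tr->getRange(begin, end, limit, Snapshot::True, Reverse::False); // after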

@@ -29,8 +29,7 @@ ClientKnobs::ClientKnobs(Randomize randomize) {
initialize(randomize);
}

void ClientKnobs::initialize(Randomize _randomize) {
bool const randomize = (_randomize == Randomize::YES);
void ClientKnobs::initialize(Randomize randomize) {
// clang-format off

init( TOO_MANY, 1000000 );
@@ -253,13 +252,13 @@ void ClientKnobs::initialize(Randomize _randomize) {

TEST_CASE("/fdbclient/knobs/initialize") {
// This test depends on TASKBUCKET_TIMEOUT_VERSIONS being defined as a constant multiple of CORE_VERSIONSPERSECOND
ClientKnobs clientKnobs(Randomize::NO);
ClientKnobs clientKnobs(Randomize::False);
int64_t initialCoreVersionsPerSecond = clientKnobs.CORE_VERSIONSPERSECOND;
int initialTaskBucketTimeoutVersions = clientKnobs.TASKBUCKET_TIMEOUT_VERSIONS;
clientKnobs.setKnob("core_versionspersecond", initialCoreVersionsPerSecond * 2);
ASSERT_EQ(clientKnobs.CORE_VERSIONSPERSECOND, initialCoreVersionsPerSecond * 2);
ASSERT_EQ(clientKnobs.TASKBUCKET_TIMEOUT_VERSIONS, initialTaskBucketTimeoutVersions);
clientKnobs.initialize(Randomize::NO);
clientKnobs.initialize(Randomize::False);
ASSERT_EQ(clientKnobs.CORE_VERSIONSPERSECOND, initialCoreVersionsPerSecond * 2);
ASSERT_EQ(clientKnobs.TASKBUCKET_TIMEOUT_VERSIONS, initialTaskBucketTimeoutVersions * 2);
return Void();

@@ -22,9 +22,13 @@
#define FDBCLIENT_KNOBS_H
#pragma once

#include "flow/BooleanParam.h"
#include "flow/Knobs.h"
#include "flow/flow.h"

FDB_DECLARE_BOOLEAN_PARAM(Randomize);
FDB_DECLARE_BOOLEAN_PARAM(IsSimulated);

class ClientKnobs : public KnobsImpl<ClientKnobs> {
public:
int TOO_MANY; // FIXME: this should really be split up so we can control these more specifically

@@ -64,6 +64,7 @@ struct CommitProxyInterface {
bool operator==(CommitProxyInterface const& r) const { return id() == r.id(); }
bool operator!=(CommitProxyInterface const& r) const { return id() != r.id(); }
NetworkAddress address() const { return commit.getEndpoint().getPrimaryAddress(); }
NetworkAddressList addresses() const { return commit.getEndpoint().addresses; }

template <class Archive>
void serialize(Archive& ar) {

@@ -47,8 +47,11 @@ DatabaseBackupAgent::DatabaseBackupAgent()
: subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks), true, false, true)),
futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), true, true)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
AccessSystemKeys::True,
PriorityBatch::False,
LockAware::True)),
futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {}

@@ -56,8 +59,11 @@ DatabaseBackupAgent::DatabaseBackupAgent(Database src)
: subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks), true, false, true)),
futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), true, true)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
AccessSystemKeys::True,
PriorityBatch::False,
LockAware::True)),
futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {
taskBucket->src = src;
@@ -234,7 +240,8 @@ struct BackupRangeTaskFunc : TaskFuncBase {
// retrieve kvData
state PromiseStream<RangeResultWithVersion> results;

state Future<Void> rc = readCommitted(taskBucket->src, results, lock, range, true, true, true);
state Future<Void> rc = readCommitted(
taskBucket->src, results, lock, range, Terminator::True, AccessSystemKeys::True, LockAware::True);
state Key rangeBegin = range.begin;
state Key rangeEnd;
state bool endOfStream = false;
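
// Consumption side of the stream wired up above (sketch of the flow idiom;
// the real loop in this task also tracks lock budget and rangeEnd):
//
//   loop {
//       RangeResultWithVersion values = waitNext(results.getFuture());
//       // ... buffer values.first for the backup ...
//   }
//
// Terminator::True asks readCommitted to close the stream with an
// end_of_stream error, which waitNext rethrows, so the caller exits the loop
// through its try/catch.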

@@ -316,16 +323,20 @@ struct BackupRangeTaskFunc : TaskFuncBase {
applyMutationsKeyVersionCountRange.begin);
state Future<RangeResult> backupVersions =
krmGetRanges(tr, prefix, KeyRangeRef(rangeBegin, rangeEnd), BUGGIFY ? 2 : 2000, 1e5);
state Future<Optional<Value>> logVersionValue = tr->get(
task->params[BackupAgentBase::keyConfigLogUid].withPrefix(applyMutationsEndRange.begin), true);
state Future<Optional<Value>> rangeCountValue = tr->get(rangeCountKey, true);
state Future<RangeResult> prevRange = tr->getRange(
firstGreaterOrEqual(prefix), lastLessOrEqual(rangeBegin.withPrefix(prefix)), 1, true, true);
state Future<Optional<Value>> logVersionValue =
tr->get(task->params[BackupAgentBase::keyConfigLogUid].withPrefix(applyMutationsEndRange.begin),
Snapshot::True);
state Future<Optional<Value>> rangeCountValue = tr->get(rangeCountKey, Snapshot::True);
state Future<RangeResult> prevRange = tr->getRange(firstGreaterOrEqual(prefix),
lastLessOrEqual(rangeBegin.withPrefix(prefix)),
1,
Snapshot::True,
Reverse::True);
state Future<RangeResult> nextRange = tr->getRange(firstGreaterOrEqual(rangeEnd.withPrefix(prefix)),
firstGreaterOrEqual(strinc(prefix)),
1,
true,
false);
Snapshot::True,
Reverse::False);
state Future<Void> verified = taskBucket->keepRunning(tr, task);

wait(checkDatabaseLock(tr,
@@ -363,7 +374,7 @@ struct BackupRangeTaskFunc : TaskFuncBase {
Version logVersion =
logVersionValue.get().present()
? BinaryReader::fromStringRef<Version>(logVersionValue.get().get(), Unversioned())
: -1;
: ::invalidVersion;
if (logVersion >= values.second) {
task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] = rangeBegin;
return Void();
@@ -633,7 +644,7 @@ struct EraseLogRangeTaskFunc : TaskFuncBase {
task->params[BackupAgentBase::keyConfigLogUid],
task->params[BackupAgentBase::destUid],
Optional<Version>(endVersion),
true,
CheckBackupUID::True,
BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned())));
wait(tr->commit());
return Void();
@@ -886,9 +897,9 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
locks[j],
ranges[j],
decodeBKMutationLogKey,
true,
true,
true));
Terminator::True,
AccessSystemKeys::True,
LockAware::True));
}

// copy the range
@@ -1191,7 +1202,7 @@ struct FinishedFullBackupTaskFunc : TaskFuncBase {
task->params[DatabaseBackupAgent::keyFolderId], Unversioned()))
return Void();

wait(eraseLogData(tr, logUidValue, destUidValue, Optional<Version>(), true, backupUid));
wait(eraseLogData(tr, logUidValue, destUidValue, Optional<Version>(), CheckBackupUID::True, backupUid));
wait(tr->commit());
return Void();
} catch (Error& e) {
@@ -1321,6 +1332,10 @@ struct CopyDiffLogsTaskFunc : TaskFuncBase {
.detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
}

// set the log version to the state
tr->set(StringRef(states.pack(DatabaseBackupAgent::keyStateLogBeginVersion)),
BinaryWriter::toValue(beginVersion, Unversioned()));

if (!stopWhenDone.present()) {
state Reference<TaskFuture> allPartsDone = futureBucket->future(tr);
std::vector<Future<Key>> addTaskVector;
@@ -1592,9 +1607,9 @@ struct OldCopyLogRangeTaskFunc : TaskFuncBase {
lock,
ranges[i],
decodeBKMutationLogKey,
true,
true,
true));
Terminator::True,
AccessSystemKeys::True,
LockAware::True));
dump.push_back(dumpData(cx, task, results[i], lock.getPtr(), taskBucket));
}

@@ -1701,7 +1716,7 @@ struct AbortOldBackupTaskFunc : TaskFuncBase {
}

TraceEvent("DBA_AbortOldBackup").detail("TagName", tagNameKey.printable());
wait(srcDrAgent.abortBackup(cx, tagNameKey, false, true));
wait(srcDrAgent.abortBackup(cx, tagNameKey, PartialBackup::False, AbortOldBackup::True));

return Void();
}
@@ -2445,7 +2460,7 @@ public:
ACTOR static Future<EBackupState> waitBackup(DatabaseBackupAgent* backupAgent,
Database cx,
Key tagName,
bool stopWhenDone) {
StopWhenDone stopWhenDone) {
state std::string backTrace;
state UID logUid = wait(backupAgent->getLogUid(cx, tagName));
state Key statusKey = backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
@@ -2510,10 +2525,10 @@ public:
Reference<ReadYourWritesTransaction> tr,
Key tagName,
Standalone<VectorRef<KeyRangeRef>> backupRanges,
bool stopWhenDone,
StopWhenDone stopWhenDone,
Key addPrefix,
Key removePrefix,
bool lockDB,
LockDB lockDB,
DatabaseBackupAgent::PreBackupAction backupAction) {
state UID logUid = deterministicRandom()->randomUniqueID();
state Key logUidValue = BinaryWriter::toValue(logUid, Unversioned());
@@ -2667,7 +2682,7 @@ public:
Standalone<VectorRef<KeyRangeRef>> backupRanges,
Key addPrefix,
Key removePrefix,
bool forceAction) {
ForceAction forceAction) {
state DatabaseBackupAgent drAgent(dest);
state UID destlogUid = wait(backupAgent->getLogUid(dest, tagName));
state EBackupState status = wait(backupAgent->getStateValue(dest, destlogUid));
@@ -2751,7 +2766,7 @@ public:
throw;
}

wait(success(backupAgent->waitBackup(dest, tagName, true)));
wait(success(backupAgent->waitBackup(dest, tagName, StopWhenDone::True)));

TraceEvent("DBA_SwitchoverStopped");

@@ -2780,10 +2795,10 @@ public:
wait(drAgent.submitBackup(backupAgent->taskBucket->src,
tagName,
backupRanges,
false,
StopWhenDone::False,
addPrefix,
removePrefix,
true,
LockDB::True,
DatabaseBackupAgent::PreBackupAction::NONE));
} catch (Error& e) {
if (e.code() != error_code_backup_duplicate)
@@ -2835,10 +2850,10 @@ public:
ACTOR static Future<Void> abortBackup(DatabaseBackupAgent* backupAgent,
Database cx,
Key tagName,
bool partial,
bool abortOldBackup,
bool dstOnly,
bool waitForDestUID) {
PartialBackup partial,
AbortOldBackup abortOldBackup,
DstOnly dstOnly,
WaitForDestUID waitForDestUID) {
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
state Key logUidValue, destUidValue;
state UID logUid, destUid;
@@ -3063,8 +3078,8 @@ public:
errorLimit > 0
? tr->getRange(backupAgent->errors.get(BinaryWriter::toValue(logUid, Unversioned())).range(),
errorLimit,
false,
true)
Snapshot::False,
Reverse::True)
: Future<RangeResult>();
state Future<Optional<Value>> fBackupUid =
tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
@@ -3080,6 +3095,9 @@ public:
state Future<Optional<Key>> fBackupKeysPacked =
tr->get(backupAgent->config.get(BinaryWriter::toValue(logUid, Unversioned()))
.pack(BackupAgentBase::keyConfigBackupRanges));
state Future<Optional<Value>> flogVersionKey =
tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
.pack(BackupAgentBase::keyStateLogBeginVersion));

state EBackupState backupState = wait(backupAgent->getStateValue(tr, logUid));

@@ -3095,7 +3113,14 @@ public:
}

state Optional<Value> stopVersionKey = wait(fStopVersionKey);

Optional<Value> logVersionKey = wait(flogVersionKey);
state std::string logVersionText =
". Last log version is " +
(logVersionKey.present()
? format("%lld", BinaryReader::fromStringRef<Version>(logVersionKey.get(), Unversioned()))
: "unset");
Optional<Key> backupKeysPacked = wait(fBackupKeysPacked);

state Standalone<VectorRef<KeyRangeRef>> backupRanges;
@@ -3115,7 +3140,7 @@ public:
break;
case EBackupState::STATE_RUNNING_DIFFERENTIAL:
statusText +=
"The DR on tag `" + tagNameDisplay + "' is a complete copy of the primary database.\n";
"The DR on tag `" + tagNameDisplay + "' is a complete copy of the primary database" + logVersionText + ".\n";
break;
case EBackupState::STATE_COMPLETED: {
Version stopVersion =
@@ -3127,13 +3152,13 @@ public:
} break;
case EBackupState::STATE_PARTIALLY_ABORTED: {
statusText += "The previous DR on tag `" + tagNameDisplay + "' " +
BackupAgentBase::getStateText(backupState) + ".\n";
BackupAgentBase::getStateText(backupState) + logVersionText + ".\n";
statusText += "Abort the DR with --cleanup before starting a new DR.\n";
break;
}
default:
statusText += "The previous DR on tag `" + tagNameDisplay + "' " +
BackupAgentBase::getStateText(backupState) + ".\n";
BackupAgentBase::getStateText(backupState) + logVersionText + ".\n";
break;
}
}
@@ -3191,7 +3216,7 @@ public:
ACTOR static Future<EBackupState> getStateValue(DatabaseBackupAgent* backupAgent,
Reference<ReadYourWritesTransaction> tr,
UID logUid,
bool snapshot) {
Snapshot snapshot) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Key statusKey = backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
@@ -3204,7 +3229,7 @@ public:
ACTOR static Future<UID> getDestUid(DatabaseBackupAgent* backupAgent,
Reference<ReadYourWritesTransaction> tr,
|
||||
UID logUid,
|
||||
bool snapshot) {
|
||||
Snapshot snapshot) {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
state Key destUidKey =
|
||||
|
@ -3217,7 +3242,7 @@ public:
|
|||
ACTOR static Future<UID> getLogUid(DatabaseBackupAgent* backupAgent,
|
||||
Reference<ReadYourWritesTransaction> tr,
|
||||
Key tagName,
|
||||
bool snapshot) {
|
||||
Snapshot snapshot) {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
state Optional<Value> logUid = wait(tr->get(backupAgent->tagNames.pack(tagName), snapshot));
|
||||
|
@ -3235,7 +3260,7 @@ Future<Void> DatabaseBackupAgent::atomicSwitchover(Database dest,
|
|||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
Key addPrefix,
|
||||
Key removePrefix,
|
||||
bool forceAction) {
|
||||
ForceAction forceAction) {
|
||||
return DatabaseBackupAgentImpl::atomicSwitchover(
|
||||
this, dest, tagName, backupRanges, addPrefix, removePrefix, forceAction);
|
||||
}
|
||||
|
@ -3243,10 +3268,10 @@ Future<Void> DatabaseBackupAgent::atomicSwitchover(Database dest,
|
|||
Future<Void> DatabaseBackupAgent::submitBackup(Reference<ReadYourWritesTransaction> tr,
|
||||
Key tagName,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
bool stopWhenDone,
|
||||
StopWhenDone stopWhenDone,
|
||||
Key addPrefix,
|
||||
Key removePrefix,
|
||||
bool lockDatabase,
|
||||
LockDB lockDatabase,
|
||||
PreBackupAction backupAction) {
|
||||
return DatabaseBackupAgentImpl::submitBackup(
|
||||
this, tr, tagName, backupRanges, stopWhenDone, addPrefix, removePrefix, lockDatabase, backupAction);
|
||||
|
@ -3258,10 +3283,10 @@ Future<Void> DatabaseBackupAgent::discontinueBackup(Reference<ReadYourWritesTran
|
|||
|
||||
Future<Void> DatabaseBackupAgent::abortBackup(Database cx,
|
||||
Key tagName,
|
||||
bool partial,
|
||||
bool abortOldBackup,
|
||||
bool dstOnly,
|
||||
bool waitForDestUID) {
|
||||
PartialBackup partial,
|
||||
AbortOldBackup abortOldBackup,
|
||||
DstOnly dstOnly,
|
||||
WaitForDestUID waitForDestUID) {
|
||||
return DatabaseBackupAgentImpl::abortBackup(this, cx, tagName, partial, abortOldBackup, dstOnly, waitForDestUID);
|
||||
}
|
||||
|
||||
|
@ -3271,15 +3296,15 @@ Future<std::string> DatabaseBackupAgent::getStatus(Database cx, int errorLimit,
|
|||
|
||||
Future<EBackupState> DatabaseBackupAgent::getStateValue(Reference<ReadYourWritesTransaction> tr,
|
||||
UID logUid,
|
||||
bool snapshot) {
|
||||
Snapshot snapshot) {
|
||||
return DatabaseBackupAgentImpl::getStateValue(this, tr, logUid, snapshot);
|
||||
}
|
||||
|
||||
Future<UID> DatabaseBackupAgent::getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot) {
|
||||
Future<UID> DatabaseBackupAgent::getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot snapshot) {
|
||||
return DatabaseBackupAgentImpl::getDestUid(this, tr, logUid, snapshot);
|
||||
}
|
||||
|
||||
Future<UID> DatabaseBackupAgent::getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, bool snapshot) {
|
||||
Future<UID> DatabaseBackupAgent::getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, Snapshot snapshot) {
|
||||
return DatabaseBackupAgentImpl::getLogUid(this, tr, tagName, snapshot);
|
||||
}
|
||||
|
||||
|
@ -3287,7 +3312,7 @@ Future<Void> DatabaseBackupAgent::waitUpgradeToLatestDrVersion(Database cx, Key
|
|||
return DatabaseBackupAgentImpl::waitUpgradeToLatestDrVersion(this, cx, tagName);
|
||||
}
|
||||
|
||||
Future<EBackupState> DatabaseBackupAgent::waitBackup(Database cx, Key tagName, bool stopWhenDone) {
|
||||
Future<EBackupState> DatabaseBackupAgent::waitBackup(Database cx, Key tagName, StopWhenDone stopWhenDone) {
|
||||
return DatabaseBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone);
|
||||
}
|
||||
|
||||
|
@ -3297,12 +3322,12 @@ Future<EBackupState> DatabaseBackupAgent::waitSubmitted(Database cx, Key tagName
|
|||
|
||||
Future<int64_t> DatabaseBackupAgent::getRangeBytesWritten(Reference<ReadYourWritesTransaction> tr,
|
||||
UID logUid,
|
||||
bool snapshot) {
|
||||
Snapshot snapshot) {
|
||||
return DRConfig(logUid).rangeBytesWritten().getD(tr, snapshot);
|
||||
}
|
||||
|
||||
Future<int64_t> DatabaseBackupAgent::getLogBytesWritten(Reference<ReadYourWritesTransaction> tr,
|
||||
UID logUid,
|
||||
bool snapshot) {
|
||||
Snapshot snapshot) {
|
||||
return DRConfig(logUid).logBytesWritten().getD(tr, snapshot);
|
||||
}
|
||||
|
|
|
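// ---------------------------------------------------------------------------
// Context for the hunks above and below: nearly every edit swaps a bare
// `bool` parameter for a dedicated named-boolean type (StopWhenDone, LockDB,
// Snapshot, ...), declared via FDB_DECLARE_BOOLEAN_PARAM in a header and
// FDB_DEFINE_BOOLEAN_PARAM in one .cpp. A minimal sketch of the idea follows;
// the exact macro text in flow/BooleanParam.h may differ, and
// StopWhenDoneSketch is a stand-in name, not the real type.
class StopWhenDoneSketch {
    bool value;

public:
    explicit constexpr StopWhenDoneSketch(bool v) : value(v) {}
    constexpr operator bool() const { return value; }
    static const StopWhenDoneSketch True;
    static const StopWhenDoneSketch False;
};
inline const StopWhenDoneSketch StopWhenDoneSketch::True{ true };
inline const StopWhenDoneSketch StopWhenDoneSketch::False{ false };

// Old call site: waitBackup(dest, tagName, true)  -- what does `true` mean?
// New call site: waitBackup(dest, tagName, StopWhenDone::True)
// The explicit constructor means a stray raw `true` no longer compiles, and
// adjacent flags of different named types cannot be transposed silently.
// ---------------------------------------------------------------------------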
@@ -157,11 +157,11 @@ public:
 	static Database create(Reference<AsyncVar<ClientDBInfo>> clientInfo,
 	                       Future<Void> clientInfoMonitor,
 	                       LocalityData clientLocality,
-	                       bool enableLocalityLoadBalance,
+	                       EnableLocalityLoadBalance,
 	                       TaskPriority taskID = TaskPriority::DefaultEndpoint,
-	                       bool lockAware = false,
+	                       LockAware = LockAware::False,
 	                       int apiVersion = Database::API_VERSION_LATEST,
-	                       bool switchable = false);
+	                       IsSwitchable = IsSwitchable::False);
 
 	~DatabaseContext();
 

@@ -180,13 +180,13 @@ public:
 		                                   switchable));
 	}
 
-	std::pair<KeyRange, Reference<LocationInfo>> getCachedLocation(const KeyRef&, bool isBackward = false);
+	std::pair<KeyRange, Reference<LocationInfo>> getCachedLocation(const KeyRef&, Reverse isBackward = Reverse::False);
 	bool getCachedLocations(const KeyRangeRef&,
 	                        vector<std::pair<KeyRange, Reference<LocationInfo>>>&,
 	                        int limit,
-	                        bool reverse);
+	                        Reverse reverse);
 	Reference<LocationInfo> setCachedLocation(const KeyRangeRef&, const vector<struct StorageServerInterface>&);
-	void invalidateCache(const KeyRef&, bool isBackward = false);
+	void invalidateCache(const KeyRef&, Reverse isBackward = Reverse::False);
 	void invalidateCache(const KeyRangeRef&);
 
 	bool sampleReadTags() const;

@@ -217,7 +217,7 @@ public:
 	void setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value);
 
 	Error deferredError;
-	bool lockAware;
+	LockAware lockAware{ LockAware::False };
 
 	bool isError() const { return deferredError.code() != invalid_error_code; }
 

@@ -242,7 +242,7 @@ public:
 	// new cluster.
 	Future<Void> switchConnectionFile(Reference<ClusterConnectionFile> standby);
 	Future<Void> connectionFileChanged();
-	bool switchable = false;
+	IsSwitchable switchable{ false };
 
 	// Management API, Attempt to kill or suspend a process, return 1 for request sent out, 0 for failure
 	Future<int64_t> rebootWorker(StringRef address, bool check = false, int duration = 0);

@@ -259,11 +259,11 @@ public:
 	                Future<Void> clientInfoMonitor,
 	                TaskPriority taskID,
 	                LocalityData const& clientLocality,
-	                bool enableLocalityLoadBalance,
-	                bool lockAware,
-	                bool internal = true,
+	                EnableLocalityLoadBalance,
+	                LockAware,
+	                IsInternal = IsInternal::True,
 	                int apiVersion = Database::API_VERSION_LATEST,
-	                bool switchable = false);
+	                IsSwitchable = IsSwitchable::False);
 
 	explicit DatabaseContext(const Error& err);
 

@@ -282,7 +282,7 @@ public:
 	UID proxiesLastChange;
 	LocalityData clientLocality;
 	QueueModel queueModel;
-	bool enableLocalityLoadBalance;
+	EnableLocalityLoadBalance enableLocalityLoadBalance{ EnableLocalityLoadBalance::False };
 
 	struct VersionRequest {
 		SpanID spanContext;

@@ -329,7 +329,7 @@ public:
 	std::unordered_map<UID, Reference<TSSMetrics>> tssMetrics;
 
 	UID dbId;
-	bool internal; // Only contexts created through the C client and fdbcli are non-internal
+	IsInternal internal; // Only contexts created through the C client and fdbcli are non-internal
 
 	PrioritizedTransactionTagMap<ClientTagThrottleData> throttledTags;
 

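// ---------------------------------------------------------------------------
// Why `bool lockAware;` becomes `LockAware lockAware{ LockAware::False };`
// in the header above: under the sketch pattern shown earlier, the wrapper
// has an explicit bool constructor and no default constructor, so members of
// that type need an initializer. Brace (direct-list) initialization is used
// because it may call an explicit constructor, so it also accepts a raw
// bool, as in `IsSwitchable switchable{ false };`. Self-contained sketch
// with a stand-in type:
class LockAwareSketch {
    bool value;

public:
    explicit constexpr LockAwareSketch(bool v) : value(v) {}
    constexpr operator bool() const { return value; }
    static const LockAwareSketch True;
    static const LockAwareSketch False;
};
inline const LockAwareSketch LockAwareSketch::True{ true };
inline const LockAwareSketch LockAwareSketch::False{ false };

struct MemberInitSketch {
    LockAwareSketch lockAware{ LockAwareSketch::False }; // copy from the named constant
    LockAwareSketch switchable{ false };                 // brace-init can call the explicit ctor
    // LockAwareSketch broken;       // error: no default constructor
    // LockAwareSketch bad = false;  // error: constructor is explicit
};
// ---------------------------------------------------------------------------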
@@ -42,6 +42,9 @@
 
 #include "flow/actorcompiler.h" // This must be the last #include.
 
+FDB_DEFINE_BOOLEAN_PARAM(IncrementalBackupOnly);
+FDB_DEFINE_BOOLEAN_PARAM(OnlyApplyMutationLogs);
+
 #define SevFRTestInfo SevVerbose
 //#define SevFRTestInfo SevInfo
 

@@ -117,7 +120,7 @@ Key FileBackupAgent::getPauseKey() {
 
 ACTOR Future<std::vector<KeyBackedTag>> TagUidMap::getAll_impl(TagUidMap* tagsMap,
                                                                Reference<ReadYourWritesTransaction> tr,
-                                                               bool snapshot) {
+                                                               Snapshot snapshot) {
 	state Key prefix = tagsMap->prefix; // Copying it here as tagsMap lifetime is not tied to this actor
 	TagMap::PairsType tagPairs = wait(tagsMap->getRange(tr, std::string(), {}, 1e6, snapshot));
 	std::vector<KeyBackedTag> results;

@@ -142,7 +145,7 @@ public:
 	}
 	KeyBackedProperty<Key> addPrefix() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }
 	KeyBackedProperty<Key> removePrefix() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }
-	KeyBackedProperty<bool> onlyAppyMutationLogs() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }
+	KeyBackedProperty<bool> onlyApplyMutationLogs() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }
 	KeyBackedProperty<bool> inconsistentSnapshotOnly() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }
 	// XXX: Remove restoreRange() once it is safe to remove. It has been changed to restoreRanges
 	KeyBackedProperty<KeyRange> restoreRange() { return configSpace.pack(LiteralStringRef(__FUNCTION__)); }

@@ -248,9 +251,9 @@ public:
 	Key applyMutationsMapPrefix() { return uidPrefixKey(applyMutationsKeyVersionMapRange.begin, uid); }
 
 	ACTOR static Future<int64_t> getApplyVersionLag_impl(Reference<ReadYourWritesTransaction> tr, UID uid) {
 		// Both of these are snapshot reads
-		state Future<Optional<Value>> beginVal = tr->get(uidPrefixKey(applyMutationsBeginRange.begin, uid), true);
-		state Future<Optional<Value>> endVal = tr->get(uidPrefixKey(applyMutationsEndRange.begin, uid), true);
+		state Future<Optional<Value>> beginVal =
+		    tr->get(uidPrefixKey(applyMutationsBeginRange.begin, uid), Snapshot::True);
+		state Future<Optional<Value>> endVal = tr->get(uidPrefixKey(applyMutationsEndRange.begin, uid), Snapshot::True);
 		wait(success(beginVal) && success(endVal));
 
 		if (!beginVal.get().present() || !endVal.get().present())

@@ -440,8 +443,12 @@ FileBackupAgent::FileBackupAgent()
     // The other subspaces have logUID -> value
     ,
     config(subspace.get(BackupAgentBase::keyConfig)), lastRestorable(subspace.get(FileBackupAgent::keyLastRestorable)),
-    taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks), true, false, true)),
-    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), true, true)) {}
+    taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
+                              AccessSystemKeys::True,
+                              PriorityBatch::False,
+                              LockAware::True)),
+    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)) {
+}
 
 namespace fileBackup {
 

@@ -863,10 +870,10 @@ ACTOR static Future<Void> abortFiveOneBackup(FileBackupAgent* backupAgent,
 	tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 
 	state KeyBackedTag tag = makeBackupTag(tagName);
-	state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
+	state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, Snapshot::False, backup_unneeded()));
 
 	state BackupConfig config(current.first);
-	EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+	EBackupState status = wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 
 	if (!backupAgent->isRunnable(status)) {
 		throw backup_unneeded();

@@ -952,7 +959,7 @@ ACTOR static Future<Key> addBackupTask(StringRef name,
                                        Reference<TaskFuture> waitFor = Reference<TaskFuture>(),
                                        std::function<void(Reference<Task>)> setupTaskFn = NOP_SETUP_TASK_FN,
                                        int priority = 0,
-                                       bool setValidation = true) {
+                                       SetValidation setValidation = SetValidation::True) {
 	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 	tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 

@@ -1107,7 +1114,7 @@ struct BackupRangeTaskFunc : BackupTaskFuncBase {
 			Params.beginKey().set(task, range.end);
 
 			// Save and extend the task with the new begin parameter
-			state Version newTimeout = wait(taskBucket->extendTimeout(tr, task, true));
+			state Version newTimeout = wait(taskBucket->extendTimeout(tr, task, UpdateParams::True));
 
 			// Update the range bytes written in the backup config
 			backup.rangeBytesWritten().atomicOp(tr, file->size(), MutationRef::AddValue);

@@ -1201,7 +1208,13 @@ struct BackupRangeTaskFunc : BackupTaskFuncBase {
 		// retrieve kvData
 		state PromiseStream<RangeResultWithVersion> results;
 
-		state Future<Void> rc = readCommitted(cx, results, lock, KeyRangeRef(beginKey, endKey), true, true, true);
+		state Future<Void> rc = readCommitted(cx,
+		                                      results,
+		                                      lock,
+		                                      KeyRangeRef(beginKey, endKey),
+		                                      Terminator::True,
+		                                      AccessSystemKeys::True,
+		                                      LockAware::True);
 		state RangeFileWriter rangeFile;
 		state BackupConfig backup(task);
 

@@ -2044,7 +2057,8 @@ struct BackupLogRangeTaskFunc : BackupTaskFuncBase {
 		state std::vector<Future<Void>> rc;
 
 		for (auto& range : ranges) {
-			rc.push_back(readCommitted(cx, results, lock, range, false, true, true));
+			rc.push_back(
+			    readCommitted(cx, results, lock, range, Terminator::False, AccessSystemKeys::True, LockAware::True));
 		}
 
 		state Future<Void> sendEOS = map(errorOr(waitForAll(rc)), [=](ErrorOr<Void> const& result) {

@@ -2222,7 +2236,7 @@ struct EraseLogRangeTaskFunc : BackupTaskFuncBase {
 		                                Params.destUidValue().set(task, destUidValue);
 	                                },
 	                                0,
-	                                false));
+	                                SetValidation::False));
 
 	return key;
 }

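// ---------------------------------------------------------------------------
// The readCommitted() edits above are the motivating case for the named
// types: three adjacent flags. A self-contained sketch (dummy signature; the
// real readCommitted takes more parameters) showing that the named types
// cannot be swapped:
class TerminatorSketch {
    bool v;

public:
    explicit constexpr TerminatorSketch(bool b) : v(b) {}
    constexpr operator bool() const { return v; }
};
class AccessSystemKeysSketch {
    bool v;

public:
    explicit constexpr AccessSystemKeysSketch(bool b) : v(b) {}
    constexpr operator bool() const { return v; }
};

void readCommittedSketch(TerminatorSketch terminator, AccessSystemKeysSketch systemAccess) {
    (void)terminator;
    (void)systemAccess;
}

int main() {
    readCommittedSketch(TerminatorSketch{ false }, AccessSystemKeysSketch{ true }); // ok
    // readCommittedSketch(AccessSystemKeysSketch{ true }, TerminatorSketch{ false });
    // ^ error: the argument types do not convert to one another, unlike (true, false).
}
// ---------------------------------------------------------------------------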
@@ -3580,9 +3594,9 @@ struct RestoreDispatchTaskFunc : RestoreTaskFuncBase {
 		state int64_t remainingInBatch = Params.remainingInBatch().get(task);
 		state bool addingToExistingBatch = remainingInBatch > 0;
 		state Version restoreVersion;
-		state Future<Optional<bool>> onlyAppyMutationLogs = restore.onlyAppyMutationLogs().get(tr);
+		state Future<Optional<bool>> onlyApplyMutationLogs = restore.onlyApplyMutationLogs().get(tr);
 
-		wait(store(restoreVersion, restore.restoreVersion().getOrThrow(tr)) && success(onlyAppyMutationLogs) &&
+		wait(store(restoreVersion, restore.restoreVersion().getOrThrow(tr)) && success(onlyApplyMutationLogs) &&
 		     checkTaskVersion(tr->getDatabase(), task, name, version));
 
 		// If not adding to an existing batch then update the apply mutations end version so the mutations from the

@@ -4058,12 +4072,13 @@ struct StartFullRestoreTaskFunc : RestoreTaskFuncBase {
 			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 
 			wait(checkTaskVersion(tr->getDatabase(), task, name, version));
-			wait(store(beginVersion, restore.beginVersion().getD(tr, false, invalidVersion)));
+			wait(store(beginVersion, restore.beginVersion().getD(tr, Snapshot::False, ::invalidVersion)));
 
 			wait(store(restoreVersion, restore.restoreVersion().getOrThrow(tr)));
 			wait(store(ranges, restore.getRestoreRangesOrDefault(tr)));
-			wait(store(logsOnly, restore.onlyAppyMutationLogs().getD(tr, false, false)));
-			wait(store(inconsistentSnapshotOnly, restore.inconsistentSnapshotOnly().getD(tr, false, false)));
+			wait(store(logsOnly, restore.onlyApplyMutationLogs().getD(tr, Snapshot::False, false)));
+			wait(store(inconsistentSnapshotOnly,
+			           restore.inconsistentSnapshotOnly().getD(tr, Snapshot::False, false)));
 
 			wait(taskBucket->keepRunning(tr, task));
 

@@ -4245,7 +4260,7 @@ struct StartFullRestoreTaskFunc : RestoreTaskFuncBase {
 		    tr, taskBucket, task, 0, "", 0, CLIENT_KNOBS->RESTORE_DISPATCH_BATCH_SIZE)));
 
 		wait(taskBucket->finish(tr, task));
-		state Future<Optional<bool>> logsOnly = restore.onlyAppyMutationLogs().get(tr);
+		state Future<Optional<bool>> logsOnly = restore.onlyApplyMutationLogs().get(tr);
 		wait(success(logsOnly));
 		if (logsOnly.get().present() && logsOnly.get().get()) {
 			// If this is an incremental restore, we need to set the applyMutationsMapPrefix

@@ -4314,7 +4329,7 @@ public:
 	static constexpr int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;
 
 	// Parallel restore
-	ACTOR static Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true) {
+	ACTOR static Future<Void> parallelRestoreFinish(Database cx, UID randomUID, UnlockDB unlockDB = UnlockDB::True) {
 		state ReadYourWritesTransaction tr(cx);
 		state Optional<Value> restoreRequestDoneKeyValue;
 		TraceEvent("FastRestoreToolWaitForRestoreToFinish").detail("DBLock", randomUID);

@@ -4365,7 +4380,7 @@ public:
 	                                              Standalone<VectorRef<KeyRangeRef>> backupRanges,
 	                                              Key bcUrl,
 	                                              Version targetVersion,
-	                                              bool lockDB,
+	                                              LockDB lockDB,
 	                                              UID randomUID,
 	                                              Key addPrefix,
 	                                              Key removePrefix) {

@@ -4458,7 +4473,7 @@ public:
 	ACTOR static Future<EBackupState> waitBackup(FileBackupAgent* backupAgent,
 	                                             Database cx,
 	                                             std::string tagName,
-	                                             bool stopWhenDone,
+	                                             StopWhenDone stopWhenDone,
 	                                             Reference<IBackupContainer>* pContainer = nullptr,
 	                                             UID* pUID = nullptr) {
 		state std::string backTrace;

@@ -4476,7 +4491,8 @@ public:
 				}
 
 				state BackupConfig config(oldUidAndAborted.get().first);
-				state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+				state EBackupState status =
+				    wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 
 				// Break, if one of the following is true
 				//  - no longer runnable

@@ -4486,7 +4502,7 @@ public:
 
 					if (pContainer != nullptr) {
 						Reference<IBackupContainer> c =
-						    wait(config.backupContainer().getOrThrow(tr, false, backup_invalid_info()));
+						    wait(config.backupContainer().getOrThrow(tr, Snapshot::False, backup_invalid_info()));
 						*pContainer = c;
 					}
 

@@ -4506,6 +4522,7 @@ public:
 		}
 	}
 
+	// TODO: Get rid of all of these confusing boolean flags
 	ACTOR static Future<Void> submitBackup(FileBackupAgent* backupAgent,
 	                                       Reference<ReadYourWritesTransaction> tr,
 	                                       Key outContainer,

@@ -4513,9 +4530,10 @@ public:
 	                                       int snapshotIntervalSeconds,
 	                                       std::string tagName,
 	                                       Standalone<VectorRef<KeyRangeRef>> backupRanges,
-	                                       bool stopWhenDone,
-	                                       bool partitionedLog,
-	                                       bool incrementalBackupOnly) {
+	                                       StopWhenDone stopWhenDone,
+	                                       UsePartitionedLog partitionedLog,
+	                                       IncrementalBackupOnly incrementalBackupOnly,
+	                                       Optional<std::string> encryptionKeyFileName) {
 		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 		tr->setOption(FDBTransactionOptions::COMMIT_ON_FIRST_PROXY);

@@ -4531,7 +4549,7 @@ public:
 		if (uidAndAbortedFlag.present()) {
 			state BackupConfig prevConfig(uidAndAbortedFlag.get().first);
 			state EBackupState prevBackupStatus =
-			    wait(prevConfig.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+			    wait(prevConfig.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 			if (FileBackupAgent::isRunnable(prevBackupStatus)) {
 				throw backup_duplicate();
 			}

@@ -4553,7 +4571,7 @@ public:
 			backupContainer = joinPath(backupContainer, std::string("backup-") + nowStr.toString());
 		}
 
-		state Reference<IBackupContainer> bc = IBackupContainer::openContainer(backupContainer);
+		state Reference<IBackupContainer> bc = IBackupContainer::openContainer(backupContainer, encryptionKeyFileName);
 		try {
 			wait(timeoutError(bc->create(), 30));
 		} catch (Error& e) {

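// ---------------------------------------------------------------------------
// submitBackup() above also grows an Optional<std::string> encryptionKeyFileName
// parameter that is threaded down to IBackupContainer::openContainer(). A
// sketch of the general pattern: adding an Optional at the end of a parameter
// list lets callers that do not care pass `{}` (an empty Optional), as the
// atomicRestore call further below does. std::optional stands in here for
// FDB's own Optional type.
#include <optional>
#include <string>

void openContainerSketch(const std::string& url,
                         std::optional<std::string> encryptionKeyFileName = {}) {
    (void)url;
    // Only consult the key file when a name was actually supplied.
    if (encryptionKeyFileName) {
        /* load *encryptionKeyFileName here */
    }
}

int main() {
    openContainerSketch("file:///backups/b1");                // unencrypted, Optional empty
    openContainerSketch("file:///backups/b2", "keyfile.bin"); // encrypted
}
// ---------------------------------------------------------------------------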
@@ -4644,9 +4662,9 @@ public:
 	                                       Version restoreVersion,
 	                                       Key addPrefix,
 	                                       Key removePrefix,
-	                                       bool lockDB,
-	                                       bool onlyAppyMutationLogs,
-	                                       bool inconsistentSnapshotOnly,
+	                                       LockDB lockDB,
+	                                       OnlyApplyMutationLogs onlyApplyMutationLogs,
+	                                       InconsistentSnapshotOnly inconsistentSnapshotOnly,
 	                                       Version beginVersion,
 	                                       UID uid) {
 		KeyRangeMap<int> restoreRangeSet;

@@ -4698,7 +4716,7 @@ public:
 			                               .removePrefix(removePrefix)
 			                               .withPrefix(addPrefix);
 			RangeResult existingRows = wait(tr->getRange(restoreIntoRange, 1));
-			if (existingRows.size() > 0 && !onlyAppyMutationLogs) {
+			if (existingRows.size() > 0 && !onlyApplyMutationLogs) {
 				throw restore_destination_not_empty();
 			}
 		}

@@ -4715,7 +4733,7 @@ public:
 		restore.sourceContainer().set(tr, bc);
 		restore.stateEnum().set(tr, ERestoreState::QUEUED);
 		restore.restoreVersion().set(tr, restoreVersion);
-		restore.onlyAppyMutationLogs().set(tr, onlyAppyMutationLogs);
+		restore.onlyApplyMutationLogs().set(tr, onlyApplyMutationLogs);
 		restore.inconsistentSnapshotOnly().set(tr, inconsistentSnapshotOnly);
 		restore.beginVersion().set(tr, beginVersion);
 		if (BUGGIFY && restoreRanges.size() == 1) {

@@ -4738,7 +4756,7 @@ public:
 	}
 
 	// This method will return the final status of the backup
-	ACTOR static Future<ERestoreState> waitRestore(Database cx, Key tagName, bool verbose) {
+	ACTOR static Future<ERestoreState> waitRestore(Database cx, Key tagName, Verbose verbose) {
 		state ERestoreState status;
 		loop {
 			state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));

@@ -4794,9 +4812,9 @@ public:
 		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 
 		state KeyBackedTag tag = makeBackupTag(tagName.toString());
-		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
+		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, Snapshot::False, backup_unneeded()));
 		state BackupConfig config(current.first);
-		state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+		state EBackupState status = wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 
 		if (!FileBackupAgent::isRunnable(status)) {
 			throw backup_unneeded();

@@ -4845,11 +4863,11 @@ public:
 		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 
 		state KeyBackedTag tag = makeBackupTag(tagName);
-		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
+		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, Snapshot::False, backup_unneeded()));
 
 		state BackupConfig config(current.first);
 		state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
-		EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+		EBackupState status = wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 
 		if (!backupAgent->isRunnable(status)) {
 			throw backup_unneeded();

@@ -4951,7 +4969,7 @@ public:
 					state BackupConfig config(uidAndAbortedFlag.get().first);
 
 					state EBackupState backupState =
-					    wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+					    wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 					JsonBuilderObject statusDoc;
 					statusDoc.setKey("Name", BackupAgentBase::getStateName(backupState));
 					statusDoc.setKey("Description", BackupAgentBase::getStateText(backupState));

@@ -5075,7 +5093,7 @@ public:
 
 	ACTOR static Future<std::string> getStatus(FileBackupAgent* backupAgent,
 	                                           Database cx,
-	                                           bool showErrors,
+	                                           ShowErrors showErrors,
 	                                           std::string tagName) {
 		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
 		state std::string statusText;

@@ -5095,7 +5113,8 @@ public:
 			state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
 			if (uidAndAbortedFlag.present()) {
 				config = BackupConfig(uidAndAbortedFlag.get().first);
-				EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
+				EBackupState status =
+				    wait(config.stateEnum().getD(tr, Snapshot::False, EBackupState::STATE_NEVERRAN));
 				backupState = status;
 			}
 

@@ -5257,7 +5276,7 @@ public:
 	ACTOR static Future<Optional<Version>> getLastRestorable(FileBackupAgent* backupAgent,
 	                                                         Reference<ReadYourWritesTransaction> tr,
 	                                                         Key tagName,
-	                                                         bool snapshot) {
+	                                                         Snapshot snapshot) {
 		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 		state Optional<Value> version = wait(tr->get(backupAgent->lastRestorable.pack(tagName), snapshot));

@@ -5290,7 +5309,7 @@ public:
 	// removePrefix: for each key to be restored, remove this prefix first.
 	// lockDB: if set lock the database with randomUid before performing restore;
 	//         otherwise, check database is locked with the randomUid
-	// onlyAppyMutationLogs: only perform incremental restore, by only applying mutation logs
+	// onlyApplyMutationLogs: only perform incremental restore, by only applying mutation logs
 	// inconsistentSnapshotOnly: Ignore mutation log files during the restore to speedup the process.
 	//                           When set to true, gives an inconsistent snapshot, thus not recommended
 	// beginVersion: restore's begin version

@@ -5301,15 +5320,16 @@ public:
 	                                     Key tagName,
 	                                     Key url,
 	                                     Standalone<VectorRef<KeyRangeRef>> ranges,
-	                                     bool waitForComplete,
+	                                     WaitForComplete waitForComplete,
 	                                     Version targetVersion,
-	                                     bool verbose,
+	                                     Verbose verbose,
 	                                     Key addPrefix,
 	                                     Key removePrefix,
-	                                     bool lockDB,
-	                                     bool onlyAppyMutationLogs,
-	                                     bool inconsistentSnapshotOnly,
+	                                     LockDB lockDB,
+	                                     OnlyApplyMutationLogs onlyApplyMutationLogs,
+	                                     InconsistentSnapshotOnly inconsistentSnapshotOnly,
 	                                     Version beginVersion,
 	                                     Optional<std::string> encryptionKeyFileName,
 	                                     UID randomUid) {
 		// The restore command line tool won't allow ranges to be empty, but correctness workloads somehow might.
 		if (ranges.empty()) {

@@ -5327,12 +5347,12 @@ public:
 		if (targetVersion == invalidVersion && desc.maxRestorableVersion.present())
 			targetVersion = desc.maxRestorableVersion.get();
 
-		if (targetVersion == invalidVersion && onlyAppyMutationLogs && desc.contiguousLogEnd.present()) {
+		if (targetVersion == invalidVersion && onlyApplyMutationLogs && desc.contiguousLogEnd.present()) {
 			targetVersion = desc.contiguousLogEnd.get() - 1;
 		}
 
 		Optional<RestorableFileSet> restoreSet =
-		    wait(bc->getRestoreSet(targetVersion, ranges, onlyAppyMutationLogs, beginVersion));
+		    wait(bc->getRestoreSet(targetVersion, ranges, onlyApplyMutationLogs, beginVersion));
 
 		if (!restoreSet.present()) {
 			TraceEvent(SevWarn, "FileBackupAgentRestoreNotPossible")

@@ -5364,7 +5384,7 @@ public:
 		                           addPrefix,
 		                           removePrefix,
 		                           lockDB,
-		                           onlyAppyMutationLogs,
+		                           onlyApplyMutationLogs,
 		                           inconsistentSnapshotOnly,
 		                           beginVersion,
 		                           randomUid));

@@ -5395,7 +5415,7 @@ public:
 	                                           Standalone<VectorRef<KeyRangeRef>> ranges,
 	                                           Key addPrefix,
 	                                           Key removePrefix,
-	                                           bool fastRestore) {
+	                                           UsePartitionedLog fastRestore) {
 		state Reference<ReadYourWritesTransaction> ryw_tr =
 		    Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
 		state BackupConfig backupConfig;

@@ -5468,7 +5488,7 @@ public:
 			}
 		}
 
-		wait(success(waitBackup(backupAgent, cx, tagName.toString(), true)));
+		wait(success(waitBackup(backupAgent, cx, tagName.toString(), StopWhenDone::True)));
 		TraceEvent("AS_BackupStopped");
 
 		ryw_tr->reset();

@@ -5493,13 +5513,19 @@ public:
 
 		if (fastRestore) {
 			TraceEvent("AtomicParallelRestoreStartRestore");
-			Version targetVersion = -1;
-			bool lockDB = true;
-			wait(submitParallelRestore(
-			    cx, tagName, ranges, KeyRef(bc->getURL()), targetVersion, lockDB, randomUid, addPrefix, removePrefix));
+			Version targetVersion = ::invalidVersion;
+			wait(submitParallelRestore(cx,
+			                           tagName,
+			                           ranges,
+			                           KeyRef(bc->getURL()),
+			                           targetVersion,
+			                           LockDB::True,
+			                           randomUid,
+			                           addPrefix,
+			                           removePrefix));
 			state bool hasPrefix = (addPrefix.size() > 0 || removePrefix.size() > 0);
 			TraceEvent("AtomicParallelRestoreWaitForRestoreFinish").detail("HasPrefix", hasPrefix);
-			wait(parallelRestoreFinish(cx, randomUid, !hasPrefix));
+			wait(parallelRestoreFinish(cx, randomUid, UnlockDB{ !hasPrefix }));
 			// If addPrefix or removePrefix set, we want to transform the effect by copying data
 			if (hasPrefix) {
 				wait(transformRestoredDatabase(cx, ranges, addPrefix, removePrefix));

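// ---------------------------------------------------------------------------
// Two smaller cleanups in the hunk above, sketched with stand-in types:
// `-1` becomes `::invalidVersion` (a named sentinel instead of a magic
// number), and `!hasPrefix` is wrapped as `UnlockDB{ !hasPrefix }` -- the
// brace form is how a named boolean is built from a runtime expression,
// since the constructor is explicit.
#include <cstdint>

using VersionSketch = int64_t;
constexpr VersionSketch invalidVersionSketch = -1; // stand-in for ::invalidVersion

class UnlockDBSketch {
    bool v;

public:
    explicit constexpr UnlockDBSketch(bool b) : v(b) {}
    constexpr operator bool() const { return v; }
};

int main() {
    bool hasPrefix = true;
    VersionSketch targetVersion = invalidVersionSketch; // not a bare -1
    UnlockDBSketch unlock{ !hasPrefix };                // runtime value, explicit construction
    (void)targetVersion;
    (void)unlock;
}
// ---------------------------------------------------------------------------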
@@ -5514,15 +5540,16 @@ public:
 		                           tagName,
 		                           KeyRef(bc->getURL()),
 		                           ranges,
-		                           true,
-		                           -1,
-		                           true,
+		                           WaitForComplete::True,
+		                           ::invalidVersion,
+		                           Verbose::True,
 		                           addPrefix,
 		                           removePrefix,
-		                           true,
-		                           false,
-		                           false,
-		                           invalidVersion,
+		                           LockDB::True,
+		                           OnlyApplyMutationLogs::False,
+		                           InconsistentSnapshotOnly::False,
+		                           ::invalidVersion,
+		                           {},
 		                           randomUid));
 		return ver;
 	}

@@ -5537,16 +5564,15 @@ public:
 	                                             Standalone<VectorRef<KeyRangeRef>> ranges,
 	                                             Key addPrefix,
 	                                             Key removePrefix) {
-		return success(atomicRestore(backupAgent, cx, tagName, ranges, addPrefix, removePrefix, true));
+		return success(
+		    atomicRestore(backupAgent, cx, tagName, ranges, addPrefix, removePrefix, UsePartitionedLog::True));
 	}
 };
 
 const std::string BackupAgentBase::defaultTagName = "default";
 const int BackupAgentBase::logHeaderSize = 12;
 const int FileBackupAgent::dataFooterSize = 20;
 
 // Return if parallel restore has finished
-Future<Void> FileBackupAgent::parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB) {
+Future<Void> FileBackupAgent::parallelRestoreFinish(Database cx, UID randomUID, UnlockDB unlockDB) {
 	return FileBackupAgentImpl::parallelRestoreFinish(cx, randomUID, unlockDB);
 }
 

@@ -5555,7 +5581,7 @@ Future<Void> FileBackupAgent::submitParallelRestore(Database cx,
                                                     Standalone<VectorRef<KeyRangeRef>> backupRanges,
                                                     Key bcUrl,
                                                     Version targetVersion,
-                                                    bool lockDB,
+                                                    LockDB lockDB,
                                                     UID randomUID,
                                                     Key addPrefix,
                                                     Key removePrefix) {

@@ -5576,15 +5602,16 @@ Future<Version> FileBackupAgent::restore(Database cx,
                                          Key tagName,
                                          Key url,
                                          Standalone<VectorRef<KeyRangeRef>> ranges,
-                                         bool waitForComplete,
+                                         WaitForComplete waitForComplete,
                                          Version targetVersion,
-                                         bool verbose,
+                                         Verbose verbose,
                                          Key addPrefix,
                                          Key removePrefix,
-                                         bool lockDB,
-                                         bool onlyAppyMutationLogs,
-                                         bool inconsistentSnapshotOnly,
-                                         Version beginVersion) {
+                                         LockDB lockDB,
+                                         OnlyApplyMutationLogs onlyApplyMutationLogs,
+                                         InconsistentSnapshotOnly inconsistentSnapshotOnly,
+                                         Version beginVersion,
+                                         Optional<std::string> const& encryptionKeyFileName) {
 	return FileBackupAgentImpl::restore(this,
 	                                    cx,
 	                                    cxOrig,

@@ -5597,9 +5624,10 @@ Future<Version> FileBackupAgent::restore(Database cx,
 	                                    addPrefix,
 	                                    removePrefix,
 	                                    lockDB,
-	                                    onlyAppyMutationLogs,
+	                                    onlyApplyMutationLogs,
 	                                    inconsistentSnapshotOnly,
 	                                    beginVersion,
+	                                    encryptionKeyFileName,
 	                                    deterministicRandom()->randomUniqueID());
 }
 

@@ -5608,7 +5636,8 @@ Future<Version> FileBackupAgent::atomicRestore(Database cx,
                                                Standalone<VectorRef<KeyRangeRef>> ranges,
                                                Key addPrefix,
                                                Key removePrefix) {
-	return FileBackupAgentImpl::atomicRestore(this, cx, tagName, ranges, addPrefix, removePrefix, false);
+	return FileBackupAgentImpl::atomicRestore(
+	    this, cx, tagName, ranges, addPrefix, removePrefix, UsePartitionedLog::False);
 }
 
 Future<ERestoreState> FileBackupAgent::abortRestore(Reference<ReadYourWritesTransaction> tr, Key tagName) {

@@ -5623,7 +5652,7 @@ Future<std::string> FileBackupAgent::restoreStatus(Reference<ReadYourWritesTrans
 	return fileBackup::restoreStatus(tr, tagName);
 }
 
-Future<ERestoreState> FileBackupAgent::waitRestore(Database cx, Key tagName, bool verbose) {
+Future<ERestoreState> FileBackupAgent::waitRestore(Database cx, Key tagName, Verbose verbose) {
 	return FileBackupAgentImpl::waitRestore(cx, tagName, verbose);
 };
 

@@ -5631,11 +5660,12 @@ Future<Void> FileBackupAgent::submitBackup(Reference<ReadYourWritesTransaction>
                                            Key outContainer,
                                            int initialSnapshotIntervalSeconds,
                                            int snapshotIntervalSeconds,
-                                           std::string tagName,
+                                           std::string const& tagName,
                                            Standalone<VectorRef<KeyRangeRef>> backupRanges,
-                                           bool stopWhenDone,
-                                           bool partitionedLog,
-                                           bool incrementalBackupOnly) {
+                                           StopWhenDone stopWhenDone,
+                                           UsePartitionedLog partitionedLog,
+                                           IncrementalBackupOnly incrementalBackupOnly,
+                                           Optional<std::string> const& encryptionKeyFileName) {
 	return FileBackupAgentImpl::submitBackup(this,
 	                                         tr,
 	                                         outContainer,

@@ -5645,7 +5675,8 @@ Future<Void> FileBackupAgent::submitBackup(Reference<ReadYourWritesTransaction>
 	                                         backupRanges,
 	                                         stopWhenDone,
 	                                         partitionedLog,
-	                                         incrementalBackupOnly);
+	                                         incrementalBackupOnly,
+	                                         encryptionKeyFileName);
 }
 
 Future<Void> FileBackupAgent::discontinueBackup(Reference<ReadYourWritesTransaction> tr, Key tagName) {

@@ -5656,7 +5687,7 @@ Future<Void> FileBackupAgent::abortBackup(Reference<ReadYourWritesTransaction> t
 	return FileBackupAgentImpl::abortBackup(this, tr, tagName);
 }
 
-Future<std::string> FileBackupAgent::getStatus(Database cx, bool showErrors, std::string tagName) {
+Future<std::string> FileBackupAgent::getStatus(Database cx, ShowErrors showErrors, std::string tagName) {
 	return FileBackupAgentImpl::getStatus(this, cx, showErrors, tagName);
 }
 

@@ -5666,7 +5697,7 @@ Future<std::string> FileBackupAgent::getStatusJSON(Database cx, std::string tagN
 
 Future<Optional<Version>> FileBackupAgent::getLastRestorable(Reference<ReadYourWritesTransaction> tr,
                                                              Key tagName,
-                                                             bool snapshot) {
+                                                             Snapshot snapshot) {
 	return FileBackupAgentImpl::getLastRestorable(this, tr, tagName, snapshot);
 }
 

@@ -5678,7 +5709,7 @@ void FileBackupAgent::setLastRestorable(Reference<ReadYourWritesTransaction> tr,
 
 Future<EBackupState> FileBackupAgent::waitBackup(Database cx,
                                                  std::string tagName,
-                                                 bool stopWhenDone,
+                                                 StopWhenDone stopWhenDone,
                                                  Reference<IBackupContainer>* pContainer,
                                                  UID* pUID) {
 	return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone, pContainer, pUID);

@@ -5739,8 +5770,8 @@ ACTOR static Future<Void> writeKVs(Database cx, Standalone<VectorRef<KeyValueRef
 	state ReadYourWritesTransaction tr(cx);
 	loop {
 		try {
-			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
-			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
+			tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
+			tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
 			KeyRef k1 = kvs[begin].key;
 			KeyRef k2 = end < kvs.size() ? kvs[end].key : normalKeys.end;
 			TraceEvent(SevFRTestInfo, "TransformDatabaseContentsWriteKVReadBack")

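// ---------------------------------------------------------------------------
// On the writeKVs hunk above: the transaction options are narrowed from the
// read-write variants to the read-only ones. As documented for FDB
// transaction options, ACCESS_SYSTEM_KEYS grants read and write access to
// the 0xFF system keyspace and LOCK_AWARE bypasses the database lock for all
// operations, while READ_SYSTEM_KEYS and READ_LOCK_AWARE grant only the
// read-side capability. Shape of the change (the surrounding reads are the
// only system-keyspace accesses on this path):
//
//     tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS); // read 0xFF keys only
//     tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);  // reads work on a locked DB
// ---------------------------------------------------------------------------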
@@ -46,6 +46,7 @@ struct GrvProxyInterface {
 	bool operator==(GrvProxyInterface const& r) const { return id() == r.id(); }
 	bool operator!=(GrvProxyInterface const& r) const { return id() != r.id(); }
 	NetworkAddress address() const { return getConsistentReadVersion.getEndpoint().getPrimaryAddress(); }
+	NetworkAddressList addresses() const { return getConsistentReadVersion.getEndpoint().addresses; }
 
 	template <class Archive>
 	void serialize(Archive& ar) {

@@ -25,11 +25,3 @@
 Reference<IConfigTransaction> IConfigTransaction::createTestSimple(ConfigTransactionInterface const& cti) {
 	return makeReference<SimpleConfigTransaction>(cti);
 }
-
-Reference<IConfigTransaction> IConfigTransaction::createSimple(Database const& cx) {
-	return makeReference<SimpleConfigTransaction>(cx);
-}
-
-Reference<IConfigTransaction> IConfigTransaction::createPaxos(Database const& cx) {
-	return makeReference<PaxosConfigTransaction>(cx);
-}

@@ -40,12 +40,12 @@ public:
 	virtual ~IConfigTransaction() = default;
 
 	static Reference<IConfigTransaction> createTestSimple(ConfigTransactionInterface const&);
-	static Reference<IConfigTransaction> createSimple(Database const&);
-	static Reference<IConfigTransaction> createPaxos(Database const&);
 
 	// Not implemented:
 	void setVersion(Version) override { throw client_invalid_operation(); }
-	Future<Key> getKey(KeySelector const& key, bool snapshot = false) override { throw client_invalid_operation(); }
+	Future<Key> getKey(KeySelector const& key, Snapshot snapshot = Snapshot::False) override {
+		throw client_invalid_operation();
+	}
 	Future<Standalone<VectorRef<const char*>>> getAddressesForKey(Key const& key) override {
 		throw client_invalid_operation();
 	}

@@ -56,17 +56,17 @@ KnobValue IKnobCollection::parseKnobValue(std::string const& knobName, std::stri
 	static std::unique_ptr<IKnobCollection> clientKnobCollection, serverKnobCollection, testKnobCollection;
 	if (type == Type::CLIENT) {
 		if (!clientKnobCollection) {
-			clientKnobCollection = create(type, Randomize::NO, IsSimulated::NO);
+			clientKnobCollection = create(type, Randomize::False, IsSimulated::False);
 		}
 		return clientKnobCollection->parseKnobValue(knobName, knobValue);
 	} else if (type == Type::SERVER) {
 		if (!serverKnobCollection) {
-			serverKnobCollection = create(type, Randomize::NO, IsSimulated::NO);
+			serverKnobCollection = create(type, Randomize::False, IsSimulated::False);
 		}
 		return serverKnobCollection->parseKnobValue(knobName, knobValue);
 	} else if (type == Type::TEST) {
 		if (!testKnobCollection) {
-			testKnobCollection = create(type, Randomize::NO, IsSimulated::NO);
+			testKnobCollection = create(type, Randomize::False, IsSimulated::False);
 		}
 		return testKnobCollection->parseKnobValue(knobName, knobValue);
 	}

@@ -74,7 +74,7 @@ KnobValue IKnobCollection::parseKnobValue(std::string const& knobName, std::stri
 }
 
 std::unique_ptr<IKnobCollection> IKnobCollection::globalKnobCollection =
-    IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::NO, IsSimulated::NO);
+    IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
 
 void IKnobCollection::setGlobalKnobCollection(Type type, Randomize randomize, IsSimulated isSimulated) {
 	globalKnobCollection = create(type, randomize, isSimulated);

@@ -26,33 +26,15 @@
 
 ISingleThreadTransaction* ISingleThreadTransaction::allocateOnForeignThread(Type type) {
 	if (type == Type::RYW) {
-		auto tr =
-		    (ReadYourWritesTransaction*)(ReadYourWritesTransaction::operator new(sizeof(ReadYourWritesTransaction)));
-		tr->preinitializeOnForeignThread();
+		auto tr = new ReadYourWritesTransaction;
 		return tr;
 	} else if (type == Type::SIMPLE_CONFIG) {
-		auto tr = (SimpleConfigTransaction*)(SimpleConfigTransaction::operator new(sizeof(SimpleConfigTransaction)));
+		auto tr = new SimpleConfigTransaction;
 		return tr;
 	} else if (type == Type::PAXOS_CONFIG) {
-		auto tr = (PaxosConfigTransaction*)(PaxosConfigTransaction::operator new(sizeof(PaxosConfigTransaction)));
+		auto tr = new PaxosConfigTransaction;
 		return tr;
 	}
 	ASSERT(false);
 	return nullptr;
 }
-
-void ISingleThreadTransaction::create(ISingleThreadTransaction* tr, Type type, Database db) {
-	switch (type) {
-	case Type::RYW:
-		new (tr) ReadYourWritesTransaction(db);
-		break;
-	case Type::SIMPLE_CONFIG:
-		new (tr) SimpleConfigTransaction(db);
-		break;
-	case Type::PAXOS_CONFIG:
-		new (tr) PaxosConfigTransaction(db);
-		break;
-	default:
-		ASSERT(false);
-	}
-}

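// ---------------------------------------------------------------------------
// The old code above did two-phase construction: allocateOnForeignThread()
// obtained raw storage via `operator new` plus a preinitialize step, and
// create() later constructed the object in place with placement new
// (`new (tr) ReadYourWritesTransaction(db);`). The replacement default-
// constructs with plain `new` and supplies the database afterwards through
// the virtual setDatabase() added in the header hunk below. A self-contained
// sketch of the placement-new mechanics being removed:
#include <new>

struct TxnSketch {
    int db;
    TxnSketch() : db(-1) {}
    explicit TxnSketch(int d) : db(d) {}
};

int main() {
    // Phase 1: raw, uninitialized storage (what operator new returned).
    void* storage = ::operator new(sizeof(TxnSketch));
    // Phase 2: construct the object into that storage later.
    TxnSketch* t = new (storage) TxnSketch(42);
    // Placement-new'd objects need manual destruction and deallocation.
    t->~TxnSketch();
    ::operator delete(storage);
}
// ---------------------------------------------------------------------------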
@@ -23,6 +23,7 @@
 #include "fdbclient/FDBOptions.g.h"
+#include "fdbclient/FDBTypes.h"
 #include "fdbclient/KeyRangeMap.h"
 #include "fdbclient/NativeAPI.actor.h"
 #include "flow/Error.h"
 #include "flow/FastRef.h"
 

@@ -44,23 +45,23 @@ public:
 	};
 
 	static ISingleThreadTransaction* allocateOnForeignThread(Type type);
-	static void create(ISingleThreadTransaction* tr, Type type, Database db);
+	virtual void setDatabase(Database const&) = 0;
 
 	virtual void setVersion(Version v) = 0;
 	virtual Future<Version> getReadVersion() = 0;
 	virtual Optional<Version> getCachedReadVersion() const = 0;
-	virtual Future<Optional<Value>> get(const Key& key, bool snapshot = false) = 0;
-	virtual Future<Key> getKey(const KeySelector& key, bool snapshot = false) = 0;
+	virtual Future<Optional<Value>> get(const Key& key, Snapshot = Snapshot::False) = 0;
+	virtual Future<Key> getKey(const KeySelector& key, Snapshot = Snapshot::False) = 0;
 	virtual Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
 	                                                    const KeySelector& end,
 	                                                    int limit,
-	                                                    bool snapshot = false,
-	                                                    bool reverse = false) = 0;
+	                                                    Snapshot = Snapshot::False,
+	                                                    Reverse = Reverse::False) = 0;
 	virtual Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
 	                                                    KeySelector end,
 	                                                    GetRangeLimits limits,
-	                                                    bool snapshot = false,
-	                                                    bool reverse = false) = 0;
+	                                                    Snapshot = Snapshot::False,
+	                                                    Reverse = Reverse::False) = 0;
 	virtual Future<Standalone<VectorRef<const char*>>> getAddressesForKey(Key const& key) = 0;
 	virtual Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(KeyRange const& range, int64_t chunkSize) = 0;
 	virtual Future<int64_t> getEstimatedRangeSizeBytes(KeyRange const& keys) = 0;

@@ -150,7 +150,7 @@ template <typename T>
 class KeyBackedProperty {
 public:
 	KeyBackedProperty(KeyRef key) : key(key) {}
-	Future<Optional<T>> get(Reference<ReadYourWritesTransaction> tr, bool snapshot = false) const {
+	Future<Optional<T>> get(Reference<ReadYourWritesTransaction> tr, Snapshot snapshot = Snapshot::False) const {
 		return map(tr->get(key, snapshot), [](Optional<Value> const& val) -> Optional<T> {
 			if (val.present())
 				return Codec<T>::unpack(Tuple::unpack(val.get()));

@@ -158,12 +158,14 @@ public:
 		});
 	}
 	// Get property's value or defaultValue if it doesn't exist
-	Future<T> getD(Reference<ReadYourWritesTransaction> tr, bool snapshot = false, T defaultValue = T()) const {
+	Future<T> getD(Reference<ReadYourWritesTransaction> tr,
+	               Snapshot snapshot = Snapshot::False,
+	               T defaultValue = T()) const {
 		return map(get(tr, snapshot), [=](Optional<T> val) -> T { return val.present() ? val.get() : defaultValue; });
 	}
 	// Get property's value or throw error if it doesn't exist
 	Future<T> getOrThrow(Reference<ReadYourWritesTransaction> tr,
-	                     bool snapshot = false,
+	                     Snapshot snapshot = Snapshot::False,
 	                     Error err = key_not_found()) const {
 		auto keyCopy = key;
 		auto backtrace = platform::get_backtrace();

@@ -180,7 +182,7 @@ public:
 		});
 	}
 
-	Future<Optional<T>> get(Database cx, bool snapshot = false) const {
+	Future<Optional<T>> get(Database cx, Snapshot snapshot = Snapshot::False) const {
 		auto& copy = *this;
 		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
 			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

@@ -190,7 +192,7 @@ public:
 		});
 	}
 
-	Future<T> getD(Database cx, bool snapshot = false, T defaultValue = T()) const {
+	Future<T> getD(Database cx, Snapshot snapshot = Snapshot::False, T defaultValue = T()) const {
 		auto& copy = *this;
 		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
 			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

@@ -200,7 +202,7 @@ public:
 		});
 	}
 
-	Future<T> getOrThrow(Database cx, bool snapshot = false, Error err = key_not_found()) const {
+	Future<T> getOrThrow(Database cx, Snapshot snapshot = Snapshot::False, Error err = key_not_found()) const {
 		auto& copy = *this;
 		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
 			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

@@ -235,7 +237,7 @@ template <typename T>
 class KeyBackedBinaryValue {
 public:
 	KeyBackedBinaryValue(KeyRef key) : key(key) {}
-	Future<Optional<T>> get(Reference<ReadYourWritesTransaction> tr, bool snapshot = false) const {
+	Future<Optional<T>> get(Reference<ReadYourWritesTransaction> tr, Snapshot snapshot = Snapshot::False) const {
 		return map(tr->get(key, snapshot), [](Optional<Value> const& val) -> Optional<T> {
 			if (val.present())
 				return BinaryReader::fromStringRef<T>(val.get(), Unversioned());

@@ -243,8 +245,11 @@ public:
 		});
 	}
 	// Get property's value or defaultValue if it doesn't exist
-	Future<T> getD(Reference<ReadYourWritesTransaction> tr, bool snapshot = false, T defaultValue = T()) const {
-		return map(get(tr, false), [=](Optional<T> val) -> T { return val.present() ? val.get() : defaultValue; });
+	Future<T> getD(Reference<ReadYourWritesTransaction> tr,
+	               Snapshot snapshot = Snapshot::False,
+	               T defaultValue = T()) const {
+		return map(get(tr, Snapshot::False),
+		           [=](Optional<T> val) -> T { return val.present() ? val.get() : defaultValue; });
 	}
 	void set(Reference<ReadYourWritesTransaction> tr, T const& val) {
 		return tr->set(key, BinaryWriter::toValue<T>(val, Unversioned()));

@@ -273,8 +278,8 @@ public:
 	                          KeyType const& begin,
 	                          Optional<KeyType> const& end,
 	                          int limit,
-	                          bool snapshot = false,
-	                          bool reverse = false) const {
+	                          Snapshot snapshot = Snapshot::False,
+	                          Reverse reverse = Reverse::False) const {
 		Subspace s = space; // 'this' could be invalid inside lambda
 		Key endKey = end.present() ? s.pack(Codec<KeyType>::pack(end.get())) : space.range().end;
 		return map(

@@ -293,7 +298,7 @@ public:
 
 	Future<Optional<ValueType>> get(Reference<ReadYourWritesTransaction> tr,
 	                                KeyType const& key,
-	                                bool snapshot = false) const {
+	                                Snapshot snapshot = Snapshot::False) const {
 		return map(tr->get(space.pack(Codec<KeyType>::pack(key)), snapshot),
 		           [](Optional<Value> const& val) -> Optional<ValueType> {
 			           if (val.present())

@@ -339,7 +344,7 @@ public:
 	                          ValueType const& begin,
 	                          Optional<ValueType> const& end,
 	                          int limit,
-	                          bool snapshot = false) const {
+	                          Snapshot snapshot = Snapshot::False) const {
 		Subspace s = space; // 'this' could be invalid inside lambda
 		Key endKey = end.present() ? s.pack(Codec<ValueType>::pack(end.get())) : space.range().end;
 		return map(

@@ -353,7 +358,9 @@ public:
 		});
 	}
 
-	Future<bool> exists(Reference<ReadYourWritesTransaction> tr, ValueType const& val, bool snapshot = false) const {
+	Future<bool> exists(Reference<ReadYourWritesTransaction> tr,
+	                    ValueType const& val,
+	                    Snapshot snapshot = Snapshot::False) const {
 		return map(tr->get(space.pack(Codec<ValueType>::pack(val)), snapshot),
 		           [](Optional<Value> const& val) -> bool { return val.present(); });
 	}

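// ---------------------------------------------------------------------------
// All of the KeyBackedTypes signatures above now take Snapshot rather than
// bool. The underlying semantics (a property of the FDB transaction model,
// not introduced by this diff): a snapshot read returns data at the
// transaction's read version but does NOT add a read conflict range, so a
// concurrent write to that key will not force this transaction to retry.
// Sketch of the call-site difference, assuming a KeyBackedProperty `prop`
// and a transaction `tr` as in the header above:
//
//     state Optional<Version> v1 = wait(prop.get(tr));                 // Snapshot::False default:
//                                                                      // conflict-checked read
//     state Optional<Version> v2 = wait(prop.get(tr, Snapshot::True)); // snapshot read:
//                                                                      // no read conflict range
//
// The named parameter makes the weaker isolation visible at the call site,
// where a bare `true` previously gave no hint.
// ---------------------------------------------------------------------------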
@@ -119,7 +119,8 @@ void krmSetPreviouslyEmptyRange(CommitTransactionRef& tr,
 ACTOR Future<Void> krmSetRange(Transaction* tr, Key mapPrefix, KeyRange range, Value value) {
 	state KeyRange withPrefix =
 	    KeyRangeRef(mapPrefix.toString() + range.begin.toString(), mapPrefix.toString() + range.end.toString());
-	RangeResult old = wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true));
+	RangeResult old =
+	    wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, Snapshot::True));
 
 	Value oldValue;
 	bool hasResult = old.size() > 0 && old[0].key.startsWith(mapPrefix);

@@ -140,7 +141,8 @@ ACTOR Future<Void> krmSetRange(Transaction* tr, Key mapPrefix, KeyRange range, V
 ACTOR Future<Void> krmSetRange(Reference<ReadYourWritesTransaction> tr, Key mapPrefix, KeyRange range, Value value) {
 	state KeyRange withPrefix =
 	    KeyRangeRef(mapPrefix.toString() + range.begin.toString(), mapPrefix.toString() + range.end.toString());
-	RangeResult old = wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, true));
+	RangeResult old =
+	    wait(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end), 1, Snapshot::True));
 
 	Value oldValue;
 	bool hasResult = old.size() > 0 && old[0].key.startsWith(mapPrefix);

@@ -175,8 +177,10 @@ static Future<Void> krmSetRangeCoalescing_(Transaction* tr,
 	    KeyRangeRef(mapPrefix.toString() + maxRange.begin.toString(), mapPrefix.toString() + maxRange.end.toString());
 
 	state vector<Future<RangeResult>> keys;
-	keys.push_back(tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, true));
-	keys.push_back(tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end) + 1, 2, true));
+	keys.push_back(
+	    tr->getRange(lastLessThan(withPrefix.begin), firstGreaterOrEqual(withPrefix.begin), 1, Snapshot::True));
+	keys.push_back(
+	    tr->getRange(lastLessOrEqual(withPrefix.end), firstGreaterThan(withPrefix.end) + 1, 2, Snapshot::True));
 	wait(waitForAll(keys));
 
 	// Determine how far to extend this range at the beginning

@@ -143,7 +143,7 @@ std::map<std::string, std::string> configForToken(std::string const& mode) {
     }
 
     if (key == "perpetual_storage_wiggle" && isInteger(value)) {
-        int ppWiggle = atoi(value.c_str());
+        int ppWiggle = std::stoi(value);
         if (ppWiggle >= 2 || ppWiggle < 0) {
             printf("Error: Only 0 and 1 are valid values of perpetual_storage_wiggle at present.\n");
             return out;

@@ -2473,7 +2473,8 @@ ACTOR Future<Void> changeCachedRange(Database cx, KeyRangeRef range, bool add) {
         tr.clear(sysRangeClear);
         tr.clear(privateRange);
         tr.addReadConflictRange(privateRange);
-        RangeResult previous = wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, true));
+        RangeResult previous =
+            wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, Snapshot::True));
         bool prevIsCached = false;
         if (!previous.empty()) {
             std::vector<uint16_t> prevVal;

@@ -2489,7 +2490,7 @@ ACTOR Future<Void> changeCachedRange(Database cx, KeyRangeRef range, bool add) {
             tr.set(sysRange.begin, trueValue);
             tr.set(privateRange.begin, serverKeysTrue);
         }
-        RangeResult after = wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, false));
+        RangeResult after = wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, Snapshot::False));
         bool afterIsCached = false;
         if (!after.empty()) {
             std::vector<uint16_t> afterVal;
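A note on the `atoi` → `std::stoi` change above: with `isInteger(value)` already guarding the call the two behave the same, but `std::stoi` fails loudly on malformed input where `atoi` silently returns 0. A small standalone demonstration (not FDB code):

#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

int main() {
    std::string bad = "x1";
    // atoi: a parse failure is indistinguishable from a legitimate zero.
    std::cout << "atoi(\"x1\") = " << std::atoi(bad.c_str()) << "\n"; // prints 0
    // std::stoi: a parse failure throws std::invalid_argument instead.
    try {
        std::cout << std::stoi(bad) << "\n";
    } catch (const std::invalid_argument&) {
        std::cout << "std::stoi rejected \"" << bad << "\"\n";
    }
    std::cout << "std::stoi(\"1\") = " << std::stoi(std::string("1")) << "\n"; // prints 1
}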
@@ -113,7 +113,7 @@ ThreadFuture<RangeResult> DLTransaction::getRange(const KeySelectorRef& begin,
                                       end.offset,
                                       limits.rows,
                                       limits.bytes,
-                                      FDBStreamingModes::EXACT,
+                                      FDB_STREAMING_MODE_EXACT,
                                       0,
                                       snapshot,
                                       reverse);

@@ -207,12 +207,12 @@ ThreadFuture<Standalone<VectorRef<KeyRef>>> DLTransaction::getRangeSplitPoints(c
 
 void DLTransaction::addReadConflictRange(const KeyRangeRef& keys) {
     throwIfError(api->transactionAddConflictRange(
-        tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDBConflictRangeTypes::READ));
+        tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ));
 }
 
 void DLTransaction::atomicOp(const KeyRef& key, const ValueRef& value, uint32_t operationType) {
     api->transactionAtomicOp(
-        tr, key.begin(), key.size(), value.begin(), value.size(), (FDBMutationTypes::Option)operationType);
+        tr, key.begin(), key.size(), value.begin(), value.size(), static_cast<FDBMutationType>(operationType));
 }
 
 void DLTransaction::set(const KeyRef& key, const ValueRef& value) {

@@ -239,7 +239,7 @@ ThreadFuture<Void> DLTransaction::watch(const KeyRef& key) {
 
 void DLTransaction::addWriteConflictRange(const KeyRangeRef& keys) {
     throwIfError(api->transactionAddConflictRange(
-        tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDBConflictRangeTypes::WRITE));
+        tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE));
 }
 
 ThreadFuture<Void> DLTransaction::commit() {

@@ -269,8 +269,10 @@ ThreadFuture<int64_t> DLTransaction::getApproximateSize() {
 }
 
 void DLTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {
-    throwIfError(api->transactionSetOption(
-        tr, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
+    throwIfError(api->transactionSetOption(tr,
+                                           static_cast<FDBTransactionOption>(option),
+                                           value.present() ? value.get().begin() : nullptr,
+                                           value.present() ? value.get().size() : 0));
 }
 
 ThreadFuture<Void> DLTransaction::onError(Error const& e) {

@@ -309,8 +311,10 @@ Reference<ITransaction> DLDatabase::createTransaction() {
 }
 
 void DLDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value) {
-    throwIfError(api->databaseSetOption(
-        db, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
+    throwIfError(api->databaseSetOption(db,
+                                        static_cast<FDBDatabaseOption>(option),
+                                        value.present() ? value.get().begin() : nullptr,
+                                        value.present() ? value.get().size() : 0));
 }
 
 ThreadFuture<int64_t> DLDatabase::rebootWorker(const StringRef& address, bool check, int duration) {

@@ -504,7 +508,7 @@ void DLApi::selectApiVersion(int apiVersion) {
 
     init();
     throwIfError(api->selectApiVersion(apiVersion, headerVersion));
-    throwIfError(api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT, nullptr, 0));
+    throwIfError(api->setNetworkOption(static_cast<FDBNetworkOption>(FDBNetworkOptions::EXTERNAL_CLIENT), nullptr, 0));
 }
 
 const char* DLApi::getClientVersion() {

@@ -516,8 +520,9 @@ const char* DLApi::getClientVersion() {
 }
 
 void DLApi::setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value) {
-    throwIfError(api->setNetworkOption(
-        option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
+    throwIfError(api->setNetworkOption(static_cast<FDBNetworkOption>(option),
+                                       value.present() ? value.get().begin() : nullptr,
+                                       value.present() ? value.get().size() : 0));
 }
 
 void DLApi::setupNetwork() {

@@ -1884,8 +1889,6 @@ bool ClientInfo::canReplace(Reference<ClientInfo> other) const {
 }
 
 // UNIT TESTS
-extern bool noUnseed;
-
 TEST_CASE("/fdbclient/multiversionclient/EnvironmentVariableParsing") {
     auto vals = parseOptionValues("a");
     ASSERT(vals.size() == 1 && vals[0] == "a");
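A recurring change in the DL* hunks above: values crossing into the dynamically loaded C API are now converted with an explicit static_cast to the C header's enum types (FDBMutationType, FDBTransactionOption, and so on) rather than passed through the internal option types. A reduced sketch of the pattern — the enum and functions below are stand-ins, with only the ADD value taken from the real options file:

#include <cstdint>
#include <iostream>

// Stand-ins for the generated C enum and the internal option namespace.
typedef enum { FDB_MUTATION_TYPE_ADD = 2 } FDBMutationType;
namespace FDBMutationTypes {
enum Option { ADD = 2 };
}

// The loaded function pointer's signature uses the C enum...
void transactionAtomicOp(FDBMutationType op) {
    std::cout << "atomic op " << static_cast<int>(op) << "\n";
}

// ...so the wrapper converts exactly once, visibly, at the boundary.
void atomicOp(uint32_t operationType) {
    transactionAtomicOp(static_cast<FDBMutationType>(operationType));
}

int main() {
    atomicOp(FDBMutationTypes::ADD);
}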
@@ -22,6 +22,7 @@
 #define FDBCLIENT_MULTIVERSIONTRANSACTION_H
 #pragma once
 
+#include "bindings/c/foundationdb/fdb_c_options.g.h"
 #include "fdbclient/FDBOptions.g.h"
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/IClientApi.h"

@@ -31,10 +32,10 @@
 // FdbCApi is used as a wrapper around the FoundationDB C API that gets loaded from an external client library.
 // All of the required functions loaded from that external library are stored in function pointers in this struct.
 struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
-    typedef struct future FDBFuture;
-    typedef struct cluster FDBCluster;
-    typedef struct database FDBDatabase;
-    typedef struct transaction FDBTransaction;
+    typedef struct FDB_future FDBFuture;
+    typedef struct FDB_cluster FDBCluster;
+    typedef struct FDB_database FDBDatabase;
+    typedef struct FDB_transaction FDBTransaction;
 
 #pragma pack(push, 4)
     typedef struct key {

@@ -57,16 +58,16 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
     // Network
     fdb_error_t (*selectApiVersion)(int runtimeVersion, int headerVersion);
    const char* (*getClientVersion)();
-    fdb_error_t (*setNetworkOption)(FDBNetworkOptions::Option option, uint8_t const* value, int valueLength);
+    fdb_error_t (*setNetworkOption)(FDBNetworkOption option, uint8_t const* value, int valueLength);
     fdb_error_t (*setupNetwork)();
     fdb_error_t (*runNetwork)();
     fdb_error_t (*stopNetwork)();
-    fdb_error_t* (*createDatabase)(const char* clusterFilePath, FDBDatabase** db);
+    fdb_error_t (*createDatabase)(const char* clusterFilePath, FDBDatabase** db);
 
     // Database
     fdb_error_t (*databaseCreateTransaction)(FDBDatabase* database, FDBTransaction** tr);
     fdb_error_t (*databaseSetOption)(FDBDatabase* database,
-                                     FDBDatabaseOptions::Option option,
+                                     FDBDatabaseOption option,
                                      uint8_t const* value,
                                      int valueLength);
     void (*databaseDestroy)(FDBDatabase* database);

@@ -86,7 +87,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
 
     // Transaction
     fdb_error_t (*transactionSetOption)(FDBTransaction* tr,
-                                        FDBTransactionOptions::Option option,
+                                        FDBTransactionOption option,
                                         uint8_t const* value,
                                         int valueLength);
     void (*transactionDestroy)(FDBTransaction* tr);

@@ -113,7 +114,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
                                   int endOffset,
                                   int limit,
                                   int targetBytes,
-                                  FDBStreamingModes::Option mode,
+                                  FDBStreamingMode mode,
                                   int iteration,
                                   fdb_bool_t snapshot,
                                   fdb_bool_t reverse);

@@ -135,7 +136,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
                                  int keyNameLength,
                                  uint8_t const* param,
                                  int paramLength,
-                                 FDBMutationTypes::Option operationType);
+                                 FDBMutationType operationType);
 
     FDBFuture* (*transactionGetEstimatedRangeSizeBytes)(FDBTransaction* tr,
                                                         uint8_t const* begin_key_name,

@@ -163,7 +164,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
                                             int beginKeyNameLength,
                                             uint8_t const* endKeyName,
                                             int endKeyNameLength,
-                                            FDBConflictRangeTypes::Option);
+                                            FDBConflictRangeType);
 
     // Future
     fdb_error_t (*futureGetDatabase)(FDBFuture* f, FDBDatabase** outDb);

[File diff suppressed because it is too large.]
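The typedef hunk above matters for type compatibility: the forward declarations inside FdbCApi must use the same struct tags as fdb_c.h (`FDB_future`, `FDB_database`, ...) or the wrapper and the real header would declare distinct opaque types. It also fixes `createDatabase` to return `fdb_error_t` rather than `fdb_error_t*`. A simplified sketch of loading that symbol with the corrected signature (error handling trimmed; the library path is an assumption):

#include <dlfcn.h>
#include <cstdio>

typedef int fdb_error_t;
typedef struct FDB_database FDBDatabase; // same tag as the real C header

// Corrected signature: returns fdb_error_t by value, not fdb_error_t*.
typedef fdb_error_t (*createDatabase_t)(const char* clusterFilePath, FDBDatabase** db);

int main() {
    void* lib = dlopen("libfdb_c.so", RTLD_LAZY | RTLD_LOCAL);
    if (!lib) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    auto createDatabase = reinterpret_cast<createDatabase_t>(dlsym(lib, "fdb_create_database"));
    std::printf("fdb_create_database %s\n", createDatabase ? "resolved" : "missing");
    dlclose(lib);
    return 0;
}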
@@ -27,10 +27,12 @@
 #elif !defined(FDBCLIENT_NATIVEAPI_ACTOR_H)
 #define FDBCLIENT_NATIVEAPI_ACTOR_H
 
+#include "flow/BooleanParam.h"
 #include "flow/flow.h"
 #include "flow/TDMetric.actor.h"
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/CommitProxyInterface.h"
+#include "fdbclient/ClientBooleanParams.h"
 #include "fdbclient/FDBOptions.g.h"
 #include "fdbclient/CoordinationInterface.h"
 #include "fdbclient/ClusterInterface.h"

@@ -51,7 +53,8 @@ void addref(DatabaseContext* ptr);
 template <>
 void delref(DatabaseContext* ptr);
 
-void validateOptionValue(Optional<StringRef> value, bool shouldBePresent);
+void validateOptionValuePresent(Optional<StringRef> value);
+void validateOptionValueNotPresent(Optional<StringRef> value);
 
 void enableClientInfoLogging();
 

@@ -81,13 +84,13 @@ public:
     // on another thread
     static Database createDatabase(Reference<ClusterConnectionFile> connFile,
                                    int apiVersion,
-                                   bool internal = true,
+                                   IsInternal internal = IsInternal::True,
                                    LocalityData const& clientLocality = LocalityData(),
                                    DatabaseContext* preallocatedDb = nullptr);
 
     static Database createDatabase(std::string connFileName,
                                    int apiVersion,
-                                   bool internal = true,
+                                   IsInternal internal = IsInternal::True,
                                    LocalityData const& clientLocality = LocalityData());
 
     Database() {} // an uninitialized database can be destructed or reassigned safely; that's it

@@ -112,7 +115,7 @@ private:
     void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value = Optional<StringRef>());
 
     // Configures the global networking machinery
-    void setupNetwork(uint64_t transportId = 0, bool useMetrics = false);
+    void setupNetwork(uint64_t transportId = 0, UseMetrics = UseMetrics::False);
 
     // This call blocks while the network is running. To use the API in a single-threaded
     // environment, the calling program must have ACTORs already launched that are waiting

@@ -241,31 +244,29 @@ public:
     explicit Transaction(Database const& cx);
     ~Transaction();
 
-    void preinitializeOnForeignThread() { committedVersion = invalidVersion; }
-
     void setVersion(Version v);
     Future<Version> getReadVersion() { return getReadVersion(0); }
     Future<Version> getRawReadVersion();
     Optional<Version> getCachedReadVersion() const;
 
-    [[nodiscard]] Future<Optional<Value>> get(const Key& key, bool snapshot = false);
+    [[nodiscard]] Future<Optional<Value>> get(const Key& key, Snapshot = Snapshot::False);
     [[nodiscard]] Future<Void> watch(Reference<Watch> watch);
-    [[nodiscard]] Future<Key> getKey(const KeySelector& key, bool snapshot = false);
+    [[nodiscard]] Future<Key> getKey(const KeySelector& key, Snapshot = Snapshot::False);
     // Future< Optional<KeyValue> > get( const KeySelectorRef& key );
     [[nodiscard]] Future<RangeResult> getRange(const KeySelector& begin,
                                                const KeySelector& end,
                                                int limit,
-                                               bool snapshot = false,
-                                               bool reverse = false);
+                                               Snapshot = Snapshot::False,
+                                               Reverse = Reverse::False);
     [[nodiscard]] Future<RangeResult> getRange(const KeySelector& begin,
                                                const KeySelector& end,
                                                GetRangeLimits limits,
-                                               bool snapshot = false,
-                                               bool reverse = false);
+                                               Snapshot = Snapshot::False,
+                                               Reverse = Reverse::False);
     [[nodiscard]] Future<RangeResult> getRange(const KeyRange& keys,
                                                int limit,
-                                               bool snapshot = false,
-                                               bool reverse = false) {
+                                               Snapshot snapshot = Snapshot::False,
+                                               Reverse reverse = Reverse::False) {
         return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                         KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
                         limit,

@@ -274,8 +275,8 @@ public:
     }
     [[nodiscard]] Future<RangeResult> getRange(const KeyRange& keys,
                                                GetRangeLimits limits,
-                                               bool snapshot = false,
-                                               bool reverse = false) {
+                                               Snapshot snapshot = Snapshot::False,
+                                               Reverse reverse = Reverse::False) {
         return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                         KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
                         limits,

@@ -289,19 +290,19 @@ public:
                                               const KeySelector& begin,
                                               const KeySelector& end,
                                               int limit,
-                                              bool snapshot = false,
-                                              bool reverse = false);
+                                              Snapshot = Snapshot::False,
+                                              Reverse = Reverse::False);
     [[nodiscard]] Future<Void> getRangeStream(const PromiseStream<Standalone<RangeResultRef>>& results,
                                               const KeySelector& begin,
                                               const KeySelector& end,
                                               GetRangeLimits limits,
-                                              bool snapshot = false,
-                                              bool reverse = false);
+                                              Snapshot = Snapshot::False,
+                                              Reverse = Reverse::False);
     [[nodiscard]] Future<Void> getRangeStream(const PromiseStream<Standalone<RangeResultRef>>& results,
                                               const KeyRange& keys,
                                               int limit,
-                                              bool snapshot = false,
-                                              bool reverse = false) {
+                                              Snapshot snapshot = Snapshot::False,
+                                              Reverse reverse = Reverse::False) {
         return getRangeStream(results,
                               KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                               KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),

@@ -312,8 +313,8 @@ public:
     [[nodiscard]] Future<Void> getRangeStream(const PromiseStream<Standalone<RangeResultRef>>& results,
                                               const KeyRange& keys,
                                               GetRangeLimits limits,
-                                              bool snapshot = false,
-                                              bool reverse = false) {
+                                              Snapshot snapshot = Snapshot::False,
+                                              Reverse reverse = Reverse::False) {
         return getRangeStream(results,
                               KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                               KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),

@@ -348,13 +349,13 @@ public:
     // The returned list would still be in form of [keys.begin, splitPoint1, splitPoint2, ... , keys.end]
     Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(KeyRange const& keys, int64_t chunkSize);
     // If checkWriteConflictRanges is true, existing write conflict ranges will be searched for this key
-    void set(const KeyRef& key, const ValueRef& value, bool addConflictRange = true);
+    void set(const KeyRef& key, const ValueRef& value, AddConflictRange = AddConflictRange::True);
     void atomicOp(const KeyRef& key,
                   const ValueRef& value,
                   MutationRef::Type operationType,
-                  bool addConflictRange = true);
-    void clear(const KeyRangeRef& range, bool addConflictRange = true);
-    void clear(const KeyRef& key, bool addConflictRange = true);
+                  AddConflictRange = AddConflictRange::True);
+    void clear(const KeyRangeRef& range, AddConflictRange = AddConflictRange::True);
+    void clear(const KeyRef& key, AddConflictRange = AddConflictRange::True);
     [[nodiscard]] Future<Void> commit(); // Throws not_committed or commit_unknown_result errors in normal operation
 
     void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>());

@@ -418,7 +419,7 @@ private:
     Database cx;
 
     double backoff;
-    Version committedVersion;
+    Version committedVersion{ invalidVersion };
     CommitTransactionRequest tr;
     Future<Version> readVersion;
     Promise<Optional<Value>> metadataVersion;

@@ -451,7 +452,7 @@ inline uint64_t getWriteOperationCost(uint64_t bytes) {
 
 // Create a transaction to set the value of system key \xff/conf/perpetual_storage_wiggle. If enable == true, the value
 // will be 1. Otherwise, the value will be 0.
-ACTOR Future<Void> setPerpetualStorageWiggle(Database cx, bool enable, bool lock_aware = false);
+ACTOR Future<Void> setPerpetualStorageWiggle(Database cx, bool enable, LockAware lockAware = LockAware::False);
 
 #include "flow/unactorcompiler.h"
 #endif
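Two things in the NativeAPI.actor.h hunks deserve a note. Every defaulted `bool snapshot/reverse/addConflictRange` parameter becomes a typed default (`Snapshot = Snapshot::False`, and so on), and `committedVersion` gains an in-class initializer, which is what lets `preinitializeOnForeignThread()` disappear. A minimal sketch of the second point, with the types reduced to essentials:

#include <cassert>

typedef long long Version;
constexpr Version invalidVersion = -1; // matches the sentinel used by FDB

struct Transaction {
    // Was: a default-initialized member plus an explicit
    // preinitializeOnForeignThread() call before use on another thread.
    Version committedVersion{ invalidVersion };
};

int main() {
    Transaction tr;
    assert(tr.committedVersion == invalidVersion); // holds with no extra call
    return 0;
}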
@@ -33,7 +33,7 @@ Optional<Version> PaxosConfigTransaction::getCachedReadVersion() const {
     return ::invalidVersion;
 }
 
-Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, bool snapshot) {
+Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, Snapshot snapshot) {
     // TODO: Implement
     return Optional<Value>{};
 }

@@ -41,8 +41,8 @@ Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, bool snapsho
 Future<Standalone<RangeResultRef>> PaxosConfigTransaction::getRange(KeySelector const& begin,
                                                                     KeySelector const& end,
                                                                     int limit,
-                                                                    bool snapshot,
-                                                                    bool reverse) {
+                                                                    Snapshot snapshot,
+                                                                    Reverse reverse) {
     // TODO: Implement
     ASSERT(false);
     return Standalone<RangeResultRef>{};

@@ -51,9 +51,9 @@ Future<Standalone<RangeResultRef>> PaxosConfigTransaction::getRange(KeySelector
 Future<Standalone<RangeResultRef>> PaxosConfigTransaction::getRange(KeySelector begin,
                                                                     KeySelector end,
                                                                     GetRangeLimits limits,
-                                                                    bool snapshot,
-                                                                    bool reverse) {
-    // TODO: Implememnt
+                                                                    Snapshot snapshot,
+                                                                    Reverse reverse) {
+    // TODO: Implement
     ASSERT(false);
     return Standalone<RangeResultRef>{};
 }

@@ -122,9 +122,14 @@ void PaxosConfigTransaction::checkDeferredError() const {
     ASSERT(false);
 }
 
-PaxosConfigTransaction::PaxosConfigTransaction(Database const& cx) {
+PaxosConfigTransaction::PaxosConfigTransaction() {
     // TODO: Implement
     ASSERT(false);
 }
 
 PaxosConfigTransaction::~PaxosConfigTransaction() = default;
+
+void PaxosConfigTransaction::setDatabase(Database const& cx) {
+    // TODO: Implement
+    ASSERT(false);
+}
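The PaxosConfigTransaction hunks keep the file compiling while the implementation is pending: every unimplemented path is an `ASSERT(false)` stub, so accidental use fails loudly under test rather than silently returning. A reduced illustration, with plain assert standing in for FDB's ASSERT:

#include <cassert>

struct Database {};

// Stub pattern: unimplemented entry points trap immediately.
struct ConfigTransactionStub {
    void setDatabase(Database const&) {
        // TODO: Implement
        assert(false);
    }
};

int main() {
    ConfigTransactionStub stub;
    (void)stub; // calling stub.setDatabase(...) would abort a debug build
    return 0;
}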
@@ -33,22 +33,23 @@ class PaxosConfigTransaction final : public IConfigTransaction, public FastAlloc
     PaxosConfigTransactionImpl& impl() { return *_impl; }
 
 public:
-    PaxosConfigTransaction(Database const&);
+    PaxosConfigTransaction();
     ~PaxosConfigTransaction();
+    void setDatabase(Database const&) override;
     Future<Version> getReadVersion() override;
     Optional<Version> getCachedReadVersion() const override;
 
-    Future<Optional<Value>> get(Key const& key, bool snapshot = false) override;
+    Future<Optional<Value>> get(Key const& key, Snapshot = Snapshot::False) override;
     Future<Standalone<RangeResultRef>> getRange(KeySelector const& begin,
                                                 KeySelector const& end,
                                                 int limit,
-                                                bool snapshot = false,
-                                                bool reverse = false) override;
+                                                Snapshot = Snapshot::False,
+                                                Reverse = Reverse::False) override;
     Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
                                                 KeySelector end,
                                                 GetRangeLimits limits,
-                                                bool snapshot = false,
-                                                bool reverse = false) override;
+                                                Snapshot = Snapshot::False,
+                                                Reverse = Reverse::False) override;
     void set(KeyRef const& key, ValueRef const& value) override;
     void clear(KeyRangeRef const&) override { throw client_invalid_operation(); }
     void clear(KeyRef const&) override;
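One stylistic detail in the header hunk above: the overrides declare `Snapshot = Snapshot::False` with no parameter name. C++ permits an unnamed parameter to carry a default argument in a declaration, and the definition is free to name it. A tiny self-contained illustration (the `Snapshot` here is again a simplified stand-in):

#include <iostream>

struct Snapshot {
    bool value;
    constexpr explicit Snapshot(bool v) : value(v) {}
    constexpr operator bool() const { return value; }
    static const Snapshot True, False;
};
inline const Snapshot Snapshot::True{ true };
inline const Snapshot Snapshot::False{ false };

struct Reader {
    int get(Snapshot = Snapshot::False) const; // unnamed, defaulted parameter
};

// The out-of-line definition names the parameter it actually uses.
int Reader::get(Snapshot snapshot) const {
    return snapshot ? 1 : 0;
}

int main() {
    Reader r;
    std::cout << r.get() << " " << r.get(Snapshot::True) << "\n"; // prints: 0 1
}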
@@ -65,7 +65,7 @@ public:
     typedef Key Result;
 };
 
-template <bool Reverse>
+template <bool reverse>
 struct GetRangeReq {
     GetRangeReq(KeySelector begin, KeySelector end, GetRangeLimits limits)
       : begin(begin), end(end), limits(limits) {}

@@ -99,7 +99,7 @@ public:
         } else if (it->is_empty_range()) {
             return Optional<Value>();
         } else {
-            Optional<Value> res = wait(ryw->tr.get(read.key, true));
+            Optional<Value> res = wait(ryw->tr.get(read.key, Snapshot::True));
             KeyRef k(ryw->arena, read.key);
 
             if (res.present()) {

@@ -162,20 +162,22 @@ public:
     // transaction. Responsible for clipping results to the non-system keyspace when appropriate, since NativeAPI
     // doesn't do that.
 
-    static Future<Optional<Value>> readThrough(ReadYourWritesTransaction* ryw, GetValueReq read, bool snapshot) {
+    static Future<Optional<Value>> readThrough(ReadYourWritesTransaction* ryw, GetValueReq read, Snapshot snapshot) {
         return ryw->tr.get(read.key, snapshot);
     }
 
-    ACTOR static Future<Key> readThrough(ReadYourWritesTransaction* ryw, GetKeyReq read, bool snapshot) {
+    ACTOR static Future<Key> readThrough(ReadYourWritesTransaction* ryw, GetKeyReq read, Snapshot snapshot) {
         Key key = wait(ryw->tr.getKey(read.key, snapshot));
         if (ryw->getMaxReadKey() < key)
             return ryw->getMaxReadKey(); // Filter out results in the system keys if they are not accessible
         return key;
     }
 
-    ACTOR template <bool Reverse>
-    static Future<RangeResult> readThrough(ReadYourWritesTransaction* ryw, GetRangeReq<Reverse> read, bool snapshot) {
-        if (Reverse && read.end.offset > 1) {
+    ACTOR template <bool backwards>
+    static Future<RangeResult> readThrough(ReadYourWritesTransaction* ryw,
+                                           GetRangeReq<backwards> read,
+                                           Snapshot snapshot) {
+        if (backwards && read.end.offset > 1) {
             // FIXME: Optimistically assume that this will not run into the system keys, and only reissue if the result
             // actually does.
             Key key = wait(ryw->tr.getKey(read.end, snapshot));

@@ -185,10 +187,11 @@ public:
             read.end = KeySelector(firstGreaterOrEqual(key), key.arena());
         }
 
-        RangeResult v = wait(ryw->tr.getRange(read.begin, read.end, read.limits, snapshot, Reverse));
+        RangeResult v = wait(
+            ryw->tr.getRange(read.begin, read.end, read.limits, snapshot, backwards ? Reverse::True : Reverse::False));
         KeyRef maxKey = ryw->getMaxReadKey();
         if (v.size() > 0) {
-            if (!Reverse && v[v.size() - 1].key >= maxKey) {
+            if (!backwards && v[v.size() - 1].key >= maxKey) {
                 state RangeResult _v = v;
                 int i = _v.size() - 2;
                 for (; i >= 0 && _v[i].key >= maxKey; --i) {

@@ -299,7 +302,7 @@ public:
     ACTOR template <class Req>
     static Future<typename Req::Result> readWithConflictRangeThrough(ReadYourWritesTransaction* ryw,
                                                                      Req req,
-                                                                     bool snapshot) {
+                                                                     Snapshot snapshot) {
         choose {
             when(typename Req::Result result = wait(readThrough(ryw, req, snapshot))) { return result; }
             when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); }

@@ -316,7 +319,7 @@ public:
     ACTOR template <class Req>
     static Future<typename Req::Result> readWithConflictRangeRYW(ReadYourWritesTransaction* ryw,
                                                                  Req req,
-                                                                 bool snapshot) {
+                                                                 Snapshot snapshot) {
         state RYWIterator it(&ryw->cache, &ryw->writes);
         choose {
             when(typename Req::Result result = wait(read(ryw, req, &it))) {

@@ -332,7 +335,7 @@ public:
     template <class Req>
     static inline Future<typename Req::Result> readWithConflictRange(ReadYourWritesTransaction* ryw,
                                                                      Req const& req,
-                                                                     bool snapshot) {
+                                                                     Snapshot snapshot) {
         if (ryw->options.readYourWritesDisabled) {
             return readWithConflictRangeThrough(ryw, req, snapshot);
         } else if (snapshot && ryw->options.snapshotRywEnabled <= 0) {

@@ -690,7 +693,8 @@ public:
             //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows);
 
             additionalRows = 0;
-            RangeResult snapshot_read = wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, false));
+            RangeResult snapshot_read =
+                wait(ryw->tr.getRange(read_begin, read_end, requestLimit, Snapshot::True, Reverse::False));
             KeyRangeRef range = getKnownKeyRange(snapshot_read, read_begin, read_end, ryw->arena);
 
             //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough);

@@ -993,7 +997,8 @@ public:
             //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows);
 
             additionalRows = 0;
-            RangeResult snapshot_read = wait(ryw->tr.getRange(read_begin, read_end, requestLimit, true, true));
+            RangeResult snapshot_read =
+                wait(ryw->tr.getRange(read_begin, read_end, requestLimit, Snapshot::True, Reverse::True));
             KeyRangeRef range = getKnownKeyRangeBack(snapshot_read, read_begin, read_end, ryw->arena);
 
             //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough);

@@ -1110,7 +1115,7 @@ public:
 
     if (!ryw->options.readYourWritesDisabled) {
         ryw->watchMap[key].push_back(watch);
-        val = readWithConflictRange(ryw, GetValueReq(key), false);
+        val = readWithConflictRange(ryw, GetValueReq(key), Snapshot::False);
     } else {
         ryw->approximateSize += 2 * key.expectedSize() + 1;
         val = ryw->tr.get(key);

@@ -1288,6 +1293,10 @@ ReadYourWritesTransaction::ReadYourWritesTransaction(Database const& cx)
     applyPersistentOptions();
 }
 
+void ReadYourWritesTransaction::setDatabase(Database const& cx) {
+    *this = ReadYourWritesTransaction(cx);
+}
+
 ACTOR Future<Void> timebomb(double endTime, Promise<Void> resetPromise) {
     while (now() < endTime) {
         wait(delayUntil(std::min(endTime + 0.0001, now() + CLIENT_KNOBS->TRANSACTION_TIMEOUT_DELAY_INTERVAL)));

@@ -1352,7 +1361,7 @@ ACTOR Future<RangeResult> getWorkerInterfaces(Reference<ClusterConnectionFile> c
     }
 }
 
-Future<Optional<Value>> ReadYourWritesTransaction::get(const Key& key, bool snapshot) {
+Future<Optional<Value>> ReadYourWritesTransaction::get(const Key& key, Snapshot snapshot) {
     TEST(true); // ReadYourWritesTransaction::get
 
     if (getDatabase()->apiVersionAtLeast(630)) {

@@ -1416,7 +1425,7 @@ Future<Optional<Value>> ReadYourWritesTransaction::get(const Key& key, bool snap
     return result;
 }
 
-Future<Key> ReadYourWritesTransaction::getKey(const KeySelector& key, bool snapshot) {
+Future<Key> ReadYourWritesTransaction::getKey(const KeySelector& key, Snapshot snapshot) {
     if (checkUsedDuringCommit()) {
         return used_during_commit();
     }

@@ -1435,8 +1444,8 @@ Future<Key> ReadYourWritesTransaction::getKey(const KeySelector& key, bool snaps
 Future<RangeResult> ReadYourWritesTransaction::getRange(KeySelector begin,
                                                         KeySelector end,
                                                         GetRangeLimits limits,
-                                                        bool snapshot,
-                                                        bool reverse) {
+                                                        Snapshot snapshot,
+                                                        Reverse reverse) {
     if (getDatabase()->apiVersionAtLeast(630)) {
         if (specialKeys.contains(begin.getKey()) && specialKeys.begin <= end.getKey() &&
             end.getKey() <= specialKeys.end) {

@@ -1495,8 +1504,8 @@ Future<RangeResult> ReadYourWritesTransaction::getRange(KeySelector begin,
 Future<RangeResult> ReadYourWritesTransaction::getRange(const KeySelector& begin,
                                                         const KeySelector& end,
                                                         int limit,
-                                                        bool snapshot,
-                                                        bool reverse) {
+                                                        Snapshot snapshot,
+                                                        Reverse reverse) {
     return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse);
 }
 

@@ -1627,13 +1636,14 @@ void ReadYourWritesTransaction::writeRangeToNativeTransaction(KeyRangeRef const&
             clearBegin = std::max(ExtStringRef(keys.begin), it.beginKey());
             inClearRange = true;
         } else if (!it.is_cleared_range() && inClearRange) {
-            tr.clear(KeyRangeRef(clearBegin.toArenaOrRef(arena), it.beginKey().toArenaOrRef(arena)), false);
+            tr.clear(KeyRangeRef(clearBegin.toArenaOrRef(arena), it.beginKey().toArenaOrRef(arena)),
+                     AddConflictRange::False);
             inClearRange = false;
         }
     }
 
     if (inClearRange) {
-        tr.clear(KeyRangeRef(clearBegin.toArenaOrRef(arena), keys.end), false);
+        tr.clear(KeyRangeRef(clearBegin.toArenaOrRef(arena), keys.end), AddConflictRange::False);
     }
 
     it.skip(keys.begin);

@@ -1657,9 +1667,9 @@ void ReadYourWritesTransaction::writeRangeToNativeTransaction(KeyRangeRef const&
             switch (op[i].type) {
             case MutationRef::SetValue:
                 if (op[i].value.present()) {
-                    tr.set(it.beginKey().assertRef(), op[i].value.get(), false);
+                    tr.set(it.beginKey().assertRef(), op[i].value.get(), AddConflictRange::False);
                 } else {
-                    tr.clear(it.beginKey().assertRef(), false);
+                    tr.clear(it.beginKey().assertRef(), AddConflictRange::False);
                 }
                 break;
             case MutationRef::AddValue:

@@ -1676,7 +1686,7 @@ void ReadYourWritesTransaction::writeRangeToNativeTransaction(KeyRangeRef const&
             case MutationRef::MinV2:
             case MutationRef::AndV2:
             case MutationRef::CompareAndClear:
-                tr.atomicOp(it.beginKey().assertRef(), op[i].value.get(), op[i].type, false);
+                tr.atomicOp(it.beginKey().assertRef(), op[i].value.get(), op[i].type, AddConflictRange::False);
                 break;
             default:
                 break;

@@ -1729,10 +1739,6 @@ void ReadYourWritesTransaction::getWriteConflicts(KeyRangeMap<bool>* result) {
     }
 }
 
-void ReadYourWritesTransaction::preinitializeOnForeignThread() {
-    tr.preinitializeOnForeignThread();
-}
-
 void ReadYourWritesTransaction::setTransactionID(uint64_t id) {
     tr.setTransactionID(id);
 }

@@ -1845,7 +1851,7 @@ RangeResult ReadYourWritesTransaction::getWriteConflictRangeIntersecting(KeyRang
 }
 
 void ReadYourWritesTransaction::atomicOp(const KeyRef& key, const ValueRef& operand, uint32_t operationType) {
-    bool addWriteConflict = !options.getAndResetWriteConflictDisabled();
+    AddConflictRange addWriteConflict{ !options.getAndResetWriteConflictDisabled() };
 
     if (checkUsedDuringCommit()) {
         throw used_during_commit();

@@ -1893,7 +1899,7 @@ void ReadYourWritesTransaction::atomicOp(const KeyRef& key, const ValueRef& oper
         // this does validation of the key and needs to be performed before the readYourWritesDisabled path
         KeyRangeRef range = getVersionstampKeyRange(arena, k, tr.getCachedReadVersion().orDefault(0), getMaxReadKey());
         versionStampKeys.push_back(arena, k);
-        addWriteConflict = false;
+        addWriteConflict = AddConflictRange::False;
         if (!options.readYourWritesDisabled) {
             writeRangeToNativeTransaction(range);
             writes.addUnmodifiedAndUnreadableRange(range);

@@ -1953,7 +1959,7 @@ void ReadYourWritesTransaction::set(const KeyRef& key, const ValueRef& value) {
         }
     }
 
-    bool addWriteConflict = !options.getAndResetWriteConflictDisabled();
+    AddConflictRange addWriteConflict{ !options.getAndResetWriteConflictDisabled() };
 
     if (checkUsedDuringCommit()) {
         throw used_during_commit();

@@ -1983,7 +1989,7 @@ void ReadYourWritesTransaction::set(const KeyRef& key, const ValueRef& value) {
 }
 
 void ReadYourWritesTransaction::clear(const KeyRangeRef& range) {
-    bool addWriteConflict = !options.getAndResetWriteConflictDisabled();
+    AddConflictRange addWriteConflict{ !options.getAndResetWriteConflictDisabled() };
 
     if (checkUsedDuringCommit()) {
         throw used_during_commit();

@@ -2036,7 +2042,7 @@ void ReadYourWritesTransaction::clear(const KeyRangeRef& range) {
 }
 
 void ReadYourWritesTransaction::clear(const KeyRef& key) {
-    bool addWriteConflict = !options.getAndResetWriteConflictDisabled();
+    AddConflictRange addWriteConflict{ !options.getAndResetWriteConflictDisabled() };
 
     if (checkUsedDuringCommit()) {
         throw used_during_commit();

@@ -2165,7 +2171,7 @@ void ReadYourWritesTransaction::setOption(FDBTransactionOptions::Option option,
 void ReadYourWritesTransaction::setOptionImpl(FDBTransactionOptions::Option option, Optional<StringRef> value) {
     switch (option) {
     case FDBTransactionOptions::READ_YOUR_WRITES_DISABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         if (!reading.isReady() || !cache.empty() || !writes.empty())
             throw client_invalid_operation();

@@ -2174,26 +2180,26 @@ void ReadYourWritesTransaction::setOptionImpl(FDBTransactionOptions::Option opti
         break;
 
     case FDBTransactionOptions::READ_AHEAD_DISABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.readAheadDisabled = true;
         break;
 
     case FDBTransactionOptions::NEXT_WRITE_NO_WRITE_CONFLICT_RANGE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.nextWriteDisableConflictRange = true;
         break;
 
     case FDBTransactionOptions::ACCESS_SYSTEM_KEYS:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.readSystemKeys = true;
         options.writeSystemKeys = true;
         break;
 
     case FDBTransactionOptions::READ_SYSTEM_KEYS:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.readSystemKeys = true;
         break;

@@ -2217,30 +2223,30 @@ void ReadYourWritesTransaction::setOptionImpl(FDBTransactionOptions::Option opti
         transactionDebugInfo->transactionName = value.present() ? value.get().toString() : "";
         break;
     case FDBTransactionOptions::SNAPSHOT_RYW_ENABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.snapshotRywEnabled++;
         break;
     case FDBTransactionOptions::SNAPSHOT_RYW_DISABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.snapshotRywEnabled--;
         break;
     case FDBTransactionOptions::USED_DURING_COMMIT_PROTECTION_DISABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
 
         options.disableUsedDuringCommitProtection = true;
         break;
     case FDBTransactionOptions::SPECIAL_KEY_SPACE_RELAXED:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
         options.specialKeySpaceRelaxed = true;
         break;
    case FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
         options.specialKeySpaceChangeConfiguration = true;
         break;
     case FDBTransactionOptions::BYPASS_UNREADABLE:
-        validateOptionValue(value, false);
+        validateOptionValueNotPresent(value);
         options.bypassUnreadable = true;
         break;
     default:
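The readThrough hunks above show the one place where the new types interact with templates: the compile-time flag is renamed from `Reverse` to `backwards` so it no longer shadows the `Reverse` type, and it is mapped to a runtime value with `backwards ? Reverse::True : Reverse::False`. A reduced, self-contained version of that shape (the `Reverse` type here is again a simplified stand-in):

#include <iostream>

struct Reverse {
    bool value;
    constexpr explicit Reverse(bool v) : value(v) {}
    constexpr operator bool() const { return value; }
    static const Reverse True, False;
};
inline const Reverse Reverse::True{ true };
inline const Reverse Reverse::False{ false };

void getRange(Reverse reverse) {
    std::cout << "reverse=" << bool(reverse) << "\n";
}

// The template parameter had to be renamed (Reverse -> backwards) to avoid
// shadowing the type; the ternary converts it at each call site.
template <bool backwards>
void readThrough() {
    getRange(backwards ? Reverse::True : Reverse::False);
}

int main() {
    readThrough<false>();
    readThrough<true>();
}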
@@ -68,25 +68,26 @@ public:
     explicit ReadYourWritesTransaction(Database const& cx);
     ~ReadYourWritesTransaction();
 
+    void setDatabase(Database const&) override;
     void setVersion(Version v) override { tr.setVersion(v); }
     Future<Version> getReadVersion() override;
     Optional<Version> getCachedReadVersion() const override { return tr.getCachedReadVersion(); }
-    Future<Optional<Value>> get(const Key& key, bool snapshot = false) override;
-    Future<Key> getKey(const KeySelector& key, bool snapshot = false) override;
+    Future<Optional<Value>> get(const Key& key, Snapshot = Snapshot::False) override;
+    Future<Key> getKey(const KeySelector& key, Snapshot = Snapshot::False) override;
     Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
                                                 const KeySelector& end,
                                                 int limit,
-                                                bool snapshot = false,
-                                                bool reverse = false) override;
+                                                Snapshot = Snapshot::False,
+                                                Reverse = Reverse::False) override;
     Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
                                                 KeySelector end,
                                                 GetRangeLimits limits,
-                                                bool snapshot = false,
-                                                bool reverse = false) override;
+                                                Snapshot = Snapshot::False,
+                                                Reverse = Reverse::False) override;
     Future<Standalone<RangeResultRef>> getRange(const KeyRange& keys,
                                                 int limit,
-                                                bool snapshot = false,
-                                                bool reverse = false) {
+                                                Snapshot snapshot = Snapshot::False,
+                                                Reverse reverse = Reverse::False) {
         return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                         KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
                         limit,

@@ -95,8 +96,8 @@ public:
     }
     Future<RangeResult> getRange(const KeyRange& keys,
                                  GetRangeLimits limits,
-                                 bool snapshot = false,
-                                 bool reverse = false) {
+                                 Snapshot snapshot = Snapshot::False,
+                                 Reverse reverse = Reverse::False) {
         return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                         KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
                         limits,

@@ -153,8 +154,6 @@ public:
 
     void getWriteConflicts(KeyRangeMap<bool>* result) override;
 
-    void preinitializeOnForeignThread();
-
     Database getDatabase() const { return tr.getDatabase(); }
 
     const TransactionInfo& getTransactionInfo() const { return tr.info; }
@@ -26,9 +26,7 @@ ServerKnobs::ServerKnobs(Randomize randomize, ClientKnobs* clientKnobs, IsSimula
     initialize(randomize, clientKnobs, isSimulated);
 }
 
-void ServerKnobs::initialize(Randomize _randomize, ClientKnobs* clientKnobs, IsSimulated _isSimulated) {
-    bool const randomize = _randomize == Randomize::YES;
-    bool const isSimulated = _isSimulated == IsSimulated::YES;
+void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSimulated isSimulated) {
     // clang-format off
     // Versions
     init( VERSIONS_PER_SECOND, 1e6 );

@@ -102,6 +100,8 @@ void ServerKnobs::initialize(Randomize _randomize, ClientKnobs* clientKnobs, IsS
     init( PUSH_STATS_SLOW_AMOUNT, 2 );
     init( PUSH_STATS_SLOW_RATIO, 0.5 );
     init( TLOG_POP_BATCH_SIZE, 1000 ); if ( randomize && BUGGIFY ) TLOG_POP_BATCH_SIZE = 10;
+    init( TLOG_POPPED_VER_LAG_THRESHOLD_FOR_TLOGPOP_TRACE, 250e6 );
+    init( ENABLE_DETAILED_TLOG_POP_TRACE, true );
 
     // disk snapshot max timeout, to be put in TLog, storage and coordinator nodes
     init( MAX_FORKED_PROCESS_OUTPUT, 1024 );

@@ -255,6 +255,7 @@ void ServerKnobs::initialize(Randomize _randomize, ClientKnobs* clientKnobs, IsS
     init( DD_TEAMS_INFO_PRINT_YIELD_COUNT, 100 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_YIELD_COUNT = deterministicRandom()->random01() * 1000 + 1;
     init( DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY, 120 ); if( randomize && BUGGIFY ) DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY = 5;
     init( DD_STORAGE_WIGGLE_PAUSE_THRESHOLD, 1 ); if( randomize && BUGGIFY ) DD_STORAGE_WIGGLE_PAUSE_THRESHOLD = 10;
+    init( DD_STORAGE_WIGGLE_STUCK_THRESHOLD, 50 );
 
     // TeamRemover
     init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true

@@ -462,7 +463,15 @@ void ServerKnobs::initialize(Randomize _randomize, ClientKnobs* clientKnobs, IsS
     init( REPLACE_INTERFACE_CHECK_DELAY, 5.0 );
     init( COORDINATOR_REGISTER_INTERVAL, 5.0 );
     init( CLIENT_REGISTER_INTERVAL, 600.0 );
-    init( CLUSTER_CONTROLLER_ENABLE_WORKER_HEALTH_MONITOR, false );
+    init( CC_ENABLE_WORKER_HEALTH_MONITOR, false );
+    init( CC_WORKER_HEALTH_CHECKING_INTERVAL, 60.0 );
+    init( CC_DEGRADED_LINK_EXPIRATION_INTERVAL, 300.0 );
+    init( CC_MIN_DEGRADATION_INTERVAL, 120.0 );
+    init( CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE, 3 );
+    init( CC_MAX_EXCLUSION_DUE_TO_HEALTH, 2 );
+    init( CC_HEALTH_TRIGGER_RECOVERY, false );
+    init( CC_TRACKING_HEALTH_RECOVERY_INTERVAL, 3600.0 );
+    init( CC_MAX_HEALTH_RECOVERY_COUNT, 2 );
 
     init( INCOMPATIBLE_PEERS_LOGGING_INTERVAL, 600 ); if( randomize && BUGGIFY ) INCOMPATIBLE_PEERS_LOGGING_INTERVAL = 60.0;
     init( EXPECTED_MASTER_FITNESS, ProcessClass::UnsetFit );
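The knob hunks follow one fixed shape: `init( NAME, default );` optionally followed by `if( randomize && BUGGIFY ) NAME = ...;`, which lets simulation runs explore extreme values. The sketch below imitates that shape with plain variables; `init` and `BUGGIFY` here are stand-ins, not the flow/Knobs.h machinery:

#include <iostream>
#include <random>

int main() {
    std::mt19937 rng{ std::random_device{}() };
    const bool randomize = true;                                 // as in simulation
    const bool BUGGIFY = std::bernoulli_distribution(0.25)(rng); // stand-in coin flip

    // init( TLOG_POP_BATCH_SIZE, 1000 ); if ( randomize && BUGGIFY ) TLOG_POP_BATCH_SIZE = 10;
    int TLOG_POP_BATCH_SIZE = 1000;
    if (randomize && BUGGIFY)
        TLOG_POP_BATCH_SIZE = 10; // a deliberately hostile value for testing

    std::cout << "TLOG_POP_BATCH_SIZE = " << TLOG_POP_BATCH_SIZE << "\n";
}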
@@ -20,6 +20,7 @@
 
 #pragma once
 
+#include "flow/BooleanParam.h"
 #include "flow/Knobs.h"
 #include "fdbrpc/fdbrpc.h"
 #include "fdbrpc/Locality.h"

@@ -64,6 +65,8 @@ public:
     // message (measured in 1/1024ths, e.g. a value of 2048 yields a
     // factor of 2).
     int64_t VERSION_MESSAGES_ENTRY_BYTES_WITH_OVERHEAD;
+    int64_t TLOG_POPPED_VER_LAG_THRESHOLD_FOR_TLOGPOP_TRACE;
+    bool ENABLE_DETAILED_TLOG_POP_TRACE;
     double TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR;
     int64_t TLOG_MESSAGE_BLOCK_BYTES;
     int64_t MAX_MESSAGE_SIZE;

@@ -205,6 +208,7 @@ public:
     int DD_TEAMS_INFO_PRINT_YIELD_COUNT;
     int DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY;
     int DD_STORAGE_WIGGLE_PAUSE_THRESHOLD; // How many unhealthy relocations are ongoing will pause storage wiggle
+    int DD_STORAGE_WIGGLE_STUCK_THRESHOLD; // How many times bestTeamStuck accumulate will pause storage wiggle
 
     // TeamRemover to remove redundant teams
     bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER; // disable the machineTeamRemover actor

@@ -385,7 +389,23 @@ public:
     double REPLACE_INTERFACE_CHECK_DELAY;
     double COORDINATOR_REGISTER_INTERVAL;
     double CLIENT_REGISTER_INTERVAL;
-    bool CLUSTER_CONTROLLER_ENABLE_WORKER_HEALTH_MONITOR;
+    bool CC_ENABLE_WORKER_HEALTH_MONITOR;
+    double CC_WORKER_HEALTH_CHECKING_INTERVAL; // The interval of refreshing the degraded server list.
+    double CC_DEGRADED_LINK_EXPIRATION_INTERVAL; // The time period from the last degradation report after which a
+                                                 // degraded server is considered healthy.
+    double CC_MIN_DEGRADATION_INTERVAL; // The minimum interval that a server is reported as degraded to be considered
+                                        // as degraded by Cluster Controller.
+    int CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE; // The maximum number of degraded peers when excluding a server. When the
+                                            // number of degraded peers is more than this value, we will not exclude
+                                            // this server since it may because of server overload.
+    int CC_MAX_EXCLUSION_DUE_TO_HEALTH; // The max number of degraded servers to exclude by Cluster Controller due to
+                                        // degraded health.
+    bool CC_HEALTH_TRIGGER_RECOVERY; // If true, cluster controller will kill the master to trigger recovery when
+                                     // detecting degraded servers. If false, cluster controller only prints a warning.
+    double CC_TRACKING_HEALTH_RECOVERY_INTERVAL; // The number of recovery count should not exceed
+                                                 // CC_MAX_HEALTH_RECOVERY_COUNT within
+                                                 // CC_TRACKING_HEALTH_RECOVERY_INTERVAL.
+    int CC_MAX_HEALTH_RECOVERY_COUNT;
 
     // Knobs used to select the best policy (via monte carlo)
     int POLICY_RATING_TESTS; // number of tests per policy (in order to compare)
@@ -221,23 +221,23 @@ Optional<Version> SimpleConfigTransaction::getCachedReadVersion() const {
     return impl().getCachedReadVersion();
 }
 
-Future<Optional<Value>> SimpleConfigTransaction::get(Key const& key, bool snapshot) {
+Future<Optional<Value>> SimpleConfigTransaction::get(Key const& key, Snapshot snapshot) {
     return impl().get(key);
 }
 
 Future<Standalone<RangeResultRef>> SimpleConfigTransaction::getRange(KeySelector const& begin,
                                                                      KeySelector const& end,
                                                                      int limit,
-                                                                     bool snapshot,
-                                                                     bool reverse) {
+                                                                     Snapshot snapshot,
+                                                                     Reverse reverse) {
     return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
 }
 
 Future<Standalone<RangeResultRef>> SimpleConfigTransaction::getRange(KeySelector begin,
                                                                      KeySelector end,
                                                                      GetRangeLimits limits,
-                                                                     bool snapshot,
-                                                                     bool reverse) {
+                                                                     Snapshot snapshot,
+                                                                     Reverse reverse) {
     return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
 }
 

@@ -290,10 +290,13 @@ void SimpleConfigTransaction::checkDeferredError() const {
     impl().checkDeferredError(deferredError);
 }
 
-SimpleConfigTransaction::SimpleConfigTransaction(Database const& cx)
-  : _impl(std::make_unique<SimpleConfigTransactionImpl>(cx)) {}
+void SimpleConfigTransaction::setDatabase(Database const& cx) {
+    _impl = std::make_unique<SimpleConfigTransactionImpl>(cx);
+}
 
 SimpleConfigTransaction::SimpleConfigTransaction(ConfigTransactionInterface const& cti)
   : _impl(std::make_unique<SimpleConfigTransactionImpl>(cti)) {}
 
+SimpleConfigTransaction::SimpleConfigTransaction() = default;
+
 SimpleConfigTransaction::~SimpleConfigTransaction() = default;
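The SimpleConfigTransaction hunk above, like the ReadYourWritesTransaction and PaxosConfigTransaction ones earlier, replaces the Database-taking constructor with a default constructor plus `setDatabase()`, so callers can pick a transaction implementation first and bind it to a database afterwards. A reduced sketch of that two-phase shape; names mirror the diff, bodies are stubbed for the example:

#include <iostream>
#include <memory>

struct Database {};

struct SimpleConfigTransactionImpl {
    explicit SimpleConfigTransactionImpl(Database const&) {}
};

// Reduced model of the pattern in the diff: construct empty, bind later.
class SimpleConfigTransaction {
    std::unique_ptr<SimpleConfigTransactionImpl> _impl;

public:
    SimpleConfigTransaction() = default; // _impl stays null until bound
    void setDatabase(Database const& cx) {
        _impl = std::make_unique<SimpleConfigTransactionImpl>(cx);
    }
    bool isBound() const { return _impl != nullptr; }
};

int main() {
    Database cx;
    SimpleConfigTransaction tr; // phase 1: choose the implementation
    tr.setDatabase(cx);         // phase 2: bind it to a database
    std::cout << (tr.isBound() ? "bound" : "unbound") << "\n";
}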
[Some files were not shown because too many files have changed in this diff.]