Merge branch 'main' into metacluster-concurrent-restore-testing

commit 2b25cfef8b
@@ -270,7 +270,13 @@ function(stage_correctness_package)
     list(APPEND package_files "${out_file}")
   endforeach()
 
-  list(APPEND package_files ${test_files} ${external_files})
+  add_custom_command(
+    OUTPUT "${STAGE_OUT_DIR}/joshua_logtool.py"
+    COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/contrib/joshua_logtool.py" "${STAGE_OUT_DIR}/joshua_logtool.py"
+    DEPENDS "${CMAKE_SOURCE_DIR}/contrib/joshua_logtool.py"
+  )
+
+  list(APPEND package_files ${test_files} ${external_files} "${STAGE_OUT_DIR}/joshua_logtool.py")
   if(STAGE_OUT_FILES)
     set(${STAGE_OUT_FILES} ${package_files} PARENT_SCOPE)
   endif()
@@ -88,7 +88,8 @@ class TestPicker:
 
        if not self.tests:
            raise Exception(
-               "No tests to run! Please check if tests are included/excluded incorrectly or old binaries are missing for restarting tests")
+               "No tests to run! Please check if tests are included/excluded incorrectly or old binaries are missing for restarting tests"
+           )
 
    def add_time(self, test_file: Path, run_time: int, out: SummaryTree) -> None:
        # getting the test name is fairly inefficient. But since we only have 100s of tests, I won't bother
@@ -144,7 +145,11 @@ class TestPicker:
        candidates: List[Path] = []
        dirs = path.parent.parts
        version_expr = dirs[-1].split("_")
-       if (version_expr[0] == "from" or version_expr[0] == "to") and len(version_expr) == 4 and version_expr[2] == "until":
+       if (
+           (version_expr[0] == "from" or version_expr[0] == "to")
+           and len(version_expr) == 4
+           and version_expr[2] == "until"
+       ):
            max_version = Version.parse(version_expr[3])
            min_version = Version.parse(version_expr[1])
            for ver, binary in self.old_binaries.items():
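The condition being reformatted above encodes the naming convention for restarting-test directories: the last path component describes a version range. A minimal sketch of the parse, using a hypothetical directory name:

    # "from_7.1.0_until_7.3.0" is a hypothetical directory name following the
    # from/to_<min>_until_<max> convention checked above.
    version_expr = "from_7.1.0_until_7.3.0".split("_")
    if (
        (version_expr[0] == "from" or version_expr[0] == "to")
        and len(version_expr) == 4
        and version_expr[2] == "until"
    ):
        min_version, max_version = version_expr[1], version_expr[3]
        # The harness parses these with Version.parse and picks old binaries in [min, max].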
@@ -384,6 +389,22 @@ class TestRun:
    def delete_simdir(self):
        shutil.rmtree(self.temp_path / Path("simfdb"))
 
+   def _run_rocksdb_logtool(self):
+       """Calls Joshua LogTool to upload the test logs if (1) the test failed and (2) the test is RocksDB related"""
+       if not os.path.exists("joshua_logtool.py"):
+           raise RuntimeError("joshua_logtool.py missing")
+       command = [
+           "python3",
+           "joshua_logtool.py",
+           "upload",
+           "--test-uid",
+           str(self.uid),
+           "--log-directory",
+           str(self.temp_path),
+           "--check-rocksdb"
+       ]
+       subprocess.run(command, check=True)
+
    def run(self):
        command: List[str] = []
        env: Dict[str, str] = os.environ.copy()
@@ -473,6 +494,9 @@ class TestRun:
        self.summary.valgrind_out_file = valgrind_file
        self.summary.error_out = err_out
        self.summary.summarize(self.temp_path, " ".join(command))
+
+       if not self.summary.ok():
+           self._run_rocksdb_logtool()
        return self.summary.ok()
@@ -0,0 +1,204 @@
+#! /usr/bin/env python3
+
+"""joshua_logtool.py
+
+Provides uploading/downloading of FoundationDB log files to the Joshua cluster.
+"""
+
+import argparse
+import logging
+import os
+import os.path
+import re
+import pathlib
+import subprocess
+import tempfile
+
+import fdb
+import joshua.joshua_model as joshua
+
+from typing import List, Optional
+
+# Defined in SimulatedCluster.actor.cpp:SimulationConfig::setStorageEngine
+ROCKSDB_TRACEEVENT_STRING = ["RocksDBNonDeterminism", "ShardedRocksDBNonDeterminism"]
+
+# e.g. /var/joshua/ensembles/20230221-051349-xiaogesu-c9fc5b230dcd91cf
+ENSEMBLE_ID_REGEXP = re.compile(r"ensembles\/(?P<ensemble_id>[0-9A-Za-z\-_]+)$")
+
+# e.g. <Test TestUID="1ad90d42-824b-4693-aacf-53de3a6ccd27" Statistics="AAAA
+TEST_UID_REGEXP = re.compile(r"TestUID=\"(?P<uid>[0-9a-fA-F\-]+)\"")
+
+logger = logging.getLogger(__name__)
+
+
+def _execute_grep(string: str, paths: List[pathlib.Path]) -> bool:
+    command = ["grep", "-F", string] + [str(path) for path in paths]
+    result = subprocess.run(command, stdout=subprocess.DEVNULL)
+    return result.returncode == 0
+
+
+def _is_rocksdb_test(log_files: List[pathlib.Path]) -> bool:
+    for event_str in ROCKSDB_TRACEEVENT_STRING:
+        if _execute_grep(event_str, log_files):
+            return True
+    return False
+
+
+def _extract_ensemble_id(work_directory: str) -> Optional[str]:
+    match = ENSEMBLE_ID_REGEXP.search(work_directory)
+    if not match:
+        return None
+    return match.groupdict()["ensemble_id"]
+
+
+def _get_log_subspace(ensemble_id: str, test_uid: str):
+    subspace = joshua.dir_ensemble_results_application
+    log_space = subspace.create_or_open(joshua.db, "simulation_logs")
+    return log_space[bytes(ensemble_id, "utf-8")][bytes(test_uid, "utf-8")]
+
+
+def _tar_logs(log_files: List[pathlib.Path], output_file_name: pathlib.Path):
+    command = ["tar", "-c", "-f", str(output_file_name), "--xz"] + [
+        str(log_file) for log_file in log_files
+    ]
+    logger.debug(f"Execute tar: {command}")
+    subprocess.run(command, check=True, stdout=subprocess.DEVNULL)
+
+
+def _tar_extract(path_to_archive: pathlib.Path):
+    command = ["tar", "xf", str(path_to_archive)]
+    subprocess.run(command, check=True, stdout=subprocess.DEVNULL)
+
+
+def report_error(
+    work_directory: str,
+    log_directory: str,
+    ensemble_id: str,
+    test_uid: str,
+    check_rocksdb: bool,
+):
+    log_files = list(pathlib.Path(log_directory).glob("**/trace*.xml"))
+    if len(log_files) == 0:
+        logger.debug(f"No XML file found in directory {log_directory}")
+    log_files += list(pathlib.Path(log_directory).glob("**/trace*.json"))
+    if len(log_files) == 0:
+        logger.debug(f"No JSON file found in directory {log_directory}")
+        return
+    logger.debug(f"Total {len(log_files)} files found")
+
+    if check_rocksdb and not _is_rocksdb_test(log_files):
+        logger.debug("Not a RocksDB test")
+        return
+
+    ensemble_id = ensemble_id or _extract_ensemble_id(work_directory)
+    if not ensemble_id:
+        logger.debug(f"Ensemble ID missing in work directory {work_directory}")
+        raise RuntimeError(f"Ensemble ID missing in work directory {work_directory}")
+    logger.debug(f"Ensemble ID: {ensemble_id}")
+
+    with tempfile.NamedTemporaryFile() as archive:
+        logger.debug(f"Tarfile: {archive.name}")
+        _tar_logs(log_files, archive.name)
+        logger.debug(f"Tarfile size: {os.path.getsize(archive.name)}")
+        subspace = _get_log_subspace(ensemble_id, test_uid)
+        joshua._insert_blob(joshua.db, subspace, archive, offset=0)
+
+
+def download_logs(ensemble_id: str, test_uid: str):
+    with tempfile.NamedTemporaryFile() as archive:
+        subspace = _get_log_subspace(ensemble_id, test_uid)
+        logger.debug(
+            f"Downloading the archive to {archive.name} at subspace {subspace}"
+        )
+        joshua._read_blob(joshua.db, subspace, archive)
+        logger.debug(f"Tarfile size: {os.path.getsize(archive.name)}")
+        _tar_extract(archive.name)
+
+
+def list_commands(ensemble_id: str):
+    for item in joshua.tail_results(ensemble_id, errors_only=True):
+        test_harness_output = item[4]
+        match = TEST_UID_REGEXP.search(test_harness_output)
+        if not match:
+            logger.warning(f"Test UID not found in {test_harness_output}")
+            continue
+        test_uid = match.groupdict()["uid"]
+        print(
+            f"python3 {__file__} download --ensemble-id {ensemble_id} --test-uid {test_uid}"
+        )
+
+
+def _setup_args():
+    parser = argparse.ArgumentParser(prog="joshua_logtool.py")
+
+    parser.add_argument(
+        "--cluster-file", type=str, default=None, help="Joshua FDB cluster file"
+    )
+
+    subparsers = parser.add_subparsers(help="Possible actions", dest="action")
+
+    upload_parser = subparsers.add_parser(
+        "upload", help="Check the log file, upload them to Joshua cluster if necessary"
+    )
+    upload_parser.add_argument(
+        "--work-directory", type=str, default=os.getcwd(), help="Work directory"
+    )
+    upload_parser.add_argument(
+        "--log-directory",
+        type=str,
+        required=True,
+        help="Directory contains XML/JSON logs",
+    )
+    upload_parser.add_argument(
+        "--ensemble-id", type=str, default=None, required=False, help="Ensemble ID"
+    )
+    upload_parser.add_argument("--test-uid", type=str, required=True, help="Test UID")
+    upload_parser.add_argument(
+        "--check-rocksdb",
+        action="store_true",
+        help="If true, only upload logs when RocksDB is involved; otherwise, always upload logs.",
+    )
+
+    download_parser = subparsers.add_parser(
+        "download", help="Download the log file from Joshua to local directory"
+    )
+    download_parser.add_argument(
+        "--ensemble-id", type=str, required=True, help="Joshua ensemble ID"
+    )
+    download_parser.add_argument("--test-uid", type=str, required=True, help="Test UID")
+
+    list_parser = subparsers.add_parser(
+        "list",
+        help="List the possible download commands for failed tests in a given ensemble. NOTE: It is possible that the test is not relevant to RocksDB and no log file is available. It is the user's responsibility to verify if this happens.",
+    )
+    list_parser.add_argument(
+        "--ensemble-id", type=str, required=True, help="Joshua ensemble ID"
+    )
+
+    return parser.parse_args()
+
+
+def _main():
+    args = _setup_args()
+
+    logging.basicConfig(level=logging.INFO)
+
+    logger.debug(f"Using cluster file {args.cluster_file}")
+    joshua.open(args.cluster_file)
+
+    if args.action == "upload":
+        report_error(
+            work_directory=args.work_directory,
+            log_directory=args.log_directory,
+            ensemble_id=args.ensemble_id,
+            test_uid=args.test_uid,
+            check_rocksdb=args.check_rocksdb,
+        )
+    elif args.action == "download":
+        download_logs(ensemble_id=args.ensemble_id, test_uid=args.test_uid)
+    elif args.action == "list":
+        list_commands(ensemble_id=args.ensemble_id)
+
+
+if __name__ == "__main__":
+    _main()
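For reference, a hedged usage sketch that mirrors TestRun._run_rocksdb_logtool above; the test UID and ensemble ID reuse the illustrative values from this file's comments, and the log directory path is hypothetical:

    import subprocess
    # Upload trace logs for a failed test, but only if RocksDB trace events are present.
    subprocess.run(
        ["python3", "joshua_logtool.py", "upload",
         "--test-uid", "1ad90d42-824b-4693-aacf-53de3a6ccd27",
         "--log-directory", "/tmp/testrun",
         "--check-rocksdb"],
        check=True,
    )
    # Later, fetch the archived logs for that test (e.g. on a developer machine).
    subprocess.run(
        ["python3", "joshua_logtool.py", "download",
         "--ensemble-id", "20230221-051349-xiaogesu-c9fc5b230dcd91cf",
         "--test-uid", "1ad90d42-824b-4693-aacf-53de3a6ccd27"],
        check=True,
    )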
@@ -36,6 +36,22 @@
 
 namespace fdb_cli {
 
+template <class TenantGroupEntryImpl>
+void tenantGroupListOutput(std::vector<std::pair<TenantGroupName, TenantGroupEntryImpl>> tenantGroups, int tokensSize) {
+    if (tenantGroups.empty()) {
+        if (tokensSize == 2) {
+            fmt::print("The cluster has no tenant groups\n");
+        } else {
+            fmt::print("The cluster has no tenant groups in the specified range\n");
+        }
+    }
+
+    int index = 0;
+    for (auto tenantGroup : tenantGroups) {
+        fmt::print(" {}. {}\n", ++index, printable(tenantGroup.first));
+    }
+}
+
 // tenantgroup list command
 ACTOR Future<bool> tenantGroupListCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
     if (tokens.size() > 5) {
@@ -74,27 +90,15 @@ ACTOR Future<bool> tenantGroupListCommand(Reference<IDatabase> db, std::vector<S
     try {
         tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
         state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
-        state std::vector<TenantGroupName> tenantGroupNames;
-        state std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups;
 
         if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
-            wait(store(tenantGroups,
-                       MetaclusterAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit)));
+            std::vector<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>> metaclusterTenantGroups =
+                wait(MetaclusterAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit));
+            tenantGroupListOutput(metaclusterTenantGroups, tokens.size());
         } else {
-            wait(store(tenantGroups,
-                       TenantAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit)));
-        }
-
-        if (tenantGroups.empty()) {
-            if (tokens.size() == 2) {
-                fmt::print("The cluster has no tenant groups\n");
-            } else {
-                fmt::print("The cluster has no tenant groups in the specified range\n");
-            }
-        }
-
-        int index = 0;
-        for (auto tenantGroup : tenantGroups) {
-            fmt::print(" {}. {}\n", ++index, printable(tenantGroup.first));
+            std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups =
+                wait(TenantAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit));
+            tenantGroupListOutput(tenantGroups, tokens.size());
         }
 
         return true;
@@ -104,6 +108,29 @@ ACTOR Future<bool> tenantGroupListCommand(Reference<IDatabase> db, std::vector<S
     }
 }
 
+void tenantGroupGetOutput(MetaclusterTenantGroupEntry entry, bool useJson) {
+    if (useJson) {
+        json_spirit::mObject resultObj;
+        resultObj["tenant_group"] = entry.toJson();
+        resultObj["type"] = "success";
+        fmt::print("{}\n", json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print));
+    } else {
+        fmt::print(" assigned cluster: {}\n", printable(entry.assignedCluster));
+    }
+}
+void tenantGroupGetOutput(TenantGroupEntry entry, bool useJson) {
+    if (useJson) {
+        json_spirit::mObject resultObj;
+        resultObj["tenant_group"] = entry.toJson();
+        resultObj["type"] = "success";
+        fmt::print("{}\n", json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print));
+    } else {
+        // This is a placeholder output for when a tenant group is read in a non-metacluster, where
+        // it currently has no metadata. When metadata is eventually added, we can print that instead.
+        fmt::print("The tenant group is present in the cluster\n");
+    }
+}
+
 // tenantgroup get command
 ACTOR Future<bool> tenantGroupGetCommand(Reference<IDatabase> db, std::vector<StringRef> tokens) {
     if (tokens.size() > 4 || (tokens.size() == 4 && tokens[3] != "JSON"_sr)) {
@@ -120,43 +147,21 @@ ACTOR Future<bool> tenantGroupGetCommand(Reference<IDatabase> db, std::vector<St
     try {
         tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
         state ClusterType clusterType = wait(TenantAPI::getClusterType(tr));
-        state std::string tenantJson;
-        state Optional<TenantGroupEntry> entry;
         if (clusterType == ClusterType::METACLUSTER_MANAGEMENT) {
-            wait(store(entry, MetaclusterAPI::tryGetTenantGroupTransaction(tr, tokens[2])));
-        } else {
-            wait(store(entry, TenantAPI::tryGetTenantGroupTransaction(tr, tokens[2])));
-            Optional<MetaclusterRegistrationEntry> metaclusterRegistration =
-                wait(MetaclusterMetadata::metaclusterRegistration().get(tr));
-
-            // We don't store assigned clusters in the tenant group entry on data clusters, so we can instead
-            // populate it from the metacluster registration
-            if (entry.present() && metaclusterRegistration.present() &&
-                metaclusterRegistration.get().clusterType == ClusterType::METACLUSTER_DATA &&
-                !entry.get().assignedCluster.present()) {
-                entry.get().assignedCluster = metaclusterRegistration.get().name;
-            }
-        }
-
-        if (!entry.present()) {
-            throw tenant_not_found();
-        }
-
-        if (useJson) {
-            json_spirit::mObject resultObj;
-            resultObj["tenant_group"] = entry.get().toJson();
-            resultObj["type"] = "success";
-            fmt::print("{}\n",
-                       json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print));
+            state Optional<MetaclusterTenantGroupEntry> mEntry =
+                wait(MetaclusterAPI::tryGetTenantGroupTransaction(tr, tokens[2]));
+            if (!mEntry.present()) {
+                throw tenant_not_found();
+            }
+            tenantGroupGetOutput(mEntry.get(), useJson);
         } else {
-            if (entry.get().assignedCluster.present()) {
-                fmt::print(" assigned cluster: {}\n", printable(entry.get().assignedCluster));
-            } else {
-                // This is a placeholder output for when a tenant group is read in a non-metacluster, where
-                // it currently has no metadata. When metadata is eventually added, we can print that instead.
-                fmt::print("The tenant group is present in the cluster\n");
-            }
+            state Optional<TenantGroupEntry> entry = wait(TenantAPI::tryGetTenantGroupTransaction(tr, tokens[2]));
+            if (!entry.present()) {
+                throw tenant_not_found();
+            }
+            tenantGroupGetOutput(entry.get(), useJson);
         }
 
         return true;
     } catch (Error& e) {
         try {
@@ -234,6 +234,19 @@ bool MetaclusterTenantMapEntry::operator!=(MetaclusterTenantMapEntry const& othe
     return !(*this == other);
 }
 
+json_spirit::mObject MetaclusterTenantGroupEntry::toJson() const {
+    json_spirit::mObject tenantGroupEntry;
+    tenantGroupEntry["assigned_cluster"] = binaryToJson(assignedCluster);
+    return tenantGroupEntry;
+}
+
+bool MetaclusterTenantGroupEntry::operator==(MetaclusterTenantGroupEntry const& other) const {
+    return assignedCluster == other.assignedCluster;
+}
+bool MetaclusterTenantGroupEntry::operator!=(MetaclusterTenantGroupEntry const& other) const {
+    return !(*this == other);
+}
+
 KeyBackedObjectProperty<MetaclusterRegistrationEntry, decltype(IncludeVersion())>&
 MetaclusterMetadata::metaclusterRegistration() {
     static KeyBackedObjectProperty<MetaclusterRegistrationEntry, decltype(IncludeVersion())> instance(
@@ -71,8 +71,8 @@ KeyBackedMap<ClusterName, int64_t, TupleCodec<ClusterName>, BinaryCodec<int64_t>
 KeyBackedSet<Tuple> ManagementClusterMetadata::clusterTenantIndex("metacluster/dataCluster/tenantMap/"_sr);
 KeyBackedSet<Tuple> ManagementClusterMetadata::clusterTenantGroupIndex("metacluster/dataCluster/tenantGroupMap/"_sr);
 
-TenantMetadataSpecification<MetaclusterTenantMapEntry>& ManagementClusterMetadata::tenantMetadata() {
-    static TenantMetadataSpecification<MetaclusterTenantMapEntry> instance(""_sr);
+TenantMetadataSpecification<MetaclusterTenantTypes>& ManagementClusterMetadata::tenantMetadata() {
+    static TenantMetadataSpecification<MetaclusterTenantTypes> instance(""_sr);
     return instance;
 }
 
@@ -851,7 +851,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
     // This exists for flexibility but assigning each ReadType to its own unique priority number makes the most sense
     // The enumeration is currently: eager, fetch, low, normal, high
     init( STORAGESERVER_READTYPE_PRIORITY_MAP, "0,1,2,3,4" );
-    init( SPLIT_METRICS_MAX_ROWS, 10000 );
+    init( SPLIT_METRICS_MAX_ROWS, 10000 ); if( randomize && BUGGIFY ) SPLIT_METRICS_MAX_ROWS = 10;
 
     //Wait Failure
     init( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS, 250 ); if( randomize && BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2;
@@ -167,15 +167,12 @@ bool TenantMapEntry::operator!=(TenantMapEntry const& other) const {
 
 json_spirit::mObject TenantGroupEntry::toJson() const {
     json_spirit::mObject tenantGroupEntry;
-    if (assignedCluster.present()) {
-        tenantGroupEntry["assigned_cluster"] = binaryToJson(assignedCluster.get());
-    }
-
+    // No fields currently
     return tenantGroupEntry;
 }
 
 bool TenantGroupEntry::operator==(TenantGroupEntry const& other) const {
-    return assignedCluster == other.assignedCluster;
+    return true;
 }
 bool TenantGroupEntry::operator!=(TenantGroupEntry const& other) const {
     return !(*this == other);
@@ -191,8 +188,8 @@ bool TenantTombstoneCleanupData::operator!=(TenantTombstoneCleanupData const& ot
     return !(*this == other);
 }
 
-TenantMetadataSpecification<TenantMapEntry>& TenantMetadata::instance() {
-    static TenantMetadataSpecification _instance = TenantMetadataSpecification<TenantMapEntry>("\xff/"_sr);
+TenantMetadataSpecification<StandardTenantTypes>& TenantMetadata::instance() {
+    static TenantMetadataSpecification _instance = TenantMetadataSpecification<StandardTenantTypes>("\xff/"_sr);
     return _instance;
 }
 
@@ -195,6 +195,36 @@ struct MetaclusterTenantMapEntry {
     }
 };
 
+struct MetaclusterTenantGroupEntry {
+    constexpr static FileIdentifier file_identifier = 1082739;
+
+    ClusterName assignedCluster;
+
+    MetaclusterTenantGroupEntry() = default;
+    MetaclusterTenantGroupEntry(ClusterName assignedCluster) : assignedCluster(assignedCluster) {}
+
+    json_spirit::mObject toJson() const;
+
+    Value encode() { return ObjectWriter::toValue(*this, IncludeVersion()); }
+    static MetaclusterTenantGroupEntry decode(ValueRef const& value) {
+        return ObjectReader::fromStringRef<MetaclusterTenantGroupEntry>(value, IncludeVersion());
+    }
+
+    bool operator==(MetaclusterTenantGroupEntry const& other) const;
+    bool operator!=(MetaclusterTenantGroupEntry const& other) const;
+
+    template <class Ar>
+    void serialize(Ar& ar) {
+        serializer(ar, assignedCluster);
+    }
+};
+
+class MetaclusterTenantTypes {
+public:
+    using TenantMapEntryT = MetaclusterTenantMapEntry;
+    using TenantGroupEntryT = MetaclusterTenantGroupEntry;
+};
+
 struct MetaclusterMetrics {
     int numTenants = 0;
     int numDataClusters = 0;
@@ -123,7 +123,7 @@ struct ManagementClusterMetadata {
     }
 };
 
-    static TenantMetadataSpecification<MetaclusterTenantMapEntry>& tenantMetadata();
+    static TenantMetadataSpecification<MetaclusterTenantTypes>& tenantMetadata();
 
     // A map from cluster name to the metadata associated with a cluster
     static KeyBackedObjectMap<ClusterName, DataClusterEntry, decltype(IncludeVersion())>& dataClusters();
@@ -1294,7 +1294,7 @@ void managementClusterAddTenantToGroup(Transaction tr,
 
     if (!groupAlreadyExists) {
         ManagementClusterMetadata::tenantMetadata().tenantGroupMap.set(
-            tr, tenantEntry.tenantGroup.get(), TenantGroupEntry(tenantEntry.assignedCluster));
+            tr, tenantEntry.tenantGroup.get(), MetaclusterTenantGroupEntry(tenantEntry.assignedCluster));
         ManagementClusterMetadata::clusterTenantGroupIndex.insert(
             tr, Tuple::makeTuple(tenantEntry.assignedCluster, tenantEntry.tenantGroup.get()));
     }
@@ -1951,7 +1951,7 @@ struct RestoreClusterImpl {
     ACTOR static Future<bool> addTenantToManagementCluster(RestoreClusterImpl* self,
                                                            Reference<ITransaction> tr,
                                                            TenantMapEntry tenantEntry) {
-        state Future<Optional<TenantGroupEntry>> tenantGroupEntry = Optional<TenantGroupEntry>();
+        state Future<Optional<MetaclusterTenantGroupEntry>> tenantGroupEntry = Optional<MetaclusterTenantGroupEntry>();
         if (tenantEntry.tenantGroup.present()) {
             tenantGroupEntry =
                 ManagementClusterMetadata::tenantMetadata().tenantGroupMap.get(tr, tenantEntry.tenantGroup.get());
@@ -2338,23 +2338,22 @@ struct CreateTenantImpl {
     ACTOR static Future<std::pair<ClusterName, bool>> assignTenant(CreateTenantImpl* self,
                                                                    Reference<typename DB::TransactionT> tr) {
         // If our tenant group is already assigned, then we just use that assignment
-        state Optional<TenantGroupEntry> groupEntry;
+        state Optional<MetaclusterTenantGroupEntry> groupEntry;
         if (self->tenantEntry.tenantGroup.present()) {
-            Optional<TenantGroupEntry> _groupEntry =
+            Optional<MetaclusterTenantGroupEntry> _groupEntry =
                 wait(ManagementClusterMetadata::tenantMetadata().tenantGroupMap.get(
                     tr, self->tenantEntry.tenantGroup.get()));
             groupEntry = _groupEntry;
 
             if (groupEntry.present()) {
-                ASSERT(groupEntry.get().assignedCluster.present());
                 if (!self->assignClusterAutomatically &&
-                    groupEntry.get().assignedCluster.get() != self->tenantEntry.assignedCluster) {
+                    groupEntry.get().assignedCluster != self->tenantEntry.assignedCluster) {
                     TraceEvent("MetaclusterCreateTenantGroupClusterMismatch")
-                        .detail("TenantGroupCluster", groupEntry.get().assignedCluster.get())
+                        .detail("TenantGroupCluster", groupEntry.get().assignedCluster)
                         .detail("SpecifiedCluster", self->tenantEntry.assignedCluster);
                     throw invalid_tenant_configuration();
                 }
-                return std::make_pair(groupEntry.get().assignedCluster.get(), true);
+                return std::make_pair(groupEntry.get().assignedCluster, true);
             }
         }
 
@@ -2885,7 +2884,7 @@ struct ConfigureTenantImpl {
             return Void();
         }
 
-        state Optional<TenantGroupEntry> tenantGroupEntry =
+        state Optional<MetaclusterTenantGroupEntry> tenantGroupEntry =
             wait(ManagementClusterMetadata::tenantMetadata().tenantGroupMap.get(tr, desiredGroup.get()));
 
         // If we are creating a new tenant group, we need to have capacity on the current cluster
@@ -3203,20 +3202,20 @@ Future<Void> renameTenant(Reference<DB> db, TenantName oldName, TenantName newNa
 }
 
 template <class Transaction>
-Future<Optional<TenantGroupEntry>> tryGetTenantGroupTransaction(Transaction tr, TenantGroupName name) {
+Future<Optional<MetaclusterTenantGroupEntry>> tryGetTenantGroupTransaction(Transaction tr, TenantGroupName name) {
     tr->setOption(FDBTransactionOptions::RAW_ACCESS);
     return ManagementClusterMetadata::tenantMetadata().tenantGroupMap.get(tr, name);
 }
 
 ACTOR template <class DB>
-Future<Optional<TenantGroupEntry>> tryGetTenantGroup(Reference<DB> db, TenantGroupName name) {
+Future<Optional<MetaclusterTenantGroupEntry>> tryGetTenantGroup(Reference<DB> db, TenantGroupName name) {
     state Reference<typename DB::TransactionT> tr = db->createTransaction();
 
     loop {
         try {
             tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
             tr->setOption(FDBTransactionOptions::READ_LOCK_AWARE);
-            Optional<TenantGroupEntry> entry = wait(tryGetTenantGroupTransaction(tr, name));
+            Optional<MetaclusterTenantGroupEntry> entry = wait(tryGetTenantGroupTransaction(tr, name));
             return entry;
         } catch (Error& e) {
             wait(safeThreadFutureToFuture(tr->onError(e)));
@@ -3225,30 +3224,28 @@ Future<Optional<TenantGroupEntry>> tryGetTenantGroup(Reference<DB> db, TenantGro
 }
 
 ACTOR template <class Transaction>
-Future<std::vector<std::pair<TenantGroupName, TenantGroupEntry>>> listTenantGroupsTransaction(Transaction tr,
-                                                                                              TenantGroupName begin,
-                                                                                              TenantGroupName end,
-                                                                                              int limit) {
+Future<std::vector<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>>>
+listTenantGroupsTransaction(Transaction tr, TenantGroupName begin, TenantGroupName end, int limit) {
     tr->setOption(FDBTransactionOptions::RAW_ACCESS);
 
-    KeyBackedRangeResult<std::pair<TenantGroupName, TenantGroupEntry>> results =
+    KeyBackedRangeResult<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>> results =
         wait(ManagementClusterMetadata::tenantMetadata().tenantGroupMap.getRange(tr, begin, end, limit));
 
     return results.results;
 }
 
 ACTOR template <class DB>
-Future<std::vector<std::pair<TenantGroupName, TenantGroupEntry>>> listTenantGroups(Reference<DB> db,
-                                                                                   TenantGroupName begin,
-                                                                                   TenantGroupName end,
-                                                                                   int limit) {
+Future<std::vector<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>>> listTenantGroups(Reference<DB> db,
+                                                                                              TenantGroupName begin,
+                                                                                              TenantGroupName end,
+                                                                                              int limit) {
     state Reference<typename DB::TransactionT> tr = db->createTransaction();
 
     loop {
         try {
             tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
             tr->setOption(FDBTransactionOptions::READ_LOCK_AWARE);
-            std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups =
+            std::vector<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>> tenantGroups =
                 wait(listTenantGroupsTransaction(tr, begin, end, limit));
             return tenantGroups;
         } catch (Error& e) {
@@ -121,10 +121,7 @@ struct TenantMapEntry {
 struct TenantGroupEntry {
     constexpr static FileIdentifier file_identifier = 10764222;
 
-    Optional<ClusterName> assignedCluster;
-
     TenantGroupEntry() = default;
-    TenantGroupEntry(Optional<ClusterName> assignedCluster) : assignedCluster(assignedCluster) {}
 
     json_spirit::mObject toJson() const;
 
@@ -138,10 +135,16 @@ struct TenantGroupEntry {
 
     template <class Ar>
     void serialize(Ar& ar) {
-        serializer(ar, assignedCluster);
+        serializer(ar);
     }
 };
 
+class StandardTenantTypes {
+public:
+    using TenantMapEntryT = TenantMapEntry;
+    using TenantGroupEntryT = TenantGroupEntry;
+};
+
 struct TenantTombstoneCleanupData {
     constexpr static FileIdentifier file_identifier = 3291339;
 
@@ -193,11 +196,12 @@ struct TenantIdCodec {
     }
 };
 
-template <class TenantMapEntryImpl>
+template <class TenantTypes>
 struct TenantMetadataSpecification {
     Key subspace;
 
-    KeyBackedObjectMap<int64_t, TenantMapEntryImpl, decltype(IncludeVersion()), TenantIdCodec> tenantMap;
+    KeyBackedObjectMap<int64_t, typename TenantTypes::TenantMapEntryT, decltype(IncludeVersion()), TenantIdCodec>
+        tenantMap;
     KeyBackedMap<TenantName, int64_t> tenantNameIndex;
     KeyBackedMap<int64_t, UID> lockId;
     KeyBackedProperty<int64_t> lastTenantId;
@@ -205,7 +209,8 @@ struct TenantMetadataSpecification {
     KeyBackedSet<int64_t> tenantTombstones;
     KeyBackedObjectProperty<TenantTombstoneCleanupData, decltype(IncludeVersion())> tombstoneCleanupData;
     KeyBackedSet<Tuple> tenantGroupTenantIndex;
-    KeyBackedObjectMap<TenantGroupName, TenantGroupEntry, decltype(IncludeVersion()), NullCodec> tenantGroupMap;
+    KeyBackedObjectMap<TenantGroupName, typename TenantTypes::TenantGroupEntryT, decltype(IncludeVersion()), NullCodec>
+        tenantGroupMap;
     KeyBackedMap<TenantGroupName, int64_t> storageQuota;
     KeyBackedBinaryValue<Versionstamp> lastTenantModification;
 
@@ -222,7 +227,7 @@ struct TenantMetadataSpecification {
 };
 
 struct TenantMetadata {
-    static TenantMetadataSpecification<TenantMapEntry>& instance();
+    static TenantMetadataSpecification<StandardTenantTypes>& instance();
 
     static inline auto& subspace() { return instance().subspace; }
     static inline auto& tenantMap() { return instance().tenantMap; }
@@ -22,6 +22,7 @@
 #include <string>
 #include "fdbclient/ClientBooleanParams.h"
 #include "fdbserver/RestoreUtil.h"
+#include "flow/CodeProbe.h"
 #include "flow/network.h"
 #include "flow/flow.h"
 #include "flow/ActorCollection.h"
@@ -477,14 +478,8 @@ private:
                     self->actors_.add(processWaitMetricsRequest(self, req));
                 }
                 when(SplitMetricsRequest req = waitNext(ssi.splitMetrics.getFuture())) {
-                    dprint("Handle SplitMetrics {}\n", req.keys.toString());
-                    SplitMetricsReply rep;
-                    for (auto granule : self->blobGranules_) {
-                        // TODO: Use granule boundary as split point. A better approach is to split by size
-                        if (granule.keyRange.begin > req.keys.begin && granule.keyRange.end < req.keys.end)
-                            rep.splits.push_back_deep(rep.splits.arena(), granule.keyRange.begin);
-                    }
-                    req.reply.send(rep);
+                    dprint("Handle SplitMetrics {} limit {} bytes\n", req.keys.toString(), req.limits.bytes);
+                    processSplitMetricsRequest(self, req);
                 }
                 when(GetStorageMetricsRequest req = waitNext(ssi.getStorageMetrics.getFuture())) {
                     StorageMetrics metrics;
@@ -573,6 +568,34 @@ private:
         }
     }
 
+    // This API is used by DD to figure out split points for data movement.
+    static void processSplitMetricsRequest(Reference<BlobMigrator> self, SplitMetricsRequest req) {
+        SplitMetricsReply rep;
+        int64_t bytes = 0; // number of bytes accumulated for current split
+        for (auto& granule : self->blobGranules_) {
+            if (!req.keys.contains(granule.keyRange)) {
+                continue;
+            }
+            bytes += granule.sizeInBytes;
+            if (bytes < req.limits.bytes) {
+                continue;
+            }
+            // Add a split point if the key range exceeds expected minimal size in bytes
+            rep.splits.push_back_deep(rep.splits.arena(), granule.keyRange.end);
+            bytes = 0;
+            // Limit number of splits in single response for fast RPC processing
+            if (rep.splits.size() > SERVER_KNOBS->SPLIT_METRICS_MAX_ROWS) {
+                CODE_PROBE(true, "Blob Migrator SplitMetrics API has more");
+                TraceEvent("BlobMigratorSplitMetricsContinued", self->interf_.id())
+                    .detail("Range", req.keys)
+                    .detail("Splits", rep.splits.size());
+                rep.more = true;
+                break;
+            }
+        }
+        req.reply.send(rep);
+    }
+
     ACTOR static Future<Void> processWaitMetricsRequest(Reference<BlobMigrator> self, WaitMetricsRequest req) {
         state WaitMetricsRequest waitMetricsRequest = req;
         // FIXME get rid of this delay. it's a temp solution to avoid starvation scheduling of DD
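The new request handler above is a straightforward accumulate-and-cut scan over the migrator's granules. A minimal Python sketch of the same control flow, with hypothetical granule tuples standing in for blobGranules_:

    # Each granule is a hypothetical (begin_key, end_key, size_in_bytes) tuple.
    def split_points(granules, req_begin, req_end, byte_limit, max_rows):
        splits, more, acc = [], False, 0
        for begin, end, size in granules:
            if not (req_begin <= begin and end <= req_end):  # skip granules outside the range
                continue
            acc += size
            if acc < byte_limit:
                continue
            splits.append(end)  # cut at the granule's end key once enough bytes accumulate
            acc = 0
            if len(splits) > max_rows:  # cap the reply and signal continuation
                more = True
                break
        return splits, more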
@@ -600,7 +623,7 @@ private:
     static int64_t sizeInBytes(Reference<BlobMigrator> self, KeyRangeRef range) {
         int64_t bytes = 0;
         for (auto granule : self->blobGranules_) {
-            if (range.intersects(granule.keyRange))
+            if (range.contains(granule.keyRange))
                 bytes += granule.sizeInBytes;
         }
         return bytes;
@@ -164,8 +164,7 @@ private:
         // Each tenant group in the tenant group map should be present in the cluster tenant group map
         // and have the correct cluster assigned to it.
         for (auto const& [name, entry] : data.tenantData.tenantGroupMap) {
-            ASSERT(entry.assignedCluster.present());
-            auto clusterItr = data.clusterTenantGroupMap.find(entry.assignedCluster.get());
+            auto clusterItr = data.clusterTenantGroupMap.find(entry.assignedCluster);
             ASSERT(clusterItr->second.count(name));
         }
 
@@ -181,8 +180,8 @@ private:
                                                       ClusterName clusterName,
                                                       DataClusterMetadata clusterMetadata) {
         state Reference<IDatabase> dataDb = wait(MetaclusterAPI::openDatabase(clusterMetadata.connectionString));
-        state TenantConsistencyCheck<IDatabase, TenantMapEntry> tenantConsistencyCheck(dataDb,
-                                                                                       &TenantMetadata::instance());
+        state TenantConsistencyCheck<IDatabase, StandardTenantTypes> tenantConsistencyCheck(
+            dataDb, &TenantMetadata::instance());
         wait(tenantConsistencyCheck.run());
 
         auto dataClusterItr = self->metaclusterData.dataClusterMetadata.find(clusterName);
@@ -262,7 +261,6 @@ private:
         }
         for (auto const& [name, entry] : data.tenantData.tenantGroupMap) {
             ASSERT(expectedTenantGroups.count(name));
-            ASSERT(!entry.assignedCluster.present());
             expectedTenantGroups.erase(name);
         }
 
@@ -274,7 +272,7 @@ private:
     }
 
     ACTOR static Future<Void> run(MetaclusterConsistencyCheck* self) {
-        state TenantConsistencyCheck<DB, MetaclusterTenantMapEntry> managementTenantConsistencyCheck(
+        state TenantConsistencyCheck<DB, MetaclusterTenantTypes> managementTenantConsistencyCheck(
             self->managementDb, &MetaclusterAPI::ManagementClusterMetadata::tenantMetadata());
 
         wait(managementTenantConsistencyCheck.run() && self->metaclusterData.load() && checkManagementSystemKeys(self));
@@ -53,7 +53,7 @@ public:
         std::map<ClusterName, std::set<TenantGroupName>> clusterTenantGroupMap;
 
         Optional<int64_t> tenantIdPrefix;
-        TenantData<DB, MetaclusterTenantMapEntry> tenantData;
+        TenantData<DB, MetaclusterTenantTypes> tenantData;
 
         // Similar to operator==, but useful in assertions for identifying which member is different
         void assertEquals(ManagementClusterData const& other) const {
@@ -83,7 +83,7 @@ public:
 
     struct DataClusterData {
         Optional<MetaclusterRegistrationEntry> metaclusterRegistration;
-        TenantData<DB, TenantMapEntry> tenantData;
+        TenantData<DB, StandardTenantTypes> tenantData;
 
         // Similar to operator==, but useful in assertions for identifying which member is different
         void assertEquals(DataClusterData const& other) const {
@@ -114,7 +114,7 @@ private:
         state KeyBackedRangeResult<Tuple> clusterTenantTuples;
         state KeyBackedRangeResult<Tuple> clusterTenantGroupTuples;
 
-        self->managementMetadata.tenantData = TenantData<DB, MetaclusterTenantMapEntry>(
+        self->managementMetadata.tenantData = TenantData<DB, MetaclusterTenantTypes>(
             self->managementDb, &MetaclusterAPI::ManagementClusterMetadata::tenantMetadata());
 
         loop {
@@ -193,7 +193,7 @@ private:
         state Reference<ITransaction> tr = dataDb->createTransaction();
 
         clusterItr.first->second.tenantData =
-            TenantData<IDatabase, TenantMapEntry>(dataDb, &TenantMetadata::instance());
+            TenantData<IDatabase, StandardTenantTypes>(dataDb, &TenantMetadata::instance());
         loop {
             try {
                 tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
@@ -39,10 +39,10 @@
 #include "fdbserver/workloads/TenantData.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-template <class DB, class TenantMapEntryT>
+template <class DB, class TenantTypes>
 class TenantConsistencyCheck {
 private:
-    TenantData<DB, TenantMapEntryT> tenantData;
+    TenantData<DB, TenantTypes> tenantData;
 
     // Note: this check can only be run on metaclusters with a reasonable number of tenants, as should be
     // the case with the current metacluster simulation workloads
@@ -80,7 +80,7 @@ private:
     }
 
     // Specialization for TenantMapEntry, used on data and standalone clusters
-    void validateTenantMetadata(TenantData<DB, TenantMapEntry> tenantData) {
+    void validateTenantMetadata(TenantData<DB, StandardTenantTypes> tenantData) {
         ASSERT(tenantData.clusterType == ClusterType::METACLUSTER_DATA ||
                tenantData.clusterType == ClusterType::STANDALONE);
         ASSERT_LE(tenantData.tenantMap.size(), CLIENT_KNOBS->MAX_TENANTS_PER_CLUSTER);
@@ -90,7 +90,7 @@ private:
     }
 
     // Specialization for MetaclusterTenantMapEntry, used on management clusters
-    void validateTenantMetadata(TenantData<DB, MetaclusterTenantMapEntry> tenantData) {
+    void validateTenantMetadata(TenantData<DB, MetaclusterTenantTypes> tenantData) {
         ASSERT(tenantData.clusterType == ClusterType::METACLUSTER_MANAGEMENT);
         ASSERT_LE(tenantData.tenantMap.size(), metaclusterMaxTenants);
 
@@ -99,7 +99,7 @@ private:
         for (auto [tenantId, tenantMapEntry] : tenantData.tenantMap) {
             if (tenantMapEntry.tenantGroup.present()) {
                 auto tenantGroupMapItr = tenantData.tenantGroupMap.find(tenantMapEntry.tenantGroup.get());
-                ASSERT(tenantMapEntry.assignedCluster == tenantGroupMapItr->second.assignedCluster.get());
+                ASSERT(tenantMapEntry.assignedCluster == tenantGroupMapItr->second.assignedCluster);
             }
             if (tenantMapEntry.renameDestination.present()) {
                 ASSERT(tenantMapEntry.tenantState == MetaclusterAPI::TenantState::RENAMING ||
@@ -148,7 +148,7 @@ private:
 
 public:
     TenantConsistencyCheck() {}
-    TenantConsistencyCheck(Reference<DB> db, TenantMetadataSpecification<TenantMapEntryT>* tenantMetadata)
+    TenantConsistencyCheck(Reference<DB> db, TenantMetadataSpecification<TenantTypes>* tenantMetadata)
       : tenantData(db, tenantMetadata) {}
 
     Future<Void> run() { return run(this); }
@@ -38,22 +38,22 @@
 #include "fdbclient/TenantManagement.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
-template <class DB, class TenantMapEntryT>
+template <class DB, class TenantTypes>
 class TenantData {
 public:
     Reference<DB> db;
-    TenantMetadataSpecification<TenantMapEntryT>* tenantMetadata;
+    TenantMetadataSpecification<TenantTypes>* tenantMetadata;
 
     Optional<MetaclusterRegistrationEntry> metaclusterRegistration;
     ClusterType clusterType;
 
-    std::map<int64_t, TenantMapEntryT> tenantMap;
+    std::map<int64_t, typename TenantTypes::TenantMapEntryT> tenantMap;
     std::map<TenantName, int64_t> tenantNameIndex;
     int64_t lastTenantId;
     int64_t tenantCount;
     std::set<int64_t> tenantTombstones;
     Optional<TenantTombstoneCleanupData> tombstoneCleanupData;
-    std::map<TenantGroupName, TenantGroupEntry> tenantGroupMap;
+    std::map<TenantGroupName, typename TenantTypes::TenantGroupEntryT> tenantGroupMap;
     std::map<TenantGroupName, std::set<int64_t>> tenantGroupIndex;
     std::map<TenantGroupName, int64_t> storageQuotas;
 
@@ -64,10 +64,10 @@ private:
 
     ACTOR template <class Transaction>
     static Future<Void> loadTenantMetadata(TenantData* self, Transaction tr) {
-        state KeyBackedRangeResult<std::pair<int64_t, TenantMapEntryT>> tenantList;
+        state KeyBackedRangeResult<std::pair<int64_t, typename TenantTypes::TenantMapEntryT>> tenantList;
         state KeyBackedRangeResult<std::pair<TenantName, int64_t>> tenantNameIndexList;
         state KeyBackedRangeResult<int64_t> tenantTombstoneList;
-        state KeyBackedRangeResult<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroupList;
+        state KeyBackedRangeResult<std::pair<TenantGroupName, typename TenantTypes::TenantGroupEntryT>> tenantGroupList;
         state KeyBackedRangeResult<Tuple> tenantGroupTenantTuples;
         state KeyBackedRangeResult<std::pair<TenantGroupName, int64_t>> storageQuotaList;
 
@@ -90,7 +90,8 @@ private:
             store(storageQuotaList, self->tenantMetadata->storageQuota.getRange(tr, {}, {}, metaclusterMaxTenants)));
 
         ASSERT(!tenantList.more);
-        self->tenantMap = std::map<int64_t, TenantMapEntryT>(tenantList.results.begin(), tenantList.results.end());
+        self->tenantMap = std::map<int64_t, typename TenantTypes::TenantMapEntryT>(tenantList.results.begin(),
+                                                                                   tenantList.results.end());
 
         ASSERT(!tenantNameIndexList.more);
         self->tenantNameIndex =
@@ -101,8 +102,8 @@ private:
             std::set<int64_t>(tenantTombstoneList.results.begin(), tenantTombstoneList.results.end());
 
         ASSERT(!tenantGroupList.more);
-        self->tenantGroupMap =
-            std::map<TenantGroupName, TenantGroupEntry>(tenantGroupList.results.begin(), tenantGroupList.results.end());
+        self->tenantGroupMap = std::map<TenantGroupName, typename TenantTypes::TenantGroupEntryT>(
+            tenantGroupList.results.begin(), tenantGroupList.results.end());
 
         ASSERT(!storageQuotaList.more);
         self->storageQuotas =
@@ -123,7 +124,7 @@ private:
 
 public:
     TenantData() {}
-    TenantData(Reference<DB> db, TenantMetadataSpecification<TenantMapEntryT>* tenantMetadata)
+    TenantData(Reference<DB> db, TenantMetadataSpecification<TenantTypes>* tenantMetadata)
       : db(db), tenantMetadata(tenantMetadata) {}
 
     Future<Void> load() {
@@ -45,12 +45,12 @@ FDB_DEFINE_BOOLEAN_PARAM(AllowPartialMetaclusterOperations);
 struct MetaclusterManagementWorkload : TestWorkload {
     static constexpr auto NAME = "MetaclusterManagement";
 
-    struct TenantData : ReferenceCounted<TenantData> {
+    struct TenantTestData : ReferenceCounted<TenantTestData> {
         ClusterName cluster;
         Optional<TenantGroupName> tenantGroup;
 
-        TenantData() {}
-        TenantData(ClusterName cluster, Optional<TenantGroupName> tenantGroup)
+        TenantTestData() {}
+        TenantTestData(ClusterName cluster, Optional<TenantGroupName> tenantGroup)
           : cluster(cluster), tenantGroup(tenantGroup) {}
     };
 
@@ -68,7 +68,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         bool detached = false;
         int tenantGroupCapacity = 0;
 
-        std::map<TenantName, Reference<TenantData>> tenants;
+        std::map<TenantName, Reference<TenantTestData>> tenants;
         std::map<TenantGroupName, Reference<TenantGroupData>> tenantGroups;
         std::set<TenantName> ungroupedTenants;
 
@@ -83,7 +83,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
     std::vector<ClusterName> dataDbIndex;
 
     int64_t totalTenantGroupCapacity = 0;
-    std::map<TenantName, Reference<TenantData>> createdTenants;
+    std::map<TenantName, Reference<TenantTestData>> createdTenants;
 
     int maxTenants;
     int maxTenantGroups;
@@ -709,7 +709,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         ASSERT(entry.tenantGroup == tenantGroup);
         ASSERT(TenantAPI::getTenantIdPrefix(entry.id) == self->tenantIdPrefix);
 
-        Reference<TenantData> tenantData = makeReference<TenantData>(entry.assignedCluster, tenantGroup);
+        Reference<TenantTestData> tenantData = makeReference<TenantTestData>(entry.assignedCluster, tenantGroup);
         self->createdTenants[tenant] = tenantData;
 
         auto assignedCluster = self->dataDbs.find(entry.assignedCluster);
@@ -53,7 +53,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         DataClusterData(Database db) : db(db) {}
     };
 
-    struct TenantData {
+    struct RestoreTenantData {
         enum class AccessTime { NONE, BEFORE_BACKUP, DURING_BACKUP, AFTER_BACKUP };
 
         TenantName name;
@@ -63,8 +63,11 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         AccessTime renameTime = AccessTime::NONE;
         AccessTime configureTime = AccessTime::NONE;
 
-        TenantData() {}
-        TenantData(TenantName name, ClusterName cluster, Optional<TenantGroupName> tenantGroup, AccessTime createTime)
+        RestoreTenantData() {}
+        RestoreTenantData(TenantName name,
+                          ClusterName cluster,
+                          Optional<TenantGroupName> tenantGroup,
+                          AccessTime createTime)
           : name(name), cluster(cluster), tenantGroup(tenantGroup), createTime(createTime) {}
     };
 
@@ -77,7 +80,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
     std::map<ClusterName, DataClusterData> dataDbs;
     std::vector<ClusterName> dataDbIndex;
 
-    std::map<int64_t, TenantData> createdTenants;
+    std::map<int64_t, RestoreTenantData> createdTenants;
     std::map<TenantName, int64_t> tenantNameIndex;
     std::map<TenantGroupName, TenantGroupData> tenantGroups;
 
@@ -202,7 +205,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         TraceEvent(SevDebug, "MetaclusterRestoreWorkloadCreateTenants").detail("NumTenants", self->initialTenants);
 
         while (self->createdTenants.size() < self->initialTenants) {
-            wait(createTenant(self, TenantData::AccessTime::BEFORE_BACKUP));
+            wait(createTenant(self, RestoreTenantData::AccessTime::BEFORE_BACKUP));
         }
 
         TraceEvent(SevDebug, "MetaclusterRestoreWorkloadCreateTenantsComplete");
@@ -380,10 +383,10 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         return waitForAll(deleteFutures);
     }
 
-    ACTOR template <class Transaction, class TenantMapEntryImpl>
+    ACTOR template <class Transaction, class TenantTypes>
     static Future<std::unordered_set<int64_t>> getTenantsInGroup(
         Transaction tr,
-        TenantMetadataSpecification<TenantMapEntryImpl> tenantMetadata,
+        TenantMetadataSpecification<TenantTypes> tenantMetadata,
         TenantGroupName tenantGroup) {
         KeyBackedRangeResult<Tuple> groupTenants =
             wait(tenantMetadata.tenantGroupTenantIndex.getRange(tr,
@@ -471,7 +474,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
     ACTOR static Future<std::pair<TenantCollisions, GroupCollisions>> getCollisions(MetaclusterRestoreWorkload* self,
                                                                                     Database db) {
         state KeyBackedRangeResult<std::pair<TenantName, int64_t>> managementTenantList;
-        state KeyBackedRangeResult<std::pair<TenantGroupName, TenantGroupEntry>> managementGroupList;
+        state KeyBackedRangeResult<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>> managementGroupList;
         state KeyBackedRangeResult<std::pair<TenantName, int64_t>> dataClusterTenants;
         state KeyBackedRangeResult<std::pair<TenantGroupName, TenantGroupEntry>> dataClusterGroups;
 
@@ -506,8 +509,8 @@ struct MetaclusterRestoreWorkload : TestWorkload {
 
         std::unordered_map<TenantName, int64_t> managementTenants(managementTenantList.results.begin(),
                                                                   managementTenantList.results.end());
-        std::unordered_map<TenantGroupName, TenantGroupEntry> managementGroups(managementGroupList.results.begin(),
-                                                                               managementGroupList.results.end());
+        std::unordered_map<TenantGroupName, MetaclusterTenantGroupEntry> managementGroups(
+            managementGroupList.results.begin(), managementGroupList.results.end());
 
         ASSERT(managementTenants.size() <= CLIENT_KNOBS->MAX_TENANTS_PER_CLUSTER);
         ASSERT(managementGroups.size() <= CLIENT_KNOBS->MAX_TENANTS_PER_CLUSTER);
@@ -745,7 +748,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         }
     }
 
-    ACTOR static Future<Void> createTenant(MetaclusterRestoreWorkload* self, TenantData::AccessTime createTime) {
+    ACTOR static Future<Void> createTenant(MetaclusterRestoreWorkload* self, RestoreTenantData::AccessTime createTime) {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
             tenantName = self->chooseTenantName();
@@ -771,7 +774,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             .detail("TenantId", createdEntry.id)
             .detail("AccessTime", createTime);
         self->createdTenants[createdEntry.id] =
-            TenantData(tenantName, createdEntry.assignedCluster, createdEntry.tenantGroup, createTime);
+            RestoreTenantData(tenantName, createdEntry.assignedCluster, createdEntry.tenantGroup, createTime);
         self->tenantNameIndex[tenantName] = createdEntry.id;
         auto& dataDb = self->dataDbs[createdEntry.assignedCluster];
         dataDb.tenants.insert(createdEntry.id);
@@ -792,7 +795,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         }
     }
 
-    ACTOR static Future<Void> deleteTenant(MetaclusterRestoreWorkload* self, TenantData::AccessTime accessTime) {
+    ACTOR static Future<Void> deleteTenant(MetaclusterRestoreWorkload* self, RestoreTenantData::AccessTime accessTime) {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
             tenantName = self->chooseTenantName();
@@ -832,7 +835,8 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         return Void();
     }
 
-    ACTOR static Future<Void> configureTenant(MetaclusterRestoreWorkload* self, TenantData::AccessTime accessTime) {
+    ACTOR static Future<Void> configureTenant(MetaclusterRestoreWorkload* self,
+                                              RestoreTenantData::AccessTime accessTime) {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
             tenantName = self->chooseTenantName();
@@ -894,7 +898,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         }
     }
 
-    ACTOR static Future<Void> renameTenant(MetaclusterRestoreWorkload* self, TenantData::AccessTime accessTime) {
+    ACTOR static Future<Void> renameTenant(MetaclusterRestoreWorkload* self, RestoreTenantData::AccessTime accessTime) {
         state TenantName oldTenantName;
         state TenantName newTenantName;
         for (int i = 0; i < 10; ++i) {
@@ -923,7 +927,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             .detail("AccessTime", accessTime);
         wait(MetaclusterAPI::renameTenant(self->managementDb, oldTenantName, newTenantName));
 
-        TenantData& tenantData = self->createdTenants[tenantId];
+        RestoreTenantData& tenantData = self->createdTenants[tenantId];
         tenantData.name = newTenantName;
         tenantData.renameTime = accessTime;
         self->tenantNameIndex[newTenantName] = tenantId;
@@ -935,8 +939,9 @@ struct MetaclusterRestoreWorkload : TestWorkload {
     ACTOR static Future<Void> runOperations(MetaclusterRestoreWorkload* self) {
         while (now() < self->endTime) {
            state int operation = deterministicRandom()->randomInt(0, 4);
-           state TenantData::AccessTime accessTime =
-               self->backupComplete ? TenantData::AccessTime::AFTER_BACKUP : TenantData::AccessTime::DURING_BACKUP;
+           state RestoreTenantData::AccessTime accessTime = self->backupComplete
+                                                                ? RestoreTenantData::AccessTime::AFTER_BACKUP
+                                                                : RestoreTenantData::AccessTime::DURING_BACKUP;
            if (operation == 0) {
                wait(createTenant(self, accessTime));
            } else if (operation == 1) {
@@ -1082,21 +1087,21 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         int expectedTenantCount = 0;
         std::map<int64_t, TenantMapEntry> tenantMap(tenants.results.begin(), tenants.results.end());
         for (auto tenantId : clusterData.tenants) {
-            TenantData tenantData = self->createdTenants[tenantId];
+            RestoreTenantData tenantData = self->createdTenants[tenantId];
             auto tenantItr = tenantMap.find(tenantId);
-            if (tenantData.createTime == TenantData::AccessTime::BEFORE_BACKUP) {
+            if (tenantData.createTime == RestoreTenantData::AccessTime::BEFORE_BACKUP) {
                 ++expectedTenantCount;
                 ASSERT(tenantItr != tenantMap.end());
                 ASSERT(tenantData.cluster == clusterName);
                 if (!self->recoverManagementCluster ||
-                    tenantData.configureTime <= TenantData::AccessTime::BEFORE_BACKUP) {
+                    tenantData.configureTime <= RestoreTenantData::AccessTime::BEFORE_BACKUP) {
                     ASSERT(tenantItr->second.tenantGroup == tenantData.tenantGroup);
                 }
                 if (!self->recoverManagementCluster ||
-                    tenantData.renameTime <= TenantData::AccessTime::BEFORE_BACKUP) {
+                    tenantData.renameTime <= RestoreTenantData::AccessTime::BEFORE_BACKUP) {
                     ASSERT(tenantItr->second.tenantName == tenantData.name);
                 }
-            } else if (tenantData.createTime == TenantData::AccessTime::AFTER_BACKUP) {
+            } else if (tenantData.createTime == RestoreTenantData::AccessTime::AFTER_BACKUP) {
                 ASSERT(tenantItr == tenantMap.end());
             } else if (tenantItr != tenantMap.end()) {
                 ++expectedTenantCount;
@@ -1156,14 +1161,14 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             if (tenantItr == tenantMap.end()) {
                 // A tenant that we expected to have been created can only be missing from the management cluster if we
                 // lost data in the process of recovering both the management and some data clusters
-                ASSERT_NE(tenantData.createTime, TenantData::AccessTime::BEFORE_BACKUP);
+                ASSERT_NE(tenantData.createTime, RestoreTenantData::AccessTime::BEFORE_BACKUP);
                 ASSERT(self->dataDbs[tenantData.cluster].restored && self->recoverManagementCluster);
             } else {
-                if (tenantData.createTime != TenantData::AccessTime::BEFORE_BACKUP &&
+                if (tenantData.createTime != RestoreTenantData::AccessTime::BEFORE_BACKUP &&
                     self->dataDbs[tenantData.cluster].restored) {
                     ASSERT(tenantItr->second.tenantState == MetaclusterAPI::TenantState::ERROR ||
                            (tenantItr->second.tenantState == MetaclusterAPI::TenantState::READY &&
-                            tenantData.createTime == TenantData::AccessTime::DURING_BACKUP));
+                            tenantData.createTime == RestoreTenantData::AccessTime::DURING_BACKUP));
                     if (tenantItr->second.tenantState == MetaclusterAPI::TenantState::ERROR) {
                         ASSERT(self->dataDbs[tenantData.cluster].restoreHasMessages);
                     }
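
The assertions above encode the workload's recovery invariant: a tenant may legitimately be absent from the management cluster only if it was created during or after the backup and both its data cluster and the management cluster were restored. A compact restatement as a predicate (hypothetical helper, using the sketched enum above):

    // True iff it is acceptable for this tenant to be missing from the
    // management cluster after a restore.
    bool missingTenantExplainable(RestoreTenantData const& t,
                                  bool dataClusterRestored,
                                  bool recoverManagementCluster) {
        return t.createTime != RestoreTenantData::AccessTime::BEFORE_BACKUP &&
               dataClusterRestored && recoverManagementCluster;
    }
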
@@ -412,7 +412,7 @@ struct TenantManagementConcurrencyWorkload : TestWorkload {
                 self->mvDb, AllowPartialMetaclusterOperations::True);
             wait(metaclusterConsistencyCheck.run());
         } else {
-            state TenantConsistencyCheck<DatabaseContext, TenantMapEntry> tenantConsistencyCheck(
+            state TenantConsistencyCheck<DatabaseContext, StandardTenantTypes> tenantConsistencyCheck(
                 self->dataDb.getReference(), &TenantMetadata::instance());
             wait(tenantConsistencyCheck.run());
         }
@@ -50,15 +50,15 @@
 struct TenantManagementWorkload : TestWorkload {
     static constexpr auto NAME = "TenantManagement";

-    struct TenantData {
+    struct TenantTestData {
         Reference<Tenant> tenant;
         Optional<TenantGroupName> tenantGroup;
         bool empty;

-        TenantData() : empty(true) {}
-        TenantData(int64_t id, Optional<TenantGroupName> tenantGroup, bool empty)
+        TenantTestData() : empty(true) {}
+        TenantTestData(int64_t id, Optional<TenantGroupName> tenantGroup, bool empty)
           : tenant(makeReference<Tenant>(id)), tenantGroup(tenantGroup), empty(empty) {}
-        TenantData(int64_t id, Optional<TenantName> tName, Optional<TenantGroupName> tenantGroup, bool empty)
+        TenantTestData(int64_t id, Optional<TenantName> tName, Optional<TenantGroupName> tenantGroup, bool empty)
           : tenant(makeReference<Tenant>(id, tName)), tenantGroup(tenantGroup), empty(empty) {}
     };

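
Within TenantManagementWorkload the nested helper struct becomes TenantTestData, which keeps it from being confused with the other TenantData types in the tree (for example the restore workload's struct, itself renamed to RestoreTenantData above). A hypothetical usage, based only on the constructors shown in this hunk:

    // Inside the workload: track a tenant created with id 1, a name,
    // no tenant group, and no data written yet.
    TenantTestData tData(1, "tenant1"_sr, Optional<TenantGroupName>(), true);
    createdTenants["tenant1"_sr] = tData;
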
@@ -66,7 +66,7 @@ struct TenantManagementWorkload : TestWorkload {
         int64_t tenantCount = 0;
     };

-    std::map<TenantName, TenantData> createdTenants;
+    std::map<TenantName, TenantTestData> createdTenants;
     std::map<TenantGroupName, TenantGroupData> createdTenantGroups;
     // Contains references to ALL tenants that were created by this client
     // Possible to have been deleted, but will be tracked historically here
@@ -430,7 +430,7 @@ struct TenantManagementWorkload : TestWorkload {

                 // Update our local tenant state to include the newly created one
                 self->maxId = entry.get().id;
-                TenantData tData = TenantData(entry.get().id, itrName, tGroup, true);
+                TenantTestData tData = TenantTestData(entry.get().id, itrName, tGroup, true);
                 self->createdTenants[itrName] = tData;
                 self->allTestTenants.push_back(tData.tenant);
                 return Void();
@@ -1039,7 +1039,7 @@ struct TenantManagementWorkload : TestWorkload {
     // Performs some validation on a tenant's contents
     ACTOR static Future<Void> checkTenantContents(TenantManagementWorkload* self,
                                                   TenantName tenantName,
-                                                  TenantData tenantData) {
+                                                  TenantTestData tenantData) {
         state Transaction tr(self->dataDb, self->createdTenants[tenantName].tenant);
         loop {
             try {
@@ -1147,7 +1147,7 @@ struct TenantManagementWorkload : TestWorkload {
         auto itr = self->createdTenants.find(tenant);
         state bool alreadyExists = itr != self->createdTenants.end() &&
                                    !(operationType == OperationType::METACLUSTER && !self->useMetacluster);
-        state TenantData tenantData = alreadyExists ? itr->second : TenantData();
+        state TenantTestData tenantData = alreadyExists ? itr->second : TenantTestData();

         loop {
             try {
@@ -1314,7 +1314,7 @@ struct TenantManagementWorkload : TestWorkload {
             wait(TenantAPI::tryGetTenant(self->dataDb.getReference(), newTenantName));
         ASSERT(!oldTenantEntry.present());
         ASSERT(newTenantEntry.present());
-        TenantData tData = self->createdTenants[oldTenantName];
+        TenantTestData tData = self->createdTenants[oldTenantName];
         tData.tenant->name = newTenantName;
         self->createdTenants[newTenantName] = tData;
         self->createdTenants.erase(oldTenantName);
@@ -1645,20 +1645,20 @@ struct TenantManagementWorkload : TestWorkload {

    // Gets the metadata for a tenant group using the specified operation type
    ACTOR static Future<Optional<TenantGroupEntry>> getTenantGroupImpl(Reference<ReadYourWritesTransaction> tr,
-                                                                      TenantGroupName tenant,
+                                                                      TenantGroupName tenantGroupName,
                                                                       OperationType operationType,
                                                                       TenantManagementWorkload* self) {
        state Optional<TenantGroupEntry> entry;
        if (operationType == OperationType::MANAGEMENT_DATABASE) {
-           wait(store(entry, TenantAPI::tryGetTenantGroup(self->dataDb.getReference(), tenant)));
+           wait(store(entry, TenantAPI::tryGetTenantGroup(self->dataDb.getReference(), tenantGroupName)));
        } else if (operationType == OperationType::MANAGEMENT_TRANSACTION ||
                   operationType == OperationType::SPECIAL_KEYS) {
            // There is no special-keys interface for reading tenant groups currently, so read them
            // using the TenantAPI.
            tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
-           wait(store(entry, TenantAPI::tryGetTenantGroupTransaction(tr, tenant)));
+           wait(store(entry, TenantAPI::tryGetTenantGroupTransaction(tr, tenantGroupName)));
        } else {
-           wait(store(entry, MetaclusterAPI::tryGetTenantGroup(self->mvDb, tenant)));
+           UNREACHABLE();
        }

        return entry;
@@ -1677,10 +1677,14 @@
         loop {
             try {
                 // Get the tenant group metadata and check that it matches our local state
-                state Optional<TenantGroupEntry> entry = wait(getTenantGroupImpl(tr, tenantGroup, operationType, self));
-                ASSERT(alreadyExists == entry.present());
-                if (entry.present()) {
-                    ASSERT(entry.get().assignedCluster.present() == (operationType == OperationType::METACLUSTER));
+                if (operationType == OperationType::METACLUSTER) {
+                    state Optional<MetaclusterTenantGroupEntry> mEntry =
+                        wait(MetaclusterAPI::tryGetTenantGroup(self->mvDb, tenantGroup));
+                    ASSERT(alreadyExists == mEntry.present());
+                } else {
+                    state Optional<TenantGroupEntry> entry =
+                        wait(getTenantGroupImpl(tr, tenantGroup, operationType, self));
+                    ASSERT(alreadyExists == entry.present());
                 }
                 return Void();
             } catch (Error& e) {
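
Metacluster reads now return MetaclusterTenantGroupEntry rather than TenantGroupEntry, so they can no longer be funneled through getTenantGroupImpl; the caller branches first, and the old metacluster path in the helper becomes UNREACHABLE() so a regression fails loudly in simulation instead of silently reading the wrong entry type. Note that both branches above declare their results as state variables even though each is confined to its branch: in flow actor code, any local that is live across a wait() must be state so it survives the actor's suspension. A minimal illustration of the rule (hypothetical actor, not from this change):

    ACTOR Future<int> addOne(Future<int> f) {
        state int base = 1; // state: still needed after the wait() below
        int v = wait(f); // plain locals must not span a suspension point
        return base + v;
    }
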
@@ -1727,13 +1731,33 @@ struct TenantManagementWorkload : TestWorkload {
             wait(store(tenantGroups,
                        TenantAPI::listTenantGroupsTransaction(tr, beginTenantGroup, endTenantGroup, limit)));
         } else {
-            wait(store(tenantGroups,
-                       MetaclusterAPI::listTenantGroups(self->mvDb, beginTenantGroup, endTenantGroup, limit)));
+            UNREACHABLE();
         }

         return tenantGroups;
     }

+    template <class TenantMapEntryImpl>
+    static void verifyTenantList(TenantManagementWorkload* self,
+                                 std::vector<std::pair<TenantGroupName, TenantMapEntryImpl>> tenantGroups,
+                                 TenantGroupName beginTenantGroup,
+                                 TenantGroupName endTenantGroup,
+                                 int limit) {
+        ASSERT(tenantGroups.size() <= limit);
+
+        // Compare the resulting tenant group list to the list we expected to get
+        auto localItr = self->createdTenantGroups.lower_bound(beginTenantGroup);
+        auto tenantMapItr = tenantGroups.begin();
+        for (; tenantMapItr != tenantGroups.end(); ++tenantMapItr, ++localItr) {
+            ASSERT(localItr != self->createdTenantGroups.end());
+            ASSERT(localItr->first == tenantMapItr->first);
+        }
+
+        // Make sure the list terminated at the right spot
+        ASSERT(tenantGroups.size() == limit || localItr == self->createdTenantGroups.end() ||
+               localItr->first >= endTenantGroup);
+    }
+
     ACTOR static Future<Void> listTenantGroups(TenantManagementWorkload* self) {
         state TenantGroupName beginTenantGroup = self->chooseTenantGroup(false, false).get();
         state TenantGroupName endTenantGroup = self->chooseTenantGroup(false, false).get();
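
The new verifyTenantList helper templates only on the entry type, since the check compares nothing but keys; that lets one function validate both TenantGroupEntry and MetaclusterTenantGroupEntry listings, which previously required duplicated loops. A stand-alone analogue of the pattern (hypothetical names, plain C++ so it compiles outside flow):

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Generic key-order check: the value type is irrelevant, so template on it.
    template <class EntryT>
    void verifyListedKeys(const std::map<std::string, int>& expected,
                          const std::vector<std::pair<std::string, EntryT>>& listed,
                          const std::string& begin,
                          const std::string& end,
                          size_t limit) {
        assert(listed.size() <= limit);
        auto localItr = expected.lower_bound(begin);
        for (auto it = listed.begin(); it != listed.end(); ++it, ++localItr) {
            assert(localItr != expected.end());
            assert(localItr->first == it->first);
        }
        // The listing must stop at the limit, at the end of local state,
        // or at the end of the requested range.
        assert(listed.size() == limit || localItr == expected.end() || localItr->first >= end);
    }
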
@@ -1749,29 +1773,21 @@
         loop {
             try {
-                // Attempt to read the chosen list of tenant groups
-                state std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups =
-                    wait(listTenantGroupsImpl(tr, beginTenantGroup, endTenantGroup, limit, operationType, self));
-
-                // Attempting to read the list of tenant groups using the metacluster API in a non-metacluster should
-                // return nothing in this test
-                if (operationType == OperationType::METACLUSTER && !self->useMetacluster) {
-                    ASSERT(tenantGroups.size() == 0);
-                    return Void();
+                if (operationType == OperationType::METACLUSTER) {
+                    state std::vector<std::pair<TenantGroupName, MetaclusterTenantGroupEntry>> mTenantGroups =
+                        wait(MetaclusterAPI::listTenantGroups(self->mvDb, beginTenantGroup, endTenantGroup, limit));
+                    // Attempting to read the list of tenant groups using the metacluster API in a non-metacluster
+                    // should return nothing in this test
+                    if (!self->useMetacluster) {
+                        ASSERT(mTenantGroups.size() == 0);
+                        return Void();
                     }
+                    verifyTenantList(self, mTenantGroups, beginTenantGroup, endTenantGroup, limit);
+                } else {
+                    state std::vector<std::pair<TenantGroupName, TenantGroupEntry>> tenantGroups =
+                        wait(listTenantGroupsImpl(tr, beginTenantGroup, endTenantGroup, limit, operationType, self));
+                    verifyTenantList(self, tenantGroups, beginTenantGroup, endTenantGroup, limit);
+                }

-                ASSERT(tenantGroups.size() <= limit);
-
-                // Compare the resulting tenant group list to the list we expected to get
-                auto localItr = self->createdTenantGroups.lower_bound(beginTenantGroup);
-                auto tenantMapItr = tenantGroups.begin();
-                for (; tenantMapItr != tenantGroups.end(); ++tenantMapItr, ++localItr) {
-                    ASSERT(localItr != self->createdTenantGroups.end());
-                    ASSERT(localItr->first == tenantMapItr->first);
-                }
-
-                // Make sure the list terminated at the right spot
-                ASSERT(tenantGroups.size() == limit || localItr == self->createdTenantGroups.end() ||
-                       localItr->first >= endTenantGroup);
                 return Void();
             } catch (Error& e) {
                 state bool retry = false;
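
With listing and verification split by entry type, the duplicated comparison logic collapses into the shared helper. Exercising the stand-alone analogue from the previous note (hypothetical values):

    int main() {
        std::map<std::string, int> expected{ { "groupA", 0 }, { "groupB", 0 } };
        std::vector<std::pair<std::string, int>> listed{ { "groupA", 0 }, { "groupB", 0 } };
        verifyListedKeys(expected, listed, "groupA", "groupZ", 10);
        return 0;
    }
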
@@ -1809,7 +1825,7 @@ struct TenantManagementWorkload : TestWorkload {
        state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->dataDb, tenant);
        state TenantName tName = tenant->name.get();
        state bool tenantPresent = false;
-       state TenantData tData = TenantData();
+       state TenantTestData tData = TenantTestData();
        auto itr = self->createdTenants.find(tName);
        if (itr != self->createdTenants.end() && itr->second.tenant->id() == tenant->id()) {
            tenantPresent = true;
@@ -1890,7 +1906,7 @@ struct TenantManagementWorkload : TestWorkload {

     // Verify that the set of tenants in the database matches our local state
     ACTOR static Future<Void> compareTenants(TenantManagementWorkload* self) {
-        state std::map<TenantName, TenantData>::iterator localItr = self->createdTenants.begin();
+        state std::map<TenantName, TenantTestData>::iterator localItr = self->createdTenants.begin();
         state std::vector<Future<Void>> checkTenants;
         state TenantName beginTenant = ""_sr.withPrefix(self->localTenantNamePrefix);
         state TenantName endTenant = "\xff\xff"_sr.withPrefix(self->localTenantNamePrefix);
@@ -1973,7 +1989,6 @@ struct TenantManagementWorkload : TestWorkload {
         while (dataItr != dataClusterTenantGroups.results.end()) {
             ASSERT(localItr != self->createdTenantGroups.end());
             ASSERT(dataItr->first == localItr->first);
-            ASSERT(!dataItr->second.assignedCluster.present());
             lastTenantGroup = dataItr->first;

             checkTenantGroups.push_back(checkTenantGroupTenantCount(
@@ -2039,7 +2054,7 @@ struct TenantManagementWorkload : TestWorkload {
             wait(metaclusterConsistencyCheck.run());
             wait(checkTombstoneCleanup(self));
         } else {
-            state TenantConsistencyCheck<DatabaseContext, TenantMapEntry> tenantConsistencyCheck(
+            state TenantConsistencyCheck<DatabaseContext, StandardTenantTypes> tenantConsistencyCheck(
                 self->dataDb.getReference(), &TenantMetadata::instance());
             wait(tenantConsistencyCheck.run());
         }
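
In both workloads, TenantConsistencyCheck's second template argument changes from the single entry type TenantMapEntry to StandardTenantTypes. The definition of StandardTenantTypes is not part of this diff; a plausible shape, stated as an assumption, is a trait bundling the entry types a standard (non-metacluster) cluster uses, mirroring the metacluster pairing of MetaclusterTenantMapEntry with MetaclusterTenantGroupEntry:

    // Assumed sketch only; the real definition lives elsewhere in the tree.
    struct StandardTenantTypes {
        using TenantMapEntryT = TenantMapEntry;
        using TenantGroupEntryT = TenantGroupEntry;
    };
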