Add tenant group support to the tenant management workload and test configure and rename tenant operations. Fix a few error types picked up by the test.
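The capacity model this commit introduces can be stated compactly: each data cluster allocates one slot per named tenant group plus one slot per ungrouped tenant, and joining an existing group consumes no additional slot. A minimal standalone C++ sketch of that accounting, using simplified stand-in types rather than the workload's own classes:

    #include <set>
    #include <string>

    // Simplified stand-in for the per-cluster bookkeeping in the workload.
    struct ClusterCapacityModel {
        int tenantGroupCapacity = 0;            // configured numTenantGroups
        std::set<std::string> tenantGroups;     // named groups assigned here
        std::set<std::string> ungroupedTenants; // each consumes its own slot

        // One slot per named group, one per ungrouped tenant.
        int allocatedGroups() const {
            return static_cast<int>(tenantGroups.size() + ungroupedTenants.size());
        }

        // A brand-new group (or a new ungrouped tenant) needs a free slot;
        // adding a tenant to an existing group does not.
        bool hasCapacityForNewAllocation() const {
            return allocatedGroups() < tenantGroupCapacity;
        }
    };

This is why the hasCapacity checks in the diff below compare ungroupedTenants.size() + tenantGroups.size() against the capacity rather than the raw tenant count.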
commit 093db25d11
parent b7fc2f1e5c
@@ -1609,7 +1609,7 @@ struct ConfigureTenantImpl {
 	// Removing a tenant group is only possible if we have capacity for more groups on the current cluster
 	else if (!desiredGroup.present()) {
 		if (!self->ctx.dataClusterMetadata.get().entry.hasCapacity()) {
-			throw metacluster_no_capacity();
+			throw cluster_no_capacity();
 		}

 		wait(managementClusterRemoveTenantFromGroup(
@@ -1625,7 +1625,7 @@ struct ConfigureTenantImpl {
 	// If we are creating a new tenant group, we need to have capacity on the current cluster
 	if (!tenantGroupEntry.present()) {
 		if (!self->ctx.dataClusterMetadata.get().entry.hasCapacity()) {
-			throw metacluster_no_capacity();
+			throw cluster_no_capacity();
 		}
 		wait(managementClusterRemoveTenantFromGroup(
 		    tr, self->tenantName, tenantEntry, &self->ctx.dataClusterMetadata.get()));
@@ -1645,7 +1645,14 @@ struct ConfigureTenantImpl {

 	// We don't currently support movement between groups on different clusters
 	else {
-		throw cluster_no_capacity();
+		TraceEvent("TenantGroupChangeToDifferentCluster")
+		    .detail("Tenant", self->tenantName)
+		    .detail("OriginalGroup", tenantEntry.tenantGroup)
+		    .detail("DesiredGroup", desiredGroup)
+		    .detail("TenantAssignedCluster", tenantEntry.assignedCluster)
+		    .detail("DesiredGroupAssignedCluster", tenantGroupEntry.get().assignedCluster);
+
+		throw invalid_tenant_configuration();
 	}
 }
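The hunks above adjust ConfigureTenantImpl's failure modes: a capacity problem on the tenant's own data cluster now throws cluster_no_capacity instead of metacluster_no_capacity, and an attempted group move across clusters now logs a trace event and throws invalid_tenant_configuration rather than misreporting a capacity error. A hedged caller-side sketch of the distinction; tryConfigureTenant is a hypothetical stand-in, while the error codes are the ones thrown in the diff:

    // Hypothetical caller; only the error codes are taken from the diff.
    try {
        tryConfigureTenant(); // stand-in for the configure-tenant operation
    } catch (Error& e) {
        if (e.code() == error_code_cluster_no_capacity) {
            // The assigned data cluster has no free tenant-group slots;
            // retrying can succeed once the cluster's capacity is raised.
        } else if (e.code() == error_code_invalid_tenant_configuration) {
            // The desired group lives on a different cluster; movement
            // between clusters is unsupported, so retrying cannot help.
        } else {
            throw;
        }
    }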
@@ -50,6 +50,8 @@ struct MetaclusterManagementWorkload : TestWorkload {
 		int tenantGroupCapacity = 0;

 		std::set<TenantName> tenants;
+		std::set<TenantGroupName> tenantGroups;
+		std::set<TenantName> ungroupedTenants;

 		DataClusterData() {}
 		DataClusterData(Database db) : db(db) {}
@@ -57,13 +59,25 @@ struct MetaclusterManagementWorkload : TestWorkload {

 	struct TenantData {
 		ClusterName cluster;
+		Optional<TenantGroupName> tenantGroup;

 		TenantData() {}
 		TenantData(ClusterName cluster) : cluster(cluster) {}
+		TenantData(ClusterName cluster, Optional<TenantGroupName> tenantGroup)
+		  : cluster(cluster), tenantGroup(tenantGroup) {}
 	};

+	struct TenantGroupData {
+		ClusterName cluster;
+		std::set<TenantName> tenants;
+
+		TenantGroupData() {}
+		TenantGroupData(ClusterName cluster) : cluster(cluster) {}
+	};
+
 	Reference<IDatabase> managementDb;
 	std::map<ClusterName, DataClusterData> dataDbs;
+	std::map<TenantGroupName, TenantGroupData> tenantGroups;
+	std::set<TenantName> ungroupedTenants;
 	std::vector<ClusterName> dataDbIndex;

 	int64_t totalTenantGroupCapacity = 0;
@@ -369,9 +383,10 @@ struct MetaclusterManagementWorkload : TestWorkload {
 		}

 		if (updatedEntry.present()) {
+			int allocatedGroups = dataDb->tenantGroups.size() + dataDb->ungroupedTenants.size();
 			int64_t tenantGroupDelta =
-			    std::max<int64_t>(updatedEntry.get().capacity.numTenantGroups, dataDb->tenants.size()) -
-			    std::max<int64_t>(dataDb->tenantGroupCapacity, dataDb->tenants.size());
+			    std::max<int64_t>(updatedEntry.get().capacity.numTenantGroups, allocatedGroups) -
+			    std::max<int64_t>(dataDb->tenantGroupCapacity, allocatedGroups);

 			self->totalTenantGroupCapacity += tenantGroupDelta;
 			dataDb->tenantGroupCapacity = updatedEntry.get().capacity.numTenantGroups;
@@ -387,10 +402,13 @@ struct MetaclusterManagementWorkload : TestWorkload {

 	ACTOR static Future<Void> createTenant(MetaclusterManagementWorkload* self) {
 		state TenantName tenant = self->chooseTenantName();
+		state Optional<TenantGroupName> tenantGroup = self->chooseTenantGroup();

 		auto itr = self->createdTenants.find(tenant);
 		state bool exists = itr != self->createdTenants.end();
-		state bool hasCapacity = self->createdTenants.size() < self->totalTenantGroupCapacity;
+		state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.count(tenantGroup.get());
+		state bool hasCapacity = tenantGroupExists || self->ungroupedTenants.size() + self->tenantGroups.size() <
+		                                                  self->totalTenantGroupCapacity;
 		state bool retried = false;
 		state bool preferAssignedCluster = deterministicRandom()->coinflip();
 		// Choose between two preferred clusters because if we get a partial completion and
@@ -403,18 +421,21 @@ struct MetaclusterManagementWorkload : TestWorkload {
 			preferredClusters.second = self->chooseClusterName();
 		}

+		state TenantMapEntry tenantMapEntry;
+		tenantMapEntry.tenantGroup = tenantGroup;
+
 		try {
-			state TenantMapEntry createEntry;
 			loop {
 				try {
 					if (preferAssignedCluster && (!retried || deterministicRandom()->coinflip())) {
-						createEntry.assignedCluster =
+						tenantMapEntry.assignedCluster =
 						    deterministicRandom()->coinflip() ? preferredClusters.first : preferredClusters.second;
 						if (!originalPreferredCluster.present()) {
-							originalPreferredCluster = createEntry.assignedCluster.get();
+							originalPreferredCluster = tenantMapEntry.assignedCluster.get();
 						}
 					}
-					Future<Void> createFuture = MetaclusterAPI::createTenant(self->managementDb, tenant, createEntry);
+					Future<Void> createFuture =
+					    MetaclusterAPI::createTenant(self->managementDb, tenant, tenantMapEntry);
 					Optional<Void> result = wait(timeout(createFuture, deterministicRandom()->randomInt(1, 30)));
 					if (result.present()) {
 						break;
@@ -425,10 +446,10 @@ struct MetaclusterManagementWorkload : TestWorkload {
 				if (e.code() == error_code_tenant_already_exists && retried && !exists) {
 					Optional<TenantMapEntry> entry = wait(MetaclusterAPI::tryGetTenant(self->managementDb, tenant));
 					ASSERT(entry.present());
-					createEntry = entry.get();
+					tenantMapEntry = entry.get();
 					break;
 				} else if (preferAssignedCluster && retried &&
-				           originalPreferredCluster.get() != createEntry.assignedCluster.get() &&
+				           originalPreferredCluster.get() != tenantMapEntry.assignedCluster.get() &&
 				           (e.code() == error_code_cluster_no_capacity ||
 				            e.code() == error_code_cluster_not_found ||
 				            e.code() == error_code_invalid_tenant_configuration)) {
@@ -448,14 +469,33 @@ struct MetaclusterManagementWorkload : TestWorkload {
 			ASSERT(!exists);
 			ASSERT(hasCapacity);
 			ASSERT(entry.assignedCluster.present());
+			ASSERT(entry.tenantGroup == tenantGroup);
+
+			if (tenantGroup.present()) {
+				auto tenantGroupData =
+				    self->tenantGroups.try_emplace(tenantGroup.get(), entry.assignedCluster.get()).first;
+				ASSERT(tenantGroupData->second.cluster == entry.assignedCluster.get());
+				tenantGroupData->second.tenants.insert(tenant);
+			} else {
+				self->ungroupedTenants.insert(tenant);
+			}

 			auto assignedCluster = self->dataDbs.find(entry.assignedCluster.get());
-			ASSERT(!preferAssignedCluster || createEntry.assignedCluster.get() == assignedCluster->first);
+			ASSERT(!preferAssignedCluster || tenantMapEntry.assignedCluster.get() == assignedCluster->first);
 			ASSERT(assignedCluster != self->dataDbs.end());
 			ASSERT(assignedCluster->second.tenants.insert(tenant).second);
-			ASSERT(assignedCluster->second.tenantGroupCapacity >= assignedCluster->second.tenants.size());

-			self->createdTenants[tenant] = TenantData(entry.assignedCluster.get());
+			if (tenantGroup.present()) {
+				assignedCluster->second.tenantGroups.insert(tenantGroup.get());
+			} else {
+				assignedCluster->second.ungroupedTenants.insert(tenant);
+			}
+
+			ASSERT(tenantGroupExists ||
+			       assignedCluster->second.tenantGroupCapacity >=
+			           assignedCluster->second.tenantGroups.size() + assignedCluster->second.ungroupedTenants.size());
+
+			self->createdTenants[tenant] = TenantData(entry.assignedCluster.get(), tenantGroup);

 			return Void();
 		} catch (Error& e) {
@@ -511,11 +551,36 @@ struct MetaclusterManagementWorkload : TestWorkload {
 			ASSERT(exists);
 			auto tenantData = self->createdTenants.find(tenant);
 			ASSERT(tenantData != self->createdTenants.end());
+
+			bool erasedTenantGroup = false;
+			if (tenantData->second.tenantGroup.present()) {
+				auto tenantGroupData = self->tenantGroups.find(tenantData->second.tenantGroup.get());
+				ASSERT(tenantGroupData != self->tenantGroups.end());
+				tenantGroupData->second.tenants.erase(tenant);
+				if (tenantGroupData->second.tenants.empty()) {
+					erasedTenantGroup = true;
+					self->tenantGroups.erase(tenantData->second.tenantGroup.get());
+				}
+			} else {
+				self->ungroupedTenants.erase(tenant);
+			}
+
 			auto& dataDb = self->dataDbs[tenantData->second.cluster];
 			ASSERT(dataDb.registered);
 			auto tenantItr = dataDb.tenants.find(tenant);
 			ASSERT(tenantItr != dataDb.tenants.end());
-			if (dataDb.tenants.size() > dataDb.tenantGroupCapacity) {
+
+			bool reducedAllocatedCount = false;
+			if (erasedTenantGroup) {
+				reducedAllocatedCount = true;
+				dataDb.tenantGroups.erase(tenantData->second.tenantGroup.get());
+			} else if (!tenantData->second.tenantGroup.present()) {
+				reducedAllocatedCount = true;
+				dataDb.ungroupedTenants.erase(tenant);
+			}
+
+			if (reducedAllocatedCount &&
+			    dataDb.ungroupedTenants.size() + dataDb.tenantGroups.size() >= dataDb.tenantGroupCapacity) {
 				--self->totalTenantGroupCapacity;
 			}
 			dataDb.tenants.erase(tenantItr);
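The bookkeeping in these hunks repeatedly uses std::map::try_emplace to express "create the group if absent, then join it" in one step; the returned bool reports whether a new group, and therefore a new capacity slot, was created. A self-contained example of the idiom with simplified types (not workload code):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    int main() {
        std::map<std::string, std::set<std::string>> groups;

        // try_emplace returns {iterator, inserted}; inserted == true means
        // the group was just created and a capacity slot was consumed.
        auto [itr, inserted] = groups.try_emplace("group1");
        itr->second.insert("tenant1");
        assert(inserted);

        // Joining an existing group reuses the same map entry: no new slot.
        auto [itr2, inserted2] = groups.try_emplace("group1");
        itr2->second.insert("tenant2");
        assert(!inserted2);
        assert(groups["group1"].size() == 2);
        return 0;
    }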
@@ -534,6 +599,189 @@ struct MetaclusterManagementWorkload : TestWorkload {
 			}
 		}
 	}

+	ACTOR static Future<Void> configureTenant(MetaclusterManagementWorkload* self) {
+		state TenantName tenant = self->chooseTenantName();
+		state Optional<TenantGroupName> newTenantGroup = self->chooseTenantGroup();
+
+		auto itr = self->createdTenants.find(tenant);
+		state bool exists = itr != self->createdTenants.end();
+		state bool tenantGroupExists =
+		    newTenantGroup.present() && self->tenantGroups.find(newTenantGroup.get()) != self->tenantGroups.end();
+
+		state bool hasCapacity = false;
+		if (exists) {
+			auto& dataDb = self->dataDbs[itr->second.cluster];
+			hasCapacity = dataDb.ungroupedTenants.size() + dataDb.tenantGroups.size() < dataDb.tenantGroupCapacity;
+		}
+
+		state std::map<Standalone<StringRef>, Optional<Value>> configurationParameters = { { "tenant_group"_sr,
+		                                                                                     newTenantGroup } };
+
+		try {
+			loop {
+				Future<Void> configureFuture =
+				    MetaclusterAPI::configureTenant(self->managementDb, tenant, configurationParameters);
+				Optional<Void> result = wait(timeout(configureFuture, deterministicRandom()->randomInt(1, 30)));
+
+				if (result.present()) {
+					break;
+				}
+			}
+
+			ASSERT(exists);
+			auto tenantData = self->createdTenants.find(tenant);
+			ASSERT(tenantData != self->createdTenants.end());
+
+			auto& dataDb = self->dataDbs[tenantData->second.cluster];
+			ASSERT(dataDb.registered);
+
+			bool allocationRemoved = false;
+			bool allocationAdded = false;
+			if (tenantData->second.tenantGroup != newTenantGroup) {
+				if (tenantData->second.tenantGroup.present()) {
+					auto& tenantGroupData = self->tenantGroups[tenantData->second.tenantGroup.get()];
+					tenantGroupData.tenants.erase(tenant);
+					if (tenantGroupData.tenants.empty()) {
+						allocationRemoved = true;
+						self->tenantGroups.erase(tenantData->second.tenantGroup.get());
+						dataDb.tenantGroups.erase(tenantData->second.tenantGroup.get());
+					}
+				} else {
+					allocationRemoved = true;
+					self->ungroupedTenants.erase(tenant);
+					dataDb.ungroupedTenants.erase(tenant);
+				}
+
+				if (newTenantGroup.present()) {
+					auto [tenantGroupData, inserted] = self->tenantGroups.try_emplace(
+					    newTenantGroup.get(), TenantGroupData(tenantData->second.cluster));
+					tenantGroupData->second.tenants.insert(tenant);
+					if (inserted) {
+						allocationAdded = true;
+						dataDb.tenantGroups.insert(newTenantGroup.get());
+					}
+				} else {
+					allocationAdded = true;
+					self->ungroupedTenants.insert(tenant);
+					dataDb.ungroupedTenants.insert(tenant);
+				}
+
+				tenantData->second.tenantGroup = newTenantGroup;
+
+				if (allocationAdded && !allocationRemoved) {
+					ASSERT(hasCapacity);
+				} else if (allocationRemoved && !allocationAdded &&
+				           dataDb.ungroupedTenants.size() + dataDb.tenantGroups.size() >= dataDb.tenantGroupCapacity) {
+					--self->totalTenantGroupCapacity;
+				}
+			}
+
+			return Void();
+		} catch (Error& e) {
+			if (e.code() == error_code_tenant_not_found) {
+				ASSERT(!exists);
+				return Void();
+			} else if (e.code() == error_code_cluster_no_capacity) {
+				ASSERT(exists && !hasCapacity);
+				return Void();
+			} else if (e.code() == error_code_invalid_tenant_configuration) {
+				ASSERT(exists && tenantGroupExists &&
+				       self->createdTenants[tenant].cluster != self->tenantGroups[newTenantGroup.get()].cluster);
+				return Void();
+			}
+
+			TraceEvent(SevError, "ConfigureTenantFailure")
+			    .error(e)
+			    .detail("TenantName", tenant)
+			    .detail("TenantGroup", newTenantGroup);
+			ASSERT(false);
+			throw internal_error();
+		}
+	}
+
+	ACTOR static Future<Void> renameTenant(MetaclusterManagementWorkload* self) {
+		state TenantName tenant = self->chooseTenantName();
+		state TenantName newTenantName = self->chooseTenantName();
+
+		auto itr = self->createdTenants.find(tenant);
+		state bool exists = itr != self->createdTenants.end();
+
+		itr = self->createdTenants.find(newTenantName);
+		state bool newTenantExists = itr != self->createdTenants.end();
+
+		try {
+			state bool retried = false;
+			loop {
+				try {
+					Future<Void> renameFuture = MetaclusterAPI::renameTenant(self->managementDb, tenant, newTenantName);
+					Optional<Void> result = wait(timeout(renameFuture, deterministicRandom()->randomInt(1, 30)));
+
+					if (result.present()) {
+						break;
+					}
+
+					retried = true;
+				} catch (Error& e) {
+					// If we retry the rename after it had succeeded, we will get an error that we should ignore
+					if (e.code() == error_code_tenant_not_found && exists && !newTenantExists && retried) {
+						break;
+					}
+					throw e;
+				}
+			}
+
+			ASSERT(exists);
+			ASSERT(!newTenantExists);
+
+			Optional<TenantMapEntry> oldEntry = wait(MetaclusterAPI::tryGetTenant(self->managementDb, tenant));
+			ASSERT(!oldEntry.present());
+
+			TenantMapEntry newEntry = wait(MetaclusterAPI::getTenant(self->managementDb, newTenantName));
+
+			auto tenantData = self->createdTenants.find(tenant);
+			ASSERT(tenantData != self->createdTenants.end());
+			ASSERT(tenantData->second.tenantGroup == newEntry.tenantGroup);
+			ASSERT(newEntry.assignedCluster.present() && tenantData->second.cluster == newEntry.assignedCluster.get());
+
+			self->createdTenants[newTenantName] = tenantData->second;
+			self->createdTenants.erase(tenantData);
+
+			auto& dataDb = self->dataDbs[tenantData->second.cluster];
+			ASSERT(dataDb.registered);
+
+			dataDb.tenants.erase(tenant);
+			dataDb.tenants.insert(newTenantName);
+
+			if (tenantData->second.tenantGroup.present()) {
+				auto& tenantGroup = self->tenantGroups[tenantData->second.tenantGroup.get()];
+				tenantGroup.tenants.erase(tenant);
+				tenantGroup.tenants.insert(newTenantName);
+			} else {
+				dataDb.ungroupedTenants.erase(tenant);
+				dataDb.ungroupedTenants.insert(newTenantName);
+				self->ungroupedTenants.erase(tenant);
+				self->ungroupedTenants.insert(newTenantName);
+			}
+
+			return Void();
+		} catch (Error& e) {
+			if (e.code() == error_code_tenant_not_found) {
+				ASSERT(!exists);
+				return Void();
+			} else if (e.code() == error_code_tenant_already_exists) {
+				ASSERT(newTenantExists);
+				return Void();
+			}
+
+			TraceEvent(SevError, "RenameTenantFailure")
+			    .error(e)
+			    .detail("OldTenantName", tenant)
+			    .detail("NewTenantName", newTenantName);
+			ASSERT(false);
+			throw internal_error();
+		}
+	}
+
 	Future<Void> start(Database const& cx) override {
 		if (clientId == 0) {
 			return _start(cx, this);
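Both new actors above follow the workload's retry-with-timeout pattern: race the management operation against a short random timeout so the simulator exercises partially completed operations, then treat certain errors on a retry as evidence that the earlier attempt actually succeeded (e.g. tenant_not_found after a rename that committed). A sketch of the pattern in flow actor style; retryUntilDone and doOperation are hypothetical stand-ins, while timeout and deterministicRandom are used exactly as in the diff:

    // Hypothetical operation being tested; returns when it completes.
    Future<Void> doOperation(Reference<IDatabase> db);

    ACTOR Future<Void> retryUntilDone(Reference<IDatabase> db) {
        state bool retried = false;
        loop {
            try {
                state Future<Void> opFuture = doOperation(db);
                // Stop waiting after a random 1-30s so a retry can race a
                // still-running or already-committed first attempt.
                Optional<Void> result = wait(timeout(opFuture, deterministicRandom()->randomInt(1, 30)));
                if (result.present()) {
                    break; // the operation definitely completed
                }
                retried = true; // timed out; it may have succeeded invisibly
            } catch (Error& e) {
                // On a retry, "not found" can simply mean the first attempt
                // already renamed/deleted the entity; verify final state
                // instead of failing.
                if (retried && e.code() == error_code_tenant_not_found) {
                    break;
                }
                throw;
            }
        }
        return Void();
    }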
@@ -546,21 +794,25 @@ struct MetaclusterManagementWorkload : TestWorkload {

 		// Run a random sequence of operations for the duration of the test
 		while (now() < start + self->testDuration) {
-			state int operation = deterministicRandom()->randomInt(0, 7);
+			state int operation = deterministicRandom()->randomInt(0, 9);
 			if (operation == 0) {
-				wait(self->registerCluster(self));
+				wait(registerCluster(self));
 			} else if (operation == 1) {
-				wait(self->removeCluster(self));
+				wait(removeCluster(self));
 			} else if (operation == 2) {
-				wait(self->listClusters(self));
+				wait(listClusters(self));
 			} else if (operation == 3) {
-				wait(self->getCluster(self));
+				wait(getCluster(self));
 			} else if (operation == 4) {
-				wait(self->configureCluster(self));
+				wait(configureCluster(self));
 			} else if (operation == 5) {
-				wait(self->createTenant(self));
+				wait(createTenant(self));
 			} else if (operation == 6) {
-				wait(self->deleteTenant(self));
+				wait(deleteTenant(self));
+			} else if (operation == 7) {
+				wait(configureTenant(self));
+			} else if (operation == 8) {
+				wait(renameTenant(self));
 			}
 		}