Merge branch 'apple:main' into block-down

commit 51ef860612
@@ -253,25 +253,43 @@ endif()
       ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
       )

-    if(NOT USE_SANITIZER)
-      add_test(NAME fdb_c_upgrade_single_threaded
+    if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT USE_SANITIZER)
+      add_test(NAME fdb_c_upgrade_single_threaded_630api
         COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
         --build-dir ${CMAKE_BINARY_DIR}
         --disable-log-dump
         --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
-        --upgrade-path "6.3.23" "7.0.0" "6.3.23"
+        --upgrade-path "6.3.23" "7.0.0" "7.2.0"
         --process-number 1
         )

-      add_test(NAME fdb_c_upgrade_multi_threaded
+      add_test(NAME fdb_c_upgrade_single_threaded_700api
         COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
         --build-dir ${CMAKE_BINARY_DIR}
         --disable-log-dump
-        --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
-        --upgrade-path "6.3.23" "7.0.0" "6.3.23"
-        --process-number 3
+        --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
+        --upgrade-path "7.0.0" "7.2.0"
+        --process-number 1
         )
-    endif()
+
+      add_test(NAME fdb_c_upgrade_multi_threaded_630api
+        COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
+        --build-dir ${CMAKE_BINARY_DIR}
+        --disable-log-dump
+        --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
+        --upgrade-path "6.3.23" "7.0.0" "7.2.0"
+        --process-number 3
+        )
+
+      add_test(NAME fdb_c_upgrade_multi_threaded_700api
+        COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
+        --build-dir ${CMAKE_BINARY_DIR}
+        --disable-log-dump
+        --test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
+        --upgrade-path "7.0.0" "7.2.0"
+        --process-number 3
+        )
+    endif()

 endif()
@@ -113,8 +113,8 @@ Implementing a C++ Workload
 ===========================

 In order to implement a workload, one has to build a shared library that links against the fdb client library. This library has to
-exppse a function (with C linkage) called workloadFactory which needs to return a pointer to an object of type ``FDBWorkloadFactory``.
-This mechanism allows the other to implement as many workloads within one library as she wants. To do this the pure virtual classes
+expose a function (with C linkage) called workloadFactory which needs to return a pointer to an object of type ``FDBWorkloadFactory``.
+This mechanism allows the author to implement as many workloads within one library as she wants. To do this the pure virtual classes
 ``FDBWorkloadFactory`` and ``FDBWorkload`` have to be implemented.

 .. function:: FDBWorkloadFactory* workloadFactory(FDBLogger*)
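For orientation, a plug-in exposing this entry point has roughly the shape sketched below. Only ``workloadFactory``, ``FDBWorkloadFactory``, ``FDBWorkload`` and ``FDBLogger`` come from the documentation above; the stand-in class bodies and member names are illustrative assumptions, and a real workload includes the FoundationDB-provided header and implements its full pure-virtual interface instead.

    // Minimal sketch of a workload shared library. The three FDB* types here are local
    // stand-ins so the example compiles on its own; real code gets them from the
    // FoundationDB headers and overrides the actual pure-virtual methods.
    #include <memory>

    struct FDBLogger {};                          // stand-in for the logger handed in by the server
    struct FDBWorkload { virtual ~FDBWorkload() = default; };
    struct FDBWorkloadFactory { virtual ~FDBWorkloadFactory() = default; };

    struct MyWorkload final : FDBWorkload {};     // implement the workload hooks here
    struct MyWorkloadFactory final : FDBWorkloadFactory {
        std::shared_ptr<FDBWorkload> workload = std::make_shared<MyWorkload>();
    };

    // The single symbol the shared library must export with C linkage; the server resolves
    // it at load time and calls it to obtain the factory.
    extern "C" FDBWorkloadFactory* workloadFactory(FDBLogger*) {
        static MyWorkloadFactory factory;
        return &factory;
    }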
@@ -1055,7 +1055,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(

 			auto& ni = rep.get().mutate();
 			shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
-			clientInfo->set(ni);
+			clientInfo->setUnconditional(ni);
 			successIndex = index;
 		} else {
 			TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator cant talk to cluster controller
@@ -581,8 +581,10 @@ ACTOR Future<GetReadVersionReply> getLiveCommittedVersion(SpanID parentSpan,
 		GetRawCommittedVersionReply repFromMaster = wait(replyFromMasterFuture);
 		grvProxyData->minKnownCommittedVersion =
 		    std::max(grvProxyData->minKnownCommittedVersion, repFromMaster.minKnownCommittedVersion);
-		// TODO add to "status json"
-		grvProxyData->ssVersionVectorCache.applyDelta(repFromMaster.ssVersionVectorDelta);
+		if (SERVER_KNOBS->ENABLE_VERSION_VECTOR) {
+			// TODO add to "status json"
+			grvProxyData->ssVersionVectorCache.applyDelta(repFromMaster.ssVersionVectorDelta);
+		}
 		grvProxyData->stats.grvGetCommittedVersionRpcDist->sampleSeconds(now() - grvConfirmEpochLive);
 		GetReadVersionReply rep;
 		rep.version = repFromMaster.version;
@@ -646,8 +648,10 @@ ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture,
 		}
 		reply.midShardSize = midShardSize;
 		reply.tagThrottleInfo.clear();
-		grvProxyData->ssVersionVectorCache.getDelta(request.maxVersion, reply.ssVersionVectorDelta);
-		grvProxyData->versionVectorSizeOnGRVReply.addMeasurement(reply.ssVersionVectorDelta.size());
+		if (SERVER_KNOBS->ENABLE_VERSION_VECTOR) {
+			grvProxyData->ssVersionVectorCache.getDelta(request.maxVersion, reply.ssVersionVectorDelta);
+			grvProxyData->versionVectorSizeOnGRVReply.addMeasurement(reply.ssVersionVectorDelta.size());
+		}
 		reply.proxyId = grvProxyData->dbgid;

 		if (!request.tags.empty()) {
@@ -275,8 +275,10 @@ ACTOR Future<Void> serveLiveCommittedVersion(Reference<MasterData> self) {
 				reply.locked = self->databaseLocked;
 				reply.metadataVersion = self->proxyMetadataVersion;
 				reply.minKnownCommittedVersion = self->minKnownCommittedVersion;
-				self->ssVersionVector.getDelta(req.maxVersion, reply.ssVersionVectorDelta);
-				self->versionVectorSizeOnCVReply.addMeasurement(reply.ssVersionVectorDelta.size());
+				if (SERVER_KNOBS->ENABLE_VERSION_VECTOR) {
+					self->ssVersionVector.getDelta(req.maxVersion, reply.ssVersionVectorDelta);
+					self->versionVectorSizeOnCVReply.addMeasurement(reply.ssVersionVectorDelta.size());
+				}
 				req.reply.send(reply);
 			}
 			when(ReportRawCommittedVersionRequest req =
@@ -806,7 +806,7 @@ public:
 	Promise<UID> clusterId;
 	// The version the cluster starts on. This value is not persisted and may
 	// not be valid after a recovery.
-	Version initialClusterVersion = invalidVersion;
+	Version initialClusterVersion = 1;
 	UID thisServerID;
 	Optional<UID> tssPairID; // if this server is a tss, this is the id of its (ss) pair
 	Optional<UID> ssPairID; // if this server is an ss, this is the id of its (tss) pair
@@ -151,7 +151,7 @@ ACTOR static Future<Void> extractClientInfo(Reference<AsyncVar<ServerDBInfo> con
 	loop {
 		ClientDBInfo ni = db->get().client;
 		shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
-		info->set(ni);
+		info->setUnconditional(ni);
 		wait(db->onChange());
 	}
 }
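Both this change and the one in monitorProxiesOneGeneration above replace ``set`` with ``setUnconditional``, which suggests the intent is to wake waiters even when the shrunk ClientDBInfo compares equal to the value already held. A self-contained sketch of that general pattern follows; it is an illustration only, not FoundationDB's ``AsyncVar`` implementation.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Minimal observable value holder illustrating set() vs. setUnconditional().
    template <class T>
    class Observable {
        T value;
        std::vector<std::function<void(const T&)>> waiters;
        void notify() {
            for (auto& w : waiters)
                w(value);
        }
    public:
        explicit Observable(T v) : value(std::move(v)) {}
        void onChange(std::function<void(const T&)> f) { waiters.push_back(std::move(f)); }

        // Only notifies when the new value actually differs from the stored one.
        void set(const T& v) {
            if (!(v == value)) {
                setUnconditional(v);
            }
        }

        // Always stores and notifies, even if the new value compares equal.
        void setUnconditional(const T& v) {
            value = v;
            notify();
        }
    };

    int main() {
        Observable<int> proxies(42);
        proxies.onChange([](const int& v) { std::cout << "woke waiter, value=" << v << "\n"; });
        proxies.set(42);              // equal value: no notification
        proxies.setUnconditional(42); // equal value: waiters still woken
    }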
@@ -121,6 +121,7 @@ struct EncryptionOpsWorkload : TestWorkload {
 	EncryptCipherDomainId maxDomainId;
 	EncryptCipherBaseKeyId minBaseCipherId;
 	EncryptCipherBaseKeyId headerBaseCipherId;
+	EncryptCipherRandomSalt headerRandomSalt;

 	EncryptionOpsWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
 		mode = getOption(options, LiteralStringRef("fixedSize"), 1);

@@ -134,6 +135,7 @@ struct EncryptionOpsWorkload : TestWorkload {
 		maxDomainId = deterministicRandom()->randomInt(minDomainId, minDomainId + 10) + 5;
 		minBaseCipherId = 100;
 		headerBaseCipherId = wcx.clientId * 100 + 1;
+		headerRandomSalt = wcx.clientId * 100 + 1;

 		metrics = std::make_unique<WorkloadMetrics>();

@@ -183,7 +185,8 @@ struct EncryptionOpsWorkload : TestWorkload {

 		// insert the Encrypt Header cipherKey
 		generateRandomBaseCipher(AES_256_KEY_LENGTH, &buff[0], &cipherLen);
-		cipherKeyCache->insertCipherKey(ENCRYPT_HEADER_DOMAIN_ID, headerBaseCipherId, buff, cipherLen);
+		cipherKeyCache->insertCipherKey(
+		    ENCRYPT_HEADER_DOMAIN_ID, headerBaseCipherId, buff, cipherLen, headerRandomSalt);

 		TraceEvent("SetupCipherEssentials_Done").detail("MinDomainId", minDomainId).detail("MaxDomainId", maxDomainId);
 	}
@@ -209,6 +212,29 @@ struct EncryptionOpsWorkload : TestWorkload {
 		TraceEvent("UpdateBaseCipher").detail("DomainId", encryptDomainId).detail("BaseCipherId", *nextBaseCipherId);
 	}

+	Reference<BlobCipherKey> getEncryptionKey(const EncryptCipherDomainId& domainId,
+	                                          const EncryptCipherBaseKeyId& baseCipherId,
+	                                          const EncryptCipherRandomSalt& salt) {
+		const bool simCacheMiss = deterministicRandom()->randomInt(1, 100) < 15;
+
+		Reference<BlobCipherKeyCache> cipherKeyCache = BlobCipherKeyCache::getInstance();
+		Reference<BlobCipherKey> cipherKey = cipherKeyCache->getCipherKey(domainId, baseCipherId, salt);
+
+		if (simCacheMiss) {
+			TraceEvent("SimKeyCacheMiss").detail("EncyrptDomainId", domainId).detail("BaseCipherId", baseCipherId);
+			// simulate KeyCache miss that may happen during decryption; insert a CipherKey with known 'salt'
+			cipherKeyCache->insertCipherKey(domainId,
+			                                baseCipherId,
+			                                cipherKey->rawBaseCipher(),
+			                                cipherKey->getBaseCipherLen(),
+			                                cipherKey->getSalt());
+			// Ensure the update was a NOP
+			Reference<BlobCipherKey> cKey = cipherKeyCache->getCipherKey(domainId, baseCipherId, salt);
+			ASSERT(cKey->isEqual(cipherKey));
+		}
+		return cipherKey;
+	}
+
 	Reference<EncryptBuf> doEncryption(Reference<BlobCipherKey> textCipherKey,
 	                                   Reference<BlobCipherKey> headerCipherKey,
 	                                   uint8_t* payload,
@@ -240,11 +266,12 @@ struct EncryptionOpsWorkload : TestWorkload {
 		ASSERT_EQ(header.flags.headerVersion, EncryptBlobCipherAes265Ctr::ENCRYPT_HEADER_VERSION);
 		ASSERT_EQ(header.flags.encryptMode, ENCRYPT_CIPHER_MODE_AES_256_CTR);

-		Reference<BlobCipherKeyCache> cipherKeyCache = BlobCipherKeyCache::getInstance();
-		Reference<BlobCipherKey> cipherKey = cipherKeyCache->getCipherKey(header.cipherTextDetails.encryptDomainId,
-		                                                                  header.cipherTextDetails.baseCipherId);
-		Reference<BlobCipherKey> headerCipherKey = cipherKeyCache->getCipherKey(
-		    header.cipherHeaderDetails.encryptDomainId, header.cipherHeaderDetails.baseCipherId);
+		Reference<BlobCipherKey> cipherKey = getEncryptionKey(header.cipherTextDetails.encryptDomainId,
+		                                                      header.cipherTextDetails.baseCipherId,
+		                                                      header.cipherTextDetails.salt);
+		Reference<BlobCipherKey> headerCipherKey = getEncryptionKey(header.cipherHeaderDetails.encryptDomainId,
+		                                                            header.cipherHeaderDetails.baseCipherId,
+		                                                            header.cipherHeaderDetails.salt);
 		ASSERT(cipherKey.isValid());
 		ASSERT(cipherKey->isEqual(orgCipherKey));

@@ -297,7 +324,7 @@ struct EncryptionOpsWorkload : TestWorkload {
 		Reference<BlobCipherKey> cipherKey = cipherKeyCache->getLatestCipherKey(encryptDomainId);
 		// Each client working with their own version of encryptHeaderCipherKey, avoid using getLatest()
 		Reference<BlobCipherKey> headerCipherKey =
-		    cipherKeyCache->getCipherKey(ENCRYPT_HEADER_DOMAIN_ID, headerBaseCipherId);
+		    cipherKeyCache->getCipherKey(ENCRYPT_HEADER_DOMAIN_ID, headerBaseCipherId, headerRandomSalt);
 		auto end = std::chrono::high_resolution_clock::now();
 		metrics->updateKeyDerivationTime(std::chrono::duration<double, std::nano>(end - start).count());

@@ -90,17 +90,19 @@ struct PrivateEndpoints : TestWorkload {
 	}

 	explicit PrivateEndpoints(WorkloadContext const& wcx) : TestWorkload(wcx) {
+		// The commented out request streams below can't be default initialized properly
+		// as they won't initialize all of their memory which causes valgrind to complain.
 		startAfter = getOption(options, "startAfter"_sr, 10.0);
 		runFor = getOption(options, "runFor"_sr, 10.0);
 		addTestFor(&GrvProxyInterface::waitFailure);
 		addTestFor(&GrvProxyInterface::getHealthMetrics);
-		addTestFor(&CommitProxyInterface::getStorageServerRejoinInfo);
+		// addTestFor(&CommitProxyInterface::getStorageServerRejoinInfo);
 		addTestFor(&CommitProxyInterface::waitFailure);
-		addTestFor(&CommitProxyInterface::txnState);
-		addTestFor(&CommitProxyInterface::getHealthMetrics);
-		addTestFor(&CommitProxyInterface::proxySnapReq);
+		// addTestFor(&CommitProxyInterface::txnState);
+		// addTestFor(&CommitProxyInterface::getHealthMetrics);
+		// addTestFor(&CommitProxyInterface::proxySnapReq);
 		addTestFor(&CommitProxyInterface::exclusionSafetyCheckReq);
-		addTestFor(&CommitProxyInterface::getDDMetrics);
+		// addTestFor(&CommitProxyInterface::getDDMetrics);
 	}
 	std::string description() const override { return WorkloadName; }
 	Future<Void> start(Database const& cx) override { return _start(this, cx); }
@@ -19,6 +19,7 @@
 */

 #include "flow/BlobCipher.h"
+
 #include "flow/EncryptUtils.h"
 #include "flow/Knobs.h"
 #include "flow/Error.h"

@@ -32,6 +33,7 @@
 #include <cstring>
 #include <memory>
 #include <string>
+#include <utility>

 #if ENCRYPTION_ENABLED

@@ -54,12 +56,14 @@ BlobCipherKey::BlobCipherKey(const EncryptCipherDomainId& domainId,
 		salt = nondeterministicRandom()->randomUInt64();
 	}
 	initKey(domainId, baseCiph, baseCiphLen, baseCiphId, salt);
-	/*TraceEvent("BlobCipherKey")
-	    .detail("DomainId", domainId)
-	    .detail("BaseCipherId", baseCipherId)
-	    .detail("BaseCipherLen", baseCipherLen)
-	    .detail("RandomSalt", randomSalt)
-	    .detail("CreationTime", creationTime);*/
+}
+
+BlobCipherKey::BlobCipherKey(const EncryptCipherDomainId& domainId,
+                             const EncryptCipherBaseKeyId& baseCiphId,
+                             const uint8_t* baseCiph,
+                             int baseCiphLen,
+                             const EncryptCipherRandomSalt& salt) {
+	initKey(domainId, baseCiph, baseCiphLen, baseCiphId, salt);
 }

 void BlobCipherKey::initKey(const EncryptCipherDomainId& domainId,

@@ -82,6 +86,13 @@ void BlobCipherKey::initKey(const EncryptCipherDomainId& domainId,
 	applyHmacSha256Derivation();
 	// update the key creation time
 	creationTime = now();
+
+	TraceEvent("BlobCipherKey")
+	    .detail("DomainId", domainId)
+	    .detail("BaseCipherId", baseCipherId)
+	    .detail("BaseCipherLen", baseCipherLen)
+	    .detail("RandomSalt", randomSalt)
+	    .detail("CreationTime", creationTime);
 }

 void BlobCipherKey::applyHmacSha256Derivation() {
@@ -112,25 +123,77 @@ BlobCipherKeyIdCache::BlobCipherKeyIdCache(EncryptCipherDomainId dId)
 	TraceEvent("Init_BlobCipherKeyIdCache").detail("DomainId", domainId);
 }

-Reference<BlobCipherKey> BlobCipherKeyIdCache::getLatestCipherKey() {
-	return getCipherByBaseCipherId(latestBaseCipherKeyId);
+BlobCipherKeyIdCacheKey BlobCipherKeyIdCache::getCacheKey(const EncryptCipherBaseKeyId& baseCipherKeyId,
+                                                          const EncryptCipherRandomSalt& salt) {
+	return std::make_pair(baseCipherKeyId, salt);
 }

-Reference<BlobCipherKey> BlobCipherKeyIdCache::getCipherByBaseCipherId(EncryptCipherBaseKeyId baseCipherKeyId) {
-	BlobCipherKeyIdCacheMapCItr itr = keyIdCache.find(baseCipherKeyId);
+Reference<BlobCipherKey> BlobCipherKeyIdCache::getLatestCipherKey() {
+	return getCipherByBaseCipherId(latestBaseCipherKeyId, latestRandomSalt);
+}
+
+Reference<BlobCipherKey> BlobCipherKeyIdCache::getCipherByBaseCipherId(const EncryptCipherBaseKeyId& baseCipherKeyId,
+                                                                       const EncryptCipherRandomSalt& salt) {
+	BlobCipherKeyIdCacheMapCItr itr = keyIdCache.find(getCacheKey(baseCipherKeyId, salt));
 	if (itr == keyIdCache.end()) {
+		TraceEvent("CipherByBaseCipherId_KeyMissing")
+		    .detail("DomainId", domainId)
+		    .detail("BaseCipherId", baseCipherKeyId)
+		    .detail("Salt", salt);
 		throw encrypt_key_not_found();
 	}
 	return itr->second;
 }

-void BlobCipherKeyIdCache::insertBaseCipherKey(EncryptCipherBaseKeyId baseCipherId,
+void BlobCipherKeyIdCache::insertBaseCipherKey(const EncryptCipherBaseKeyId& baseCipherId,
                                                const uint8_t* baseCipher,
                                                int baseCipherLen) {
 	ASSERT_GT(baseCipherId, ENCRYPT_INVALID_CIPHER_KEY_ID);

+	// BaseCipherKeys are immutable, given the routine invocation updates 'latestCipher',
+	// ensure no key-tampering is done
+	try {
+		Reference<BlobCipherKey> cipherKey = getLatestCipherKey();
+		if (cipherKey->getBaseCipherId() == baseCipherId) {
+			if (memcmp(cipherKey->rawBaseCipher(), baseCipher, baseCipherLen) == 0) {
+				TraceEvent("InsertBaseCipherKey_AlreadyPresent")
+				    .detail("BaseCipherKeyId", baseCipherId)
+				    .detail("DomainId", domainId);
+				// Key is already present; nothing more to do.
+				return;
+			} else {
+				TraceEvent("InsertBaseCipherKey_UpdateCipher")
+				    .detail("BaseCipherKeyId", baseCipherId)
+				    .detail("DomainId", domainId);
+				throw encrypt_update_cipher();
+			}
+		}
+	} catch (Error& e) {
+		if (e.code() != error_code_encrypt_key_not_found) {
+			throw e;
+		}
+	}
+
+	Reference<BlobCipherKey> cipherKey =
+	    makeReference<BlobCipherKey>(domainId, baseCipherId, baseCipher, baseCipherLen);
+	BlobCipherKeyIdCacheKey cacheKey = getCacheKey(cipherKey->getBaseCipherId(), cipherKey->getSalt());
+	keyIdCache.emplace(cacheKey, cipherKey);
+
+	// Update the latest BaseCipherKeyId for the given encryption domain
+	latestBaseCipherKeyId = baseCipherId;
+	latestRandomSalt = cipherKey->getSalt();
+}
+
+void BlobCipherKeyIdCache::insertBaseCipherKey(const EncryptCipherBaseKeyId& baseCipherId,
+                                               const uint8_t* baseCipher,
+                                               int baseCipherLen,
+                                               const EncryptCipherRandomSalt& salt) {
+	ASSERT_GT(baseCipherId, ENCRYPT_INVALID_CIPHER_KEY_ID);
+
+	BlobCipherKeyIdCacheKey cacheKey = getCacheKey(baseCipherId, salt);
+
 	// BaseCipherKeys are immutable, ensure that cached value doesn't get updated.
-	BlobCipherKeyIdCacheMapCItr itr = keyIdCache.find(baseCipherId);
+	BlobCipherKeyIdCacheMapCItr itr = keyIdCache.find(cacheKey);
 	if (itr != keyIdCache.end()) {
 		if (memcmp(itr->second->rawBaseCipher(), baseCipher, baseCipherLen) == 0) {
 			TraceEvent("InsertBaseCipherKey_AlreadyPresent")
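The key change in this hunk is that the per-domain cache is now indexed by the {baseCipherId, salt} pair built by ``getCacheKey`` rather than by the bare ``baseCipherId``, so keys derived from the same base cipher with different salts can coexist. A self-contained sketch of that indexing scheme, with plain integers standing in for the FDB typedefs and the hash mirroring the ``pair_hash`` added to BlobCipher.h later in this diff:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <utility>

    using BaseCipherId = uint64_t;
    using RandomSalt = uint64_t;
    using CacheKey = std::pair<BaseCipherId, RandomSalt>;

    // Same shape as the pair_hash introduced in BlobCipher.h: combine the two element
    // hashes with XOR, falling back to one of them when they are equal (equal hashes
    // XOR-ed together would collapse to zero).
    struct PairHash {
        template <class T1, class T2>
        std::size_t operator()(const std::pair<T1, T2>& p) const {
            auto h1 = std::hash<T1>{}(p.first);
            auto h2 = std::hash<T2>{}(p.second);
            return h1 == h2 ? h1 : h1 ^ h2;
        }
    };

    int main() {
        std::unordered_map<CacheKey, int, PairHash> cache; // int stands in for Reference<BlobCipherKey>

        cache.emplace(CacheKey{ 100, 0xABCD }, 1); // same base cipher id, different salts
        cache.emplace(CacheKey{ 100, 0x1234 }, 2); // coexist instead of overwriting each other

        assert(cache.find(CacheKey{ 100, 0xABCD })->second == 1);
        assert(cache.find(CacheKey{ 100, 0x1234 })->second == 2);
        assert(cache.find(CacheKey{ 100, 0xFFFF }) == cache.end()); // unknown salt: cache miss
        return 0;
    }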
@@ -146,9 +209,9 @@ void BlobCipherKeyIdCache::insertBaseCipherKey(EncryptCipherBaseKeyId baseCipher
 		}
 	}

-	keyIdCache.emplace(baseCipherId, makeReference<BlobCipherKey>(domainId, baseCipherId, baseCipher, baseCipherLen));
-	// Update the latest BaseCipherKeyId for the given encryption domain
-	latestBaseCipherKeyId = baseCipherId;
+	Reference<BlobCipherKey> cipherKey =
+	    makeReference<BlobCipherKey>(domainId, baseCipherId, baseCipher, baseCipherLen, salt);
+	keyIdCache.emplace(cacheKey, cipherKey);
 }

 void BlobCipherKeyIdCache::cleanup() {
@@ -197,6 +260,41 @@ void BlobCipherKeyCache::insertCipherKey(const EncryptCipherDomainId& domainId,
 	}
 }

+void BlobCipherKeyCache::insertCipherKey(const EncryptCipherDomainId& domainId,
+                                         const EncryptCipherBaseKeyId& baseCipherId,
+                                         const uint8_t* baseCipher,
+                                         int baseCipherLen,
+                                         const EncryptCipherRandomSalt& salt) {
+	if (domainId == ENCRYPT_INVALID_DOMAIN_ID || baseCipherId == ENCRYPT_INVALID_CIPHER_KEY_ID) {
+		throw encrypt_invalid_id();
+	}
+
+	try {
+		auto domainItr = domainCacheMap.find(domainId);
+		if (domainItr == domainCacheMap.end()) {
+			// Add mapping to track new encryption domain
+			Reference<BlobCipherKeyIdCache> keyIdCache = makeReference<BlobCipherKeyIdCache>(domainId);
+			keyIdCache->insertBaseCipherKey(baseCipherId, baseCipher, baseCipherLen, salt);
+			domainCacheMap.emplace(domainId, keyIdCache);
+		} else {
+			// Track new baseCipher keys
+			Reference<BlobCipherKeyIdCache> keyIdCache = domainItr->second;
+			keyIdCache->insertBaseCipherKey(baseCipherId, baseCipher, baseCipherLen, salt);
+		}
+
+		TraceEvent("InsertCipherKey")
+		    .detail("DomainId", domainId)
+		    .detail("BaseCipherKeyId", baseCipherId)
+		    .detail("Salt", salt);
+	} catch (Error& e) {
+		TraceEvent("InsertCipherKey_Failed")
+		    .detail("BaseCipherKeyId", baseCipherId)
+		    .detail("DomainId", domainId)
+		    .detail("Salt", salt);
+		throw;
+	}
+}
+
 Reference<BlobCipherKey> BlobCipherKeyCache::getLatestCipherKey(const EncryptCipherDomainId& domainId) {
 	auto domainItr = domainCacheMap.find(domainId);
 	if (domainItr == domainCacheMap.end()) {

@@ -217,17 +315,19 @@ Reference<BlobCipherKey> BlobCipherKeyCache::getLatestCipherKey(const EncryptCip
 }

 Reference<BlobCipherKey> BlobCipherKeyCache::getCipherKey(const EncryptCipherDomainId& domainId,
-                                                          const EncryptCipherBaseKeyId& baseCipherId) {
+                                                          const EncryptCipherBaseKeyId& baseCipherId,
+                                                          const EncryptCipherRandomSalt& salt) {
 	auto domainItr = domainCacheMap.find(domainId);
 	if (domainItr == domainCacheMap.end()) {
+		TraceEvent("GetCipherKey_MissingDomainId").detail("DomainId", domainId);
 		throw encrypt_key_not_found();
 	}

 	Reference<BlobCipherKeyIdCache> keyIdCache = domainItr->second;
-	return keyIdCache->getCipherByBaseCipherId(baseCipherId);
+	return keyIdCache->getCipherByBaseCipherId(baseCipherId, salt);
 }

-void BlobCipherKeyCache::resetEncyrptDomainId(const EncryptCipherDomainId domainId) {
+void BlobCipherKeyCache::resetEncryptDomainId(const EncryptCipherDomainId domainId) {
 	auto domainItr = domainCacheMap.find(domainId);
 	if (domainItr == domainCacheMap.end()) {
 		throw encrypt_key_not_found();

@@ -291,8 +391,8 @@ Reference<EncryptBuf> EncryptBlobCipherAes265Ctr::encrypt(const uint8_t* plainte

 	memset(reinterpret_cast<uint8_t*>(header), 0, sizeof(BlobCipherEncryptHeader));

-	// Alloc buffer computation accounts for 'header authentication' generation scheme. If single-auth-token needs to be
-	// generated, allocate buffer sufficient to append header to the cipherText optimizing memcpy cost.
+	// Alloc buffer computation accounts for 'header authentication' generation scheme. If single-auth-token needs
+	// to be generated, allocate buffer sufficient to append header to the cipherText optimizing memcpy cost.

 	const int allocSize = authTokenMode == ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE
 	                          ? plaintextLen + AES_BLOCK_SIZE + sizeof(BlobCipherEncryptHeader)

@@ -340,6 +440,7 @@ Reference<EncryptBuf> EncryptBlobCipherAes265Ctr::encrypt(const uint8_t* plainte
 	// Populate header encryption-key details
 	header->cipherHeaderDetails.encryptDomainId = headerCipherKey->getDomainId();
 	header->cipherHeaderDetails.baseCipherId = headerCipherKey->getBaseCipherId();
+	header->cipherHeaderDetails.salt = headerCipherKey->getSalt();

 	// Populate header authToken details
 	if (header->flags.authTokenMode == ENCRYPT_HEADER_AUTH_TOKEN_MODE_SINGLE) {

@@ -624,8 +725,8 @@ void forceLinkBlobCipherTests() {}
 // 3. Inserting of 'identical' cipherKey (already cached) more than once works as desired.
 // 4. Inserting of 'non-identical' cipherKey (already cached) more than once works as desired.
 // 5. Validation encryption ops (correctness):
-// 5.1. Encyrpt a buffer followed by decryption of the buffer, validate the contents.
-// 5.2. Simulate anomalies such as: EncyrptionHeader corruption, authToken mismatch / encryptionMode mismatch etc.
+// 5.1. Encrypt a buffer followed by decryption of the buffer, validate the contents.
+// 5.2. Simulate anomalies such as: EncryptionHeader corruption, authToken mismatch / encryptionMode mismatch etc.
 // 6. Cache cleanup
 // 6.1 cleanup cipherKeys by given encryptDomainId
 // 6.2. Cleanup all cached cipherKeys

@@ -639,6 +740,7 @@ TEST_CASE("flow/BlobCipher") {
 		int len;
 		EncryptCipherBaseKeyId keyId;
 		std::unique_ptr<uint8_t[]> key;
+		EncryptCipherRandomSalt generatedSalt;

 		BaseCipher(const EncryptCipherDomainId& dId, const EncryptCipherBaseKeyId& kId)
 		  : domainId(dId), len(deterministicRandom()->randomInt(AES_256_KEY_LENGTH / 2, AES_256_KEY_LENGTH + 1)),

@@ -671,6 +773,8 @@ TEST_CASE("flow/BlobCipher") {

 			cipherKeyCache->insertCipherKey(
 			    baseCipher->domainId, baseCipher->keyId, baseCipher->key.get(), baseCipher->len);
+			Reference<BlobCipherKey> fetchedKey = cipherKeyCache->getLatestCipherKey(baseCipher->domainId);
+			baseCipher->generatedSalt = fetchedKey->getSalt();
 		}
 	}
 	// insert EncryptHeader BlobCipher key

@@ -684,7 +788,8 @@ TEST_CASE("flow/BlobCipher") {
 	for (auto& domainItr : domainKeyMap) {
 		for (auto& baseKeyItr : domainItr.second) {
 			Reference<BaseCipher> baseCipher = baseKeyItr.second;
-			Reference<BlobCipherKey> cipherKey = cipherKeyCache->getCipherKey(baseCipher->domainId, baseCipher->keyId);
+			Reference<BlobCipherKey> cipherKey =
+			    cipherKeyCache->getCipherKey(baseCipher->domainId, baseCipher->keyId, baseCipher->generatedSalt);
 			ASSERT(cipherKey.isValid());
 			// validate common cipher properties - domainId, baseCipherId, baseCipherLen, rawBaseCipher
 			ASSERT_EQ(cipherKey->getBaseCipherId(), baseCipher->keyId);

@@ -759,7 +864,8 @@ TEST_CASE("flow/BlobCipher") {
 		    .detail("BaseCipherId", header.cipherTextDetails.baseCipherId);

 		Reference<BlobCipherKey> tCipherKeyKey = cipherKeyCache->getCipherKey(header.cipherTextDetails.encryptDomainId,
-		                                                                      header.cipherTextDetails.baseCipherId);
+		                                                                      header.cipherTextDetails.baseCipherId,
+		                                                                      header.cipherTextDetails.salt);
 		ASSERT(tCipherKeyKey->isEqual(cipherKey));
 		DecryptBlobCipherAes256Ctr decryptor(
 		    tCipherKeyKey, Reference<BlobCipherKey>(), &header.cipherTextDetails.iv[0]);

@@ -846,9 +952,11 @@ TEST_CASE("flow/BlobCipher") {
 		    StringRef(arena, &header.singleAuthToken.authToken[0], AUTH_TOKEN_SIZE).toString());

 		Reference<BlobCipherKey> tCipherKeyKey = cipherKeyCache->getCipherKey(header.cipherTextDetails.encryptDomainId,
-		                                                                      header.cipherTextDetails.baseCipherId);
+		                                                                      header.cipherTextDetails.baseCipherId,
+		                                                                      header.cipherTextDetails.salt);
 		Reference<BlobCipherKey> hCipherKey = cipherKeyCache->getCipherKey(header.cipherHeaderDetails.encryptDomainId,
-		                                                                   header.cipherHeaderDetails.baseCipherId);
+		                                                                   header.cipherHeaderDetails.baseCipherId,
+		                                                                   header.cipherHeaderDetails.salt);
 		ASSERT(tCipherKeyKey->isEqual(cipherKey));
 		DecryptBlobCipherAes256Ctr decryptor(tCipherKeyKey, hCipherKey, &header.cipherTextDetails.iv[0]);
 		Reference<EncryptBuf> decrypted = decryptor.decrypt(encrypted->begin(), bufLen, header, arena);

@@ -949,9 +1057,11 @@ TEST_CASE("flow/BlobCipher") {
 		    StringRef(arena, &header.singleAuthToken.authToken[0], AUTH_TOKEN_SIZE).toString());

 		Reference<BlobCipherKey> tCipherKey = cipherKeyCache->getCipherKey(header.cipherTextDetails.encryptDomainId,
-		                                                                   header.cipherTextDetails.baseCipherId);
+		                                                                   header.cipherTextDetails.baseCipherId,
+		                                                                   header.cipherTextDetails.salt);
 		Reference<BlobCipherKey> hCipherKey = cipherKeyCache->getCipherKey(header.cipherHeaderDetails.encryptDomainId,
-		                                                                   header.cipherHeaderDetails.baseCipherId);
+		                                                                   header.cipherHeaderDetails.baseCipherId,
+		                                                                   header.cipherHeaderDetails.salt);

 		ASSERT(tCipherKey->isEqual(cipherKey));
 		DecryptBlobCipherAes256Ctr decryptor(tCipherKey, hCipherKey, &header.cipherTextDetails.iv[0]);

@@ -1047,7 +1157,7 @@ TEST_CASE("flow/BlobCipher") {

 	// Validate dropping encyrptDomainId cached keys
 	const EncryptCipherDomainId candidate = deterministicRandom()->randomInt(minDomainId, maxDomainId);
-	cipherKeyCache->resetEncyrptDomainId(candidate);
+	cipherKeyCache->resetEncryptDomainId(candidate);
 	std::vector<Reference<BlobCipherKey>> cachedKeys = cipherKeyCache->getAllCiphers(candidate);
 	ASSERT(cachedKeys.empty());

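Read together with the workload's ``getEncryptionKey`` helper above, these test changes pin down the decrypt-path contract: the persisted header now records {encryptDomainId, baseCipherId, salt} for both the text and header ciphers, and a reader that misses in the key cache is expected to re-insert the fetched base cipher with that recorded salt so the derived key matches the one used at encryption time. A rough, self-contained sketch of that flow; the types below are stand-ins and ``fetchBaseCipherFromKms`` is a hypothetical placeholder for however the caller obtains base cipher material:

    #include <cstdint>
    #include <map>
    #include <string>
    #include <tuple>

    // Stand-ins; the real code uses EncryptCipherDomainId/BaseKeyId/RandomSalt and
    // Reference<BlobCipherKey> from the FDB headers.
    using DomainId = int64_t;
    using BaseCipherId = uint64_t;
    using Salt = uint64_t;
    struct PersistedHeaderDetails {
        DomainId domainId;
        BaseCipherId baseCipherId;
        Salt salt; // persisted alongside domainId/baseCipherId by this change
    };

    // Toy cache keyed by {domain, baseCipherId, salt}, mirroring the lookup scheme above.
    std::map<std::tuple<DomainId, BaseCipherId, Salt>, std::string> g_cache;

    // Hypothetical stand-in for fetching base cipher material from a key-management service.
    std::string fetchBaseCipherFromKms(DomainId d, BaseCipherId id) {
        return "base-cipher-bytes";
    }

    std::string getCipherForDecrypt(const PersistedHeaderDetails& h) {
        auto key = std::make_tuple(h.domainId, h.baseCipherId, h.salt);
        auto itr = g_cache.find(key);
        if (itr == g_cache.end()) {
            // Cache miss while decrypting: re-populate using the salt recorded in the header,
            // so the derived key matches the one originally used for encryption.
            itr = g_cache.emplace(key, fetchBaseCipherFromKms(h.domainId, h.baseCipherId)).first;
        }
        return itr->second;
    }

    int main() {
        PersistedHeaderDetails h{ 1, 100, 0xABCD };
        return getCipherForDecrypt(h).empty() ? 1 : 0;
    }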
@@ -82,11 +82,11 @@ private:
 // This header is persisted along with encrypted buffer, it contains information necessary
 // to assist decrypting the buffers to serve read requests.
 //
-// The total space overhead is 96 bytes.
+// The total space overhead is 104 bytes.

 #pragma pack(push, 1) // exact fit - no padding
 typedef struct BlobCipherEncryptHeader {
-	static constexpr int headerSize = 96;
+	static constexpr int headerSize = 104;
 	union {
 		struct {
 			uint8_t size; // reading first byte is sufficient to determine header

@@ -101,7 +101,7 @@ typedef struct BlobCipherEncryptHeader {

 	// Cipher text encryption information
 	struct {
-		// Encyrption domain boundary identifier.
+		// Encryption domain boundary identifier.
 		EncryptCipherDomainId encryptDomainId{};
 		// BaseCipher encryption key identifier
 		EncryptCipherBaseKeyId baseCipherId{};

@@ -116,6 +116,8 @@ typedef struct BlobCipherEncryptHeader {
 		EncryptCipherDomainId encryptDomainId{};
 		// BaseCipher encryption key identifier.
 		EncryptCipherBaseKeyId baseCipherId{};
+		// Random salt
+		EncryptCipherRandomSalt salt{};
 	} cipherHeaderDetails;

 	// Encryption header is stored as plaintext on a persistent storage to assist reconstruction of cipher-key(s) for

@@ -164,6 +166,11 @@ public:
 	              const EncryptCipherBaseKeyId& baseCiphId,
 	              const uint8_t* baseCiph,
 	              int baseCiphLen);
+	BlobCipherKey(const EncryptCipherDomainId& domainId,
+	              const EncryptCipherBaseKeyId& baseCiphId,
+	              const uint8_t* baseCiph,
+	              int baseCiphLen,
+	              const EncryptCipherRandomSalt& salt);

 	uint8_t* data() const { return cipher.get(); }
 	uint64_t getCreationTime() const { return creationTime; }

@@ -206,7 +213,7 @@ private:
 // This interface allows FDB processes participating in encryption to store and
 // index recently used encyption cipher keys. FDB encryption has two dimensions:
 // 1. Mapping on cipher encryption keys per "encryption domains"
-// 2. Per encryption domain, the cipher keys are index using "baseCipherKeyId".
+// 2. Per encryption domain, the cipher keys are index using {baseCipherKeyId, salt} tuple.
 //
 // The design supports NIST recommendation of limiting lifetime of an encryption
 // key. For details refer to:

@@ -214,10 +221,10 @@ private:
 //
 // Below gives a pictoral representation of in-memory datastructure implemented
 // to index encryption keys:
-// { encryptionDomain -> { baseCipherId -> cipherKey } }
+// { encryptionDomain -> { {baseCipherId, salt} -> cipherKey } }
 //
 // Supported cache lookups schemes:
-// 1. Lookup cipher based on { encryptionDomainId, baseCipherKeyId } tuple.
+// 1. Lookup cipher based on { encryptionDomainId, baseCipherKeyId, salt } triplet.
 // 2. Lookup latest cipher key for a given encryptionDomainId.
 //
 // Client is responsible to handle cache-miss usecase, the corrective operation

@@ -226,15 +233,29 @@ private:
 // required encryption key, however, CPs/SSs cache-miss would result in RPC to
 // EncryptKeyServer to refresh the desired encryption key.

-using BlobCipherKeyIdCacheMap = std::unordered_map<EncryptCipherBaseKeyId, Reference<BlobCipherKey>>;
+struct pair_hash {
+	template <class T1, class T2>
+	std::size_t operator()(const std::pair<T1, T2>& pair) const {
+		auto hash1 = std::hash<T1>{}(pair.first);
+		auto hash2 = std::hash<T2>{}(pair.second);
+
+		// Equal hashes XOR would be ZERO.
+		return hash1 == hash2 ? hash1 : hash1 ^ hash2;
+	}
+};
+using BlobCipherKeyIdCacheKey = std::pair<EncryptCipherBaseKeyId, EncryptCipherRandomSalt>;
+using BlobCipherKeyIdCacheMap = std::unordered_map<BlobCipherKeyIdCacheKey, Reference<BlobCipherKey>, pair_hash>;
 using BlobCipherKeyIdCacheMapCItr =
-    std::unordered_map<EncryptCipherBaseKeyId, Reference<BlobCipherKey>>::const_iterator;
+    std::unordered_map<BlobCipherKeyIdCacheKey, Reference<BlobCipherKey>, pair_hash>::const_iterator;

 struct BlobCipherKeyIdCache : ReferenceCounted<BlobCipherKeyIdCache> {
 public:
 	BlobCipherKeyIdCache();
 	explicit BlobCipherKeyIdCache(EncryptCipherDomainId dId);

+	BlobCipherKeyIdCacheKey getCacheKey(const EncryptCipherBaseKeyId& baseCipherId,
+	                                    const EncryptCipherRandomSalt& salt);
+
 	// API returns the last inserted cipherKey.
 	// If none exists, 'encrypt_key_not_found' is thrown.

@@ -243,14 +264,33 @@ public:
 	// API returns cipherKey corresponding to input 'baseCipherKeyId'.
 	// If none exists, 'encrypt_key_not_found' is thrown.

-	Reference<BlobCipherKey> getCipherByBaseCipherId(EncryptCipherBaseKeyId baseCipherKeyId);
+	Reference<BlobCipherKey> getCipherByBaseCipherId(const EncryptCipherBaseKeyId& baseCipherKeyId,
+	                                                 const EncryptCipherRandomSalt& salt);

 	// API enables inserting base encryption cipher details to the BlobCipherKeyIdCache.
 	// Given cipherKeys are immutable, attempting to re-insert same 'identical' cipherKey
 	// is treated as a NOP (success), however, an attempt to update cipherKey would throw
 	// 'encrypt_update_cipher' exception.
-	void insertBaseCipherKey(EncryptCipherBaseKeyId baseCipherId, const uint8_t* baseCipher, int baseCipherLen);
+	//
+	// API NOTE: Recommended usecase is to update encryption cipher-key is updated the external
+	// keyManagementSolution to limit an encryption key lifetime
+
+	void insertBaseCipherKey(const EncryptCipherBaseKeyId& baseCipherId, const uint8_t* baseCipher, int baseCipherLen);
+
+	// API enables inserting base encryption cipher details to the BlobCipherKeyIdCache
+	// Given cipherKeys are immutable, attempting to re-insert same 'identical' cipherKey
+	// is treated as a NOP (success), however, an attempt to update cipherKey would throw
+	// 'encrypt_update_cipher' exception.
+	//
+	// API NOTE: Recommended usecase is to update encryption cipher-key regeneration while performing
+	// decryption. The encryptionheader would contain relevant details including: 'encryptDomainId',
+	// 'baseCipherId' & 'salt'. The caller needs to fetch 'baseCipherKey' detail and re-populate KeyCache.
+	// Also, the invocation will NOT update the latest cipher-key details.
+
+	void insertBaseCipherKey(const EncryptCipherBaseKeyId& baseCipherId,
+	                         const uint8_t* baseCipher,
+	                         int baseCipherLen,
+	                         const EncryptCipherRandomSalt& salt);

 	// API cleanup the cache by dropping all cached cipherKeys
 	void cleanup();

@@ -262,6 +302,7 @@ private:
 	EncryptCipherDomainId domainId;
 	BlobCipherKeyIdCacheMap keyIdCache;
 	EncryptCipherBaseKeyId latestBaseCipherKeyId;
+	EncryptCipherRandomSalt latestRandomSalt;
 };

 using BlobCipherDomainCacheMap = std::unordered_map<EncryptCipherDomainId, Reference<BlobCipherKeyIdCache>>;

@@ -277,12 +318,32 @@ public:
 	// The cipherKeys are indexed using 'baseCipherId', given cipherKeys are immutable,
 	// attempting to re-insert same 'identical' cipherKey is treated as a NOP (success),
 	// however, an attempt to update cipherKey would throw 'encrypt_update_cipher' exception.
+	//
+	// API NOTE: Recommended usecase is to update encryption cipher-key is updated the external
+	// keyManagementSolution to limit an encryption key lifetime
+
 	void insertCipherKey(const EncryptCipherDomainId& domainId,
 	                     const EncryptCipherBaseKeyId& baseCipherId,
 	                     const uint8_t* baseCipher,
 	                     int baseCipherLen);
-	// API returns the last insert cipherKey for a given encyryption domain Id.
+
+	// Enable clients to insert base encryption cipher details to the BlobCipherKeyCache.
+	// The cipherKeys are indexed using 'baseCipherId', given cipherKeys are immutable,
+	// attempting to re-insert same 'identical' cipherKey is treated as a NOP (success),
+	// however, an attempt to update cipherKey would throw 'encrypt_update_cipher' exception.
+	//
+	// API NOTE: Recommended usecase is to update encryption cipher-key regeneration while performing
+	// decryption. The encryptionheader would contain relevant details including: 'encryptDomainId',
+	// 'baseCipherId' & 'salt'. The caller needs to fetch 'baseCipherKey' detail and re-populate KeyCache.
+	// Also, the invocation will NOT update the latest cipher-key details.
+
+	void insertCipherKey(const EncryptCipherDomainId& domainId,
+	                     const EncryptCipherBaseKeyId& baseCipherId,
+	                     const uint8_t* baseCipher,
+	                     int baseCipherLen,
+	                     const EncryptCipherRandomSalt& salt);
+
+	// API returns the last insert cipherKey for a given encryption domain Id.
 	// If none exists, it would throw 'encrypt_key_not_found' exception.

 	Reference<BlobCipherKey> getLatestCipherKey(const EncryptCipherDomainId& domainId);

@@ -291,14 +352,16 @@ public:
 	// If none exists, it would throw 'encrypt_key_not_found' exception.

 	Reference<BlobCipherKey> getCipherKey(const EncryptCipherDomainId& domainId,
-	                                      const EncryptCipherBaseKeyId& baseCipherId);
+	                                      const EncryptCipherBaseKeyId& baseCipherId,
+	                                      const EncryptCipherRandomSalt& salt);

 	// API returns point in time list of all 'cached' cipherKeys for a given encryption domainId.
 	std::vector<Reference<BlobCipherKey>> getAllCiphers(const EncryptCipherDomainId& domainId);

 	// API enables dropping all 'cached' cipherKeys for a given encryption domain Id.
 	// Useful to cleanup cache if an encryption domain gets removed/destroyed etc.

-	void resetEncyrptDomainId(const EncryptCipherDomainId domainId);
+	void resetEncryptDomainId(const EncryptCipherDomainId domainId);

 	static Reference<BlobCipherKeyCache> getInstance() {
 		if (g_network->isSimulated()) {

@@ -364,7 +427,7 @@ public:
 	                             const BlobCipherEncryptHeader& header,
 	                             Arena&);

-	// Enable caller to validate encryption header auth-token (if available) without needing to read the full encyrpted
+	// Enable caller to validate encryption header auth-token (if available) without needing to read the full encrypted
 	// payload. The call is NOP unless header.flags.authTokenMode == ENCRYPT_HEADER_AUTH_TOKEN_MODE_MULTI.

 	void verifyHeaderAuthToken(const BlobCipherEncryptHeader& header, Arena& arena);
@@ -1,23 +1,16 @@
 #!/usr/bin/env bash
-set -Eeuo pipefail
+set -Eeuxo pipefail

 namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
 POD_NUM=$(echo $POD_NAME | cut -d - -f3)
 KEY="ycsb_load_${POD_NUM}_of_${NUM_PODS}_complete"
 CLI=$(ls /var/dynamic-conf/bin/*/fdbcli | head -n1)
-if [ ${MODE} != "load" ]; then
 echo "WAITING FOR ALL PODS TO COME UP"
 while [[ $(kubectl get pods -n ${namespace} -l name=ycsb,run=${RUN_ID} --field-selector=status.phase=Running | grep -cv NAME) -lt ${NUM_PODS} ]]; do
-  sleep 0.1
+  sleep 1
 done
 echo "ALL PODS ARE UP"
-else
-  if ${CLI} --exec "get ${KEY}" | grep is ;
-  then
-    # load already completed
-    exit 0
-  fi
-fi;

 echo "RUNNING YCSB"
 ./bin/ycsb.sh ${MODE} foundationdb -s -P workloads/${WORKLOAD} ${YCSB_ARGS}

@@ -27,7 +20,3 @@ echo "COPYING HISTOGRAMS TO S3"
 aws s3 sync --sse aws:kms --exclude "*" --include "histogram.*" /tmp s3://${BUCKET}/ycsb_histograms/${namespace}/${POD_NAME}
 echo "COPYING HISTOGRAMS TO S3 FINISHED"

-if [ ${MODE} == "load" ]; then
-  ${CLI} --exec "writemode on; set ${KEY} 1"
-  echo "WROTE LOAD COMPLETION KEY"
-fi
@@ -19,7 +19,7 @@ from local_cluster import LocalCluster, random_secret_string


 SUPPORTED_PLATFORMS = ["x86_64"]
-SUPPORTED_VERSIONS = ["7.2.0", "7.1.0", "7.0.0", "6.3.24", "6.3.23",
+SUPPORTED_VERSIONS = ["7.2.0", "7.1.1", "7.1.0", "7.0.0", "6.3.24", "6.3.23",
                       "6.3.22", "6.3.18", "6.3.17", "6.3.16", "6.3.15", "6.3.13", "6.3.12", "6.3.9", "6.2.30",
                       "6.2.29", "6.2.28", "6.2.27", "6.2.26", "6.2.25", "6.2.24", "6.2.23", "6.2.22", "6.2.21",
                       "6.2.20", "6.2.19", "6.2.18", "6.2.17", "6.2.16", "6.2.15", "6.2.10", "6.1.13", "6.1.12",

@@ -353,6 +353,17 @@ class UpgradeTest:
             test_retcode = self.tester_retcode
         return test_retcode

+    def grep_logs_for_events(self, severity):
+        return (
+            subprocess.getoutput(
+                "grep -r 'Severity=\"{}\"' {}".format(
+                    severity,
+                    self.cluster.log.as_posix())
+            )
+            .rstrip()
+            .splitlines()
+        )
+
     # Check the cluster log for errors
     def check_cluster_logs(self, error_limit=100):
         sev40s = (

@@ -380,9 +391,28 @@ class UpgradeTest:
             print(
                 ">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails", err_cnt)
         else:
-            print("No error found in logs")
+            print("No errors found in logs")
         return err_cnt == 0

+    # Check the server and client logs for warnings and dump them
+    def dump_warnings_in_logs(self, limit=100):
+        sev30s = (
+            subprocess.getoutput(
+                "grep -r 'Severity=\"30\"' {}".format(
+                    self.cluster.log.as_posix())
+            )
+            .rstrip()
+            .splitlines()
+        )
+
+        if (len(sev30s) == 0):
+            print("No warnings found in logs")
+        else:
+            print(">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
+                len(sev30s)))
+            for line in sev30s[:limit]:
+                print(line)
+
     # Dump the last cluster configuration and cluster logs
     def dump_cluster_logs(self):
         for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):

@@ -457,6 +487,7 @@ if __name__ == "__main__":
         errcode = test.exec_test(args)
         if not test.check_cluster_logs():
             errcode = 1 if errcode == 0 else errcode
+        test.dump_warnings_in_logs()
         if errcode != 0 and not args.disable_log_dump:
             test.dump_cluster_logs()
