Add the ability to access name and description in Error. Update error descriptions.
commit d30c730f75 (parent 91281ec754)
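The changes below add an Error::name() accessor alongside Error::what(), back the error-code table with a (name, description) pair per code, and reword many descriptions in error_definitions.h. As a caller-side illustration of the intended split (a minimal sketch; the header path and the reportCommitFailure helper are assumptions, not part of this commit):

// Minimal usage sketch (assumed header path; reportCommitFailure is a
// hypothetical helper, not part of this commit).
#include <cstdio>
#include "flow/Error.h"

void reportCommitFailure(const Error& e) {
	// e.name() yields the symbolic identifier, e.g. "not_committed";
	// e.what() yields the description, e.g. "Transaction not committed
	// due to conflict with another transaction"; e.code() yields 1020.
	printf("Error: %s (%d): %s\n", e.name(), e.code(), e.what());
}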
@@ -131,7 +131,7 @@ namespace FDB {
 			}
 			else {
 				if(versionBytes.get().size() != 12) {
-					throw invalid_database_value();
+					throw invalid_directory_layer_metadata();
 				}
 				if(((uint32_t*)versionBytes.get().begin())[0] > DirectoryLayer::VERSION[0]) {
 					throw incompatible_directory_version();
@@ -51,7 +51,7 @@ namespace FDB {
 	int64_t count = 0;
 	if(countValue.present()) {
 		if(countValue.get().size() != 8) {
-			throw invalid_database_value();
+			throw invalid_directory_layer_metadata();
 		}
 		count = *(int64_t*)countValue.get().begin();
 	}
@@ -1585,7 +1585,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
 		}
 		catch (Error& e) {
 			if(LOG_ERRORS) {
-				printf("Error: %s (%d)\n", e.what(), e.code());
+				printf("Error: %s (%d)\n", e.name(), e.code());
 				fflush(stdout);
 			}
@@ -1673,7 +1673,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
 	catch(Error &e) {
 		TraceEvent("ErrorRunningTest").error(e);
 		if(LOG_ERRORS) {
-			printf("Flow tester encountered error: %s\n", e.what());
+			printf("Flow tester encountered error: %s\n", e.name());
 			fflush(stdout);
 		}
 		flushAndExit(1);
@@ -1715,7 +1715,7 @@ ACTOR void _test_versionstamp() {
 	catch (Error &e) {
 		TraceEvent("ErrorRunningTest").error(e);
 		if (LOG_ERRORS) {
-			printf("Flow tester encountered error: %s\n", e.what());
+			printf("Flow tester encountered error: %s\n", e.name());
 			fflush(stdout);
 		}
 		flushAndExit(1);
@@ -1756,7 +1756,7 @@ int main( int argc, char** argv ) {
 		flushAndExit(FDB_EXIT_SUCCESS);
 	}
 	catch (Error& e) {
-		fprintf(stderr, "Error: %s\n", e.what());
+		fprintf(stderr, "Error: %s\n", e.name());
 		TraceEvent(SevError, "MainError").error(e);
 		flushAndExit(FDB_EXIT_MAIN_ERROR);
 	}
@@ -212,7 +212,7 @@ public:
 		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 		TraceEvent(SevWarn, "FileRestoreError").error(e).detail("RestoreUID", u).detail("Description", details).detail("TaskInstance", (uint64_t)taskInstance);
-		std::string msg = format("ERROR: %s %s", e.what(), details.c_str());
+		std::string msg = format("ERROR: %s (%s)", details.c_str(), e.what());
 		RestoreConfig restore(u);
 		restore.lastError().set(tr, {StringRef(msg), (int64_t)now()});
 		return Void();
@@ -788,7 +788,7 @@ namespace fileBackup {
 		}
 		catch (Error &e) {
 			state Error err = e;
-			Void _ = wait(logError(cx, keyErrors, format("ERROR: Failed to open file `%s' because of error %s", fileName.c_str(), err.what())));
+			Void _ = wait(logError(cx, keyErrors, format("ERROR: Failed to open file `%s' because of error: %s", fileName.c_str(), err.what())));
 			throw err;
 		}
 	}
@@ -820,7 +820,7 @@ namespace fileBackup {
 			throw;

 		state Error err = e;
-		Void _ = wait(logError(cx, keyErrors, format("ERROR: Failed to write to file `%s' in container '%s' because of error %s", fileName.c_str(), backupContainer.c_str(), err.what())));
+		Void _ = wait(logError(cx, keyErrors, format("ERROR: Failed to write to file `%s' in container '%s' because of error: %s", fileName.c_str(), backupContainer.c_str(), err.what())));
 		throw err;
 	}
@@ -922,7 +922,7 @@ namespace fileBackup {
 			throw;

 		state Error err = e;
-		Void _ = wait(logError(tr->getDatabase(), keyErrors, format("ERROR: Failed to write to file `%s' because of error %s", filename.c_str(), err.what())));
+		Void _ = wait(logError(tr->getDatabase(), keyErrors, format("ERROR: Failed to write to file `%s' because of error: %s", filename.c_str(), err.what())));

 		throw err;
 	}
@@ -1158,7 +1158,7 @@ namespace fileBackup {
 				throw e2;
 			}

-			Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error %s", outFileName.c_str(), e2.what())));
+			Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error: %s", outFileName.c_str(), e2.what())));
 			throw e2;
 		}
 	}
@@ -1166,7 +1166,7 @@ namespace fileBackup {
 			return Void();
 		}

-		Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error %s", outFileName.c_str(), err.what())));
+		Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error: %s", outFileName.c_str(), err.what())));

 		throw err;
 	}
@@ -1311,7 +1311,7 @@ namespace fileBackup {
 			}

 			state Error err = e;
-			Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error %s", fileName.c_str(), err.what())));
+			Void _ = wait(logError(cx, task->params[FileBackupAgent::keyErrors], format("ERROR: Failed to write to file `%s' because of error: %s", fileName.c_str(), err.what())));

 			throw err;
 		}
@@ -3373,7 +3373,7 @@ public:
 		try {
 			Void _ = wait(timeoutError(bc->create(), 30));
 		} catch(Error &e) {
-			fprintf(stderr, "ERROR: Could not create backup container: %s\n", e.what());
+			fprintf(stderr, "ERROR: Could not create backup container: %s\n", e.what());
 			throw backup_error();
 		}
@@ -1470,7 +1470,7 @@ THREAD_FUNC setAbort(void *arg) {
 		((ThreadSingleAssignmentVar<Void>*)arg)->delref();
 	}
 	catch(Error &e) {
-		printf("Caught error in setAbort: %s\n", e.what());
+		printf("Caught error in setAbort: %s\n", e.name());
 		ASSERT(false);
 	}
 	THREAD_RETURN;
@@ -1489,7 +1489,7 @@ THREAD_FUNC releaseMem(void *arg) {
 		((ThreadSingleAssignmentVar<int>*)arg)->releaseMemory();
 	}
 	catch(Error &e) {
-		printf("Caught error in releaseMem: %s\n", e.what());
+		printf("Caught error in releaseMem: %s\n", e.name());
 		ASSERT(false);
 	}
 	THREAD_RETURN;
@@ -1501,7 +1501,7 @@ THREAD_FUNC destroy(void *arg) {
 		((ThreadSingleAssignmentVar<int>*)arg)->cancel();
 	}
 	catch(Error &e) {
-		printf("Caught error in destroy: %s\n", e.what());
+		printf("Caught error in destroy: %s\n", e.name());
 		ASSERT(false);
 	}
 	THREAD_RETURN;
@@ -1514,7 +1514,7 @@ THREAD_FUNC cancel(void *arg) {
 		destroy(arg);
 	}
 	catch(Error &e) {
-		printf("Caught error in cancel: %s\n", e.what());
+		printf("Caught error in cancel: %s\n", e.name());
 		ASSERT(false);
 	}
 	THREAD_RETURN;
@@ -1608,7 +1608,7 @@ THREAD_FUNC runSingleAssignmentVarTest(void *arg) {
 		}, NULL);
 	}
 	catch(Error &e) {
-		printf("Caught error in test: %s\n", e.what());
+		printf("Caught error in test: %s\n", e.name());
 		*done = true;
 		ASSERT(false);
 	}
@@ -334,7 +334,7 @@ namespace HTTP {
 	} catch(Error &e) {
 		double elapsed = timer() - send_start;
 		if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
-			printf("[%s] HTTP *ERROR*=%s, time=%fs %s %s [%u out]\n", conn->getDebugID().toString().c_str(), e.what(), elapsed, verb.c_str(), resource.c_str(), (int)total_sent);
+			printf("[%s] HTTP *ERROR*=%s, time=%fs %s %s [%u out]\n", conn->getDebugID().toString().c_str(), e.name(), elapsed, verb.c_str(), resource.c_str(), (int)total_sent);
 		throw;
 	}
 }
@@ -786,8 +786,10 @@ ACTOR Future<Void> commitBatch(
 	// Send replies to clients
 	for (int t = 0; t < trs.size(); t++)
 	{
-		if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware))
+		if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware)) {
+			ASSERT_WE_THINK(commitVersion != invalidVersion);
 			trs[t].reply.send(CommitID(commitVersion, t));
+		}
 		else if (committed[t] == ConflictBatch::TransactionTooOld)
 			trs[t].reply.sendError(past_version());
 		else
@@ -77,7 +77,7 @@ ACTOR Future<Void> start(Database cx, ApiWorkload *self) {
 	}
 	catch(Error &e) {
 		if(e.code() != error_code_actor_cancelled)
-			self->testFailure(format("Unhandled error %d: %s", e.code(), e.what()));
+			self->testFailure(format("Unhandled error %d: %s", e.code(), e.name()));
 	}

 	return Void();
@@ -271,7 +271,7 @@ struct ConsistencyCheckWorkload : TestWorkload
 			if(e.code() == error_code_past_version || e.code() == error_code_future_version || e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed || e.code() == error_code_server_request_queue_full)
 				TraceEvent("ConsistencyCheck_Retry").error(e); // FIXME: consistency check does not retry in this case
 			else
-				self->testFailure(format("Error %d - %s", e.code(), e.what()));
+				self->testFailure(format("Error %d - %s", e.code(), e.name()));
 		}
 	}
@@ -169,7 +169,7 @@ struct PerformanceWorkload : TestWorkload {
 			DistributedTestResults r = wait( runWorkload( cx, self->testers, self->dbName, spec ) );
 			results = r;
 		} catch(Error& e) {
-			TraceEvent("PerformanceRunError").detail("Error", e.what()).detail("Workload", printable(self->probeWorkload));
+			TraceEvent("PerformanceRunError").detail("Workload", printable(self->probeWorkload)).error(e, true);
 			break;
 		}
 		PerfMetric tpsMetric = self->getNamedMetric( "Transactions/sec", results.metrics );
@@ -204,7 +204,7 @@ struct TaskBucketCorrectnessWorkload : TestWorkload {
 			}
 			catch (Error &e) {
 				if (e.code() == error_code_timed_out)
-					TraceEvent(SevWarn, "TaskBucketCorrectness").detail("error_code_timed_out", e.what());
+					TraceEvent(SevWarn, "TaskBucketCorrectness").error(e);
 				else
 					Void _ = wait(tr->onError(e));
 			}
@@ -216,7 +216,7 @@ struct TaskBucketCorrectnessWorkload : TestWorkload {
 			}
 		}
 		catch (Error &e) {
-			TraceEvent(SevError, "TaskBucketCorrectness").detail("error_code", e.code()).detail("error", e.what());
+			TraceEvent(SevError, "TaskBucketCorrectness").error(e);
 			Void _ = wait(tr->onError(e));
 		}
@@ -180,7 +180,7 @@ struct ThreadSafetyWorkload : TestWorkload {
 		}
 		catch(Error &e) {
 			self->success = false;
-			printf("Thread %d.%d failed: %s\n", self->clientId, i, e.what());
+			printf("Thread %d.%d failed: %s\n", self->clientId, i, e.name());
 			TraceEvent(SevError, "ThreadSafety_ThreadFailed").error(e);
 		}
@@ -81,11 +81,18 @@ ErrorCodeTable& Error::errorCodeTable() {
 	return table;
 }

+const char* Error::name() const {
+	auto table = errorCodeTable();
+	auto it = table.find(error_code);
+	if (it == table.end()) return "UNKNOWN_ERROR";
+	return it->second.first;
+}
+
 const char* Error::what() const {
 	auto table = errorCodeTable();
 	auto it = table.find(error_code);
 	if (it == table.end()) return "UNKNOWN_ERROR";
-	return it->second;
+	return it->second.second;
 }

 void Error::init() {
@@ -99,12 +106,12 @@ Error Error::asInjectedFault() const {
 }

 ErrorCodeTable::ErrorCodeTable() {
-	#define ERROR(name, number, comment) (*this)[number] = #name; enum { Duplicate_Error_Code_##number = 0 };
+	#define ERROR(name, number, description) addCode(number, #name, description); enum { Duplicate_Error_Code_##number = 0 };
 	#include "error_definitions.h"
 }

-void ErrorCodeTable::addCode(int code, const char* message) {
-	(*this)[code] = message;
+void ErrorCodeTable::addCode(int code, const char *name, const char *description) {
+	(*this)[code] = std::make_pair(name, description);
 }

 bool isAssertDisabled(int line) {
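With the redefined macro, each entry in error_definitions.h now registers both the stringized symbol and its description through addCode(). A standalone sketch of the pair-valued table this hunk introduces (demo types only, mirroring the diff; not the actual flow code):

#include <cstdio>
#include <map>
#include <utility>

// Demo stand-in for the pair-valued ErrorCodeTable declared in this commit.
struct DemoErrorCodeTable : std::map<int, std::pair<const char*, const char*>> {
	void addCode(int code, const char* name, const char* description) {
		(*this)[code] = std::make_pair(name, description);
	}
};

int main() {
	DemoErrorCodeTable table;
	// Hand-expanded equivalent of one entry:
	// ERROR( not_committed, 1020, "Transaction not committed due to conflict with another transaction" )
	table.addCode(1020, "not_committed", "Transaction not committed due to conflict with another transaction");
	printf("%s: %s\n", table[1020].first, table[1020].second); // name, then description
	return 0;
}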
|
|
@ -32,15 +32,16 @@
|
|||
|
||||
enum { invalid_error_code = 0xffff };
|
||||
|
||||
class ErrorCodeTable : public std::map<int, const char*> {
|
||||
class ErrorCodeTable : public std::map<int, std::pair<const char*, const char*>> {
|
||||
public:
|
||||
ErrorCodeTable();
|
||||
void addCode(int code, const char* message);
|
||||
void addCode(int code, const char *name, const char *description);
|
||||
};
|
||||
|
||||
class Error {
|
||||
public:
|
||||
int code() const { return error_code; }
|
||||
const char* name() const;
|
||||
const char* what() const;
|
||||
bool isInjectedFault() const { return flags & FLAG_INJECTED_FAULT; } // Use as little as possible, so injected faults effectively test real faults!
|
||||
bool isValid() const { return error_code != invalid_error_code; }
|
||||
|
@@ -68,7 +69,7 @@ private:
 };

 #undef ERROR
-#define ERROR(name, number, comment) inline Error name() { return Error( number ); }; enum { error_code_##name = number };
+#define ERROR(name, number, description) inline Error name() { return Error( number ); }; enum { error_code_##name = number };
 #include "error_definitions.h"

 //actor_cancelled has been renamed
@@ -690,7 +690,8 @@ TraceEvent& TraceEvent::error(class Error const& error, bool includeCancelled) {
 	} else {
 		if (error.isInjectedFault())
 			detail("ErrorIsInjectedFault", true);
-		detail("Error", error.what());
+		detail("Error", error.name());
+		detail("ErrorDescription", error.what());
 		detail("ErrorCode", error.code());
 	}
 }
@@ -35,37 +35,37 @@ ERROR( end_of_stream, 1, "End of stream" )
 ERROR( operation_failed, 1000, "Operation failed")
 ERROR( wrong_shard_server, 1001, "Shard is not available from this server")
 ERROR( timed_out, 1004, "Operation timed out" )
-ERROR( coordinated_state_conflict, 1005, "Conflict changing coordination information" )
+ERROR( coordinated_state_conflict, 1005, "Conflict occurred while changing coordination information" )
 ERROR( all_alternatives_failed, 1006, "All alternatives failed" )
-ERROR( past_version, 1007, "Version no longer available" )
+ERROR( past_version, 1007, "Past version no longer available" )
 ERROR( no_more_servers, 1008, "Not enough physical servers available" )
 ERROR( future_version, 1009, "Request for future version" )
 ERROR( movekeys_conflict, 1010, "Conflicting attempts to change data distribution" )
 ERROR( tlog_stopped, 1011, "TLog stopped" )
-ERROR( server_request_queue_full, 1012, "The server request queue is full" )
-ERROR( not_committed, 1020, "Transaction not committed" )
+ERROR( server_request_queue_full, 1012, "Server request queue is full" )
+ERROR( not_committed, 1020, "Transaction not committed due to conflict with another transaction" )
 ERROR( commit_unknown_result, 1021, "Transaction may or may not have committed" )
 ERROR( transaction_cancelled, 1025, "Operation aborted because the transaction was cancelled" )
 ERROR( connection_failed, 1026, "Network connection failed" )
-ERROR( coordinators_changed, 1027, "The coordination servers have changed" )
-ERROR( new_coordinators_timed_out, 1028, "The new coordination servers did not respond in a timely way" )
-ERROR( watch_cancelled, 1029, "Storage server is monitoring too many watches" )
-ERROR( request_maybe_delivered, 1030, "The request may or may not have been delivered" )
+ERROR( coordinators_changed, 1027, "Coordination servers have changed" )
+ERROR( new_coordinators_timed_out, 1028, "New coordination servers did not respond in a timely way" )
+ERROR( watch_cancelled, 1029, "Watch cancelled because storage server watch limit exceeded" )
+ERROR( request_maybe_delivered, 1030, "Request may or may not have been delivered" )
 ERROR( transaction_timed_out, 1031, "Operation aborted because the transaction timed out" )
 ERROR( too_many_watches, 1032, "Too many watches currently set" )
-ERROR( locality_information_unavailable, 1033, "Locality information is not available" )
-ERROR( watches_disabled, 1034, "Disabling read your writes also disables watches" )
-ERROR( default_error_or, 1035, "The error code for a default constructed error or" )
+ERROR( locality_information_unavailable, 1033, "Locality information not available" )
+ERROR( watches_disabled, 1034, "Watches cannot be set if read your writes is disabled" )
+ERROR( default_error_or, 1035, "Default error for an ErrorOr object" )
 ERROR( accessed_unreadable, 1036, "Read or wrote an unreadable key" )
-ERROR( process_behind, 1037, "The process is behind" )
-ERROR( database_locked, 1038, "The database is locked" )
-ERROR( cluster_version_changed, 1039, "The cluster has been upgraded to a new protocol version" )
-ERROR( external_client_already_loaded, 1040, "The external client has already been loaded" )
+ERROR( process_behind, 1037, "Storage process does not have recent mutations" )
+ERROR( database_locked, 1038, "Database is locked" )
+ERROR( cluster_version_changed, 1039, "Cluster has been upgraded to a new protocol version" )
+ERROR( external_client_already_loaded, 1040, "External client has already been loaded" )

 ERROR( broken_promise, 1100, "Broken promise" )
 ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" )
-ERROR( future_released, 1102, "The future has been released" )
-ERROR( connection_leaked, 1103, "A connection object has been leaked" )
+ERROR( future_released, 1102, "Future has been released" )
+ERROR( connection_leaked, 1103, "Connection object leaked" )

 ERROR( recruitment_failed, 1200, "Recruitment of a server failed" ) // Be careful, catching this will delete the data of a storage server or tlog permanently
 ERROR( move_to_removed_server, 1201, "Attempt to move keys to a storage server that was removed" )
@@ -80,53 +80,53 @@ ERROR( master_proxy_failed, 1209, "Master terminating because a Proxy failed" )
 ERROR( master_resolver_failed, 1210, "Master terminating because a Resolver failed" )

 // 15xx Platform errors
-ERROR( platform_error, 1500, "A platform error occurred" )
+ERROR( platform_error, 1500, "Platform error" )
 ERROR( large_alloc_failed, 1501, "Large block allocation failed" )
-ERROR( performance_counter_error, 1502, "QueryPerformanceCounter doesn't work" )
+ERROR( performance_counter_error, 1502, "QueryPerformanceCounter error" )

-ERROR( io_error, 1510, "A disk i/o operation failed" )
+ERROR( io_error, 1510, "Disk i/o operation failed" )
 ERROR( file_not_found, 1511, "File not found" )
 ERROR( bind_failed, 1512, "Unable to bind to network" )
 ERROR( file_not_readable, 1513, "File could not be read from" )
 ERROR( file_not_writable, 1514, "File could not be written to" )
 ERROR( no_cluster_file_found, 1515, "No cluster file found in current directory or default location" )
 ERROR( file_too_large, 1516, "File too large to be read" )
-ERROR( non_sequential_op, 1517, "Non sequential file operation not allowed." )
-ERROR( http_bad_response, 1518, "HTTP response was not valid." )
-ERROR( http_not_accepted, 1519, "HTTP request not accepted." )
-ERROR( checksum_failed, 1520, "A data checksum failed." )
-ERROR( io_timeout, 1521, "A disk IO operation failed to complete in a timely manner." )
-ERROR( file_corrupt, 1522, "A structurally corrupt data file was detected." )
+ERROR( non_sequential_op, 1517, "Non sequential file operation not allowed" )
+ERROR( http_bad_response, 1518, "HTTP response was not valid" )
+ERROR( http_not_accepted, 1519, "HTTP request not accepted" )
+ERROR( checksum_failed, 1520, "Data does not match checksum" )
+ERROR( io_timeout, 1521, "Disk i/o operation failed to complete in a timely manner" )
+ERROR( file_corrupt, 1522, "Structurally corrupt data file detected" )

 // 2xxx Attempt (presumably by a _client_) to do something illegal. If an error is known to
 // be internally caused, it should be 41xx
-ERROR( client_invalid_operation, 2000, "The client made an invalid API call" )
+ERROR( client_invalid_operation, 2000, "Invalid API call" )
 ERROR( commit_read_incomplete, 2002, "Commit with incomplete read" )
-ERROR( test_specification_invalid, 2003, "The test specification is invalid" )
-ERROR( key_outside_legal_range, 2004, "The specified key was outside the legal range" )
-ERROR( inverted_range, 2005, "The specified range has a begin key larger than the end key" )
-ERROR( invalid_option_value, 2006, "An invalid value was passed with the specified option" )
+ERROR( test_specification_invalid, 2003, "Invalid test specification" )
+ERROR( key_outside_legal_range, 2004, "Key outside legal range" )
+ERROR( inverted_range, 2005, "Range begin key larger than end key" )
+ERROR( invalid_option_value, 2006, "Option set with an invalid value" )
 ERROR( invalid_option, 2007, "Option not valid in this context" )
 ERROR( network_not_setup, 2008, "Action not possible before the network is configured" )
 ERROR( network_already_setup, 2009, "Network can be configured only once" )
 ERROR( read_version_already_set, 2010, "Transaction already has a read version set" )
 ERROR( version_invalid, 2011, "Version not valid" )
-ERROR( range_limits_invalid, 2012, "getRange limits not valid" )
-ERROR( invalid_database_name, 2013, "Database name not supported in this version" )
+ERROR( range_limits_invalid, 2012, "Range limits not valid" )
+ERROR( invalid_database_name, 2013, "Database name must be 'DB'" )
 ERROR( attribute_not_found, 2014, "Attribute not found in string" )
-ERROR( future_not_set, 2015, "The future has not been set" )
-ERROR( future_not_error, 2016, "The future is not an error" )
-ERROR( used_during_commit, 2017, "An operation was issued while a commit was outstanding" )
-ERROR( invalid_mutation_type, 2018, "An invalid atomic mutation type was issued" )
-ERROR( attribute_too_large, 2019, "The attribute is larger than an int" )
+ERROR( future_not_set, 2015, "Future not ready" )
+ERROR( future_not_error, 2016, "Future not an error" )
+ERROR( used_during_commit, 2017, "Operation issued while a commit was outstanding" )
+ERROR( invalid_mutation_type, 2018, "Unrecognized atomic mutation type" )
+ERROR( attribute_too_large, 2019, "Attribute too large for type int" )
 ERROR( transaction_invalid_version, 2020, "Transaction does not have a valid commit version" )
 ERROR( transaction_read_only, 2021, "Transaction is read-only and therefore does not have a commit version" )
 ERROR( environment_variable_network_option_failed, 2022, "Environment variable network option could not be set" )

 ERROR( incompatible_protocol_version, 2100, "Incompatible protocol version" )
-ERROR( transaction_too_large, 2101, "Transaction too large" )
-ERROR( key_too_large, 2102, "Key too large" )
-ERROR( value_too_large, 2103, "Value too large" )
+ERROR( transaction_too_large, 2101, "Transaction exceeds byte limit" )
+ERROR( key_too_large, 2102, "Key length exceeds limit" )
+ERROR( value_too_large, 2103, "Value length exceeds limit" )
 ERROR( connection_string_invalid, 2104, "Connection string invalid" )
 ERROR( address_in_use, 2105, "Local address in use" )
 ERROR( invalid_local_address, 2106, "Invalid local address" )
@@ -134,30 +134,30 @@ ERROR( tls_error, 2107, "TLS error" )
 ERROR( unsupported_operation, 2108, "Unsupported operation" )

 // 2200 - errors from bindings and official APIs
-ERROR( api_version_unset, 2200, "API version must be set" )
+ERROR( api_version_unset, 2200, "API version is not set" )
 ERROR( api_version_already_set, 2201, "API version may be set only once" )
 ERROR( api_version_invalid, 2202, "API version not valid" )
-ERROR( api_version_not_supported, 2203, "API version not supported in this version or binding" )
+ERROR( api_version_not_supported, 2203, "API version not supported" )
 ERROR( exact_mode_without_limits, 2210, "EXACT streaming mode requires limits, but none were given" )

-ERROR( invalid_tuple_data_type, 2250, "Invalid data type in packed tuple")
+ERROR( invalid_tuple_data_type, 2250, "Unrecognized data type in packed tuple")
 ERROR( invalid_tuple_index, 2251, "Tuple does not have element at specified index")
 ERROR( key_not_in_subspace, 2252, "Cannot unpack key that is not in subspace" )
 ERROR( manual_prefixes_not_enabled, 2253, "Cannot specify a prefix unless manual prefixes are enabled" )
 ERROR( prefix_in_partition, 2254, "Cannot specify a prefix in a partition" )
-ERROR( cannot_open_root_directory, 2255, "The root directory cannot be opened" )
-ERROR( directory_already_exists, 2256, "The given directory already exists" )
-ERROR( directory_does_not_exist, 2257, "The given directory does not exist" )
-ERROR( parent_directory_does_not_exist, 2258, "The given directory's parent does not exist" )
-ERROR( mismatched_layer, 2259, "The directory has already been created with a different byte string" )
-ERROR( invalid_database_value, 2260, "Could not parse value in database" )
+ERROR( cannot_open_root_directory, 2255, "Root directory cannot be opened" )
+ERROR( directory_already_exists, 2256, "Directory already exists" )
+ERROR( directory_does_not_exist, 2257, "Directory does not exist" )
+ERROR( parent_directory_does_not_exist, 2258, "Directory's parent does not exist" )
+ERROR( mismatched_layer, 2259, "Directory has already been created with a different layer string" )
+ERROR( invalid_directory_layer_metadata, 2260, "Invalid directory layer metadata" )
 ERROR( cannot_move_directory_between_partitions, 2261, "Directory cannot be moved between partitions" )
 ERROR( cannot_use_partition_as_subspace, 2262, "Directory partition cannot be used as subspace" )
 ERROR( incompatible_directory_version, 2263, "Directory layer was created with an incompatible version" )
-ERROR( directory_prefix_not_empty, 2264, "The database has keys stored at the prefix chosen by the automatic prefix allocator" )
-ERROR( directory_prefix_in_use, 2265, "The directory layer already has a conflicting prefix" )
-ERROR( invalid_destination_directory, 2266, "The target directory is invalid" )
-ERROR( cannot_modify_root_directory, 2267, "The root directory cannot be modified" )
+ERROR( directory_prefix_not_empty, 2264, "Database has keys stored at the prefix chosen by the automatic prefix allocator" )
+ERROR( directory_prefix_in_use, 2265, "Directory layer already has a conflicting prefix" )
+ERROR( invalid_destination_directory, 2266, "Target directory is invalid" )
+ERROR( cannot_modify_root_directory, 2267, "Root directory cannot be modified" )
 ERROR( invalid_uuid_size, 2268, "UUID is not sixteen bytes");

 // 2300 - backup and restore errors
@@ -167,12 +167,12 @@ ERROR( backup_duplicate, 2311, "Backup duplicate request")
 ERROR( backup_unneeded, 2312, "Backup unneeded request")
 ERROR( backup_bad_block_size, 2313, "Backup file block size too small")
 ERROR( restore_invalid_version, 2361, "Invalid restore version")
-ERROR( restore_corrupted_data, 2362, "Corrupted restore data")
-ERROR( restore_missing_data, 2363, "Missing restore data")
+ERROR( restore_corrupted_data, 2362, "Corrupted backup data")
+ERROR( restore_missing_data, 2363, "Missing backup data")
 ERROR( restore_duplicate_tag, 2364, "Restore duplicate request")
 ERROR( restore_unknown_tag, 2365, "Restore tag does not exist")
-ERROR( restore_unknown_file_type, 2366, "Unknown file type")
-ERROR( restore_unsupported_file_version, 2367, "Unsupported file version")
+ERROR( restore_unknown_file_type, 2366, "Unknown backup file type")
+ERROR( restore_unsupported_file_version, 2367, "Unsupported backup file version")
 ERROR( restore_bad_read, 2368, "Unexpected number of bytes read")
 ERROR( restore_corrupted_data_padding, 2369, "Backup file has unexpected padding bytes")
 ERROR( restore_destination_not_empty, 2370, "Attempted to restore into a non-empty destination database")