Merge branch 'release-5.2' of github.com:apple/foundationdb into release-5.2
This commit is contained in:
commit
e101f9587e
|
@ -10,38 +10,38 @@ macOS
|
|||
|
||||
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
|
||||
|
||||
* `FoundationDB-5.2.5.pkg <https://www.foundationdb.org/downloads/5.2.5/macOS/installers/FoundationDB-5.2.5.pkg>`_
|
||||
* `FoundationDB-5.2.6.pkg <https://www.foundationdb.org/downloads/5.2.6/macOS/installers/FoundationDB-5.2.6.pkg>`_
|
||||
|
||||
Ubuntu
|
||||
------
|
||||
|
||||
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
|
||||
|
||||
* `foundationdb-clients-5.2.5-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.5/ubuntu/installers/foundationdb-clients_5.2.5-1_amd64.deb>`_
|
||||
* `foundationdb-server-5.2.5-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.5/ubuntu/installers/foundationdb-server_5.2.5-1_amd64.deb>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-5.2.6-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.6/ubuntu/installers/foundationdb-clients_5.2.6-1_amd64.deb>`_
|
||||
* `foundationdb-server-5.2.6-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.6/ubuntu/installers/foundationdb-server_5.2.6-1_amd64.deb>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL6
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
|
||||
|
||||
* `foundationdb-clients-5.2.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel6/installers/foundationdb-clients-5.2.5-1.el6.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.2.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel6/installers/foundationdb-server-5.2.5-1.el6.x86_64.rpm>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-5.2.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.6/rhel6/installers/foundationdb-clients-5.2.6-1.el6.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.2.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.6/rhel6/installers/foundationdb-server-5.2.6-1.el6.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL7
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
|
||||
|
||||
* `foundationdb-clients-5.2.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel7/installers/foundationdb-clients-5.2.5-1.el7.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.2.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel7/installers/foundationdb-server-5.2.5-1.el7.x86_64.rpm>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-5.2.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.6/rhel7/installers/foundationdb-clients-5.2.6-1.el7.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.2.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.6/rhel7/installers/foundationdb-server-5.2.6-1.el7.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
Windows
|
||||
-------
|
||||
|
||||
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
|
||||
|
||||
* `foundationdb-5.2.5-x64.msi <https://www.foundationdb.org/downloads/5.2.5/windows/installers/foundationdb-5.2.5-x64.msi>`_
|
||||
* `foundationdb-5.2.6-x64.msi <https://www.foundationdb.org/downloads/5.2.6/windows/installers/foundationdb-5.2.6-x64.msi>`_
|
||||
|
||||
API Language Bindings
|
||||
=====================
|
||||
|
@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
|
|||
|
||||
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
|
||||
|
||||
* `foundationdb-5.2.5.tar.gz <https://www.foundationdb.org/downloads/5.2.5/bindings/python/foundationdb-5.2.5.tar.gz>`_
|
||||
* `foundationdb-5.2.6.tar.gz <https://www.foundationdb.org/downloads/5.2.6/bindings/python/foundationdb-5.2.6.tar.gz>`_
|
||||
|
||||
Ruby 1.9.3/2.0.0+
|
||||
-----------------
|
||||
|
||||
* `fdb-5.2.5.gem <https://www.foundationdb.org/downloads/5.2.5/bindings/ruby/fdb-5.2.5.gem>`_
|
||||
* `fdb-5.2.6.gem <https://www.foundationdb.org/downloads/5.2.6/bindings/ruby/fdb-5.2.6.gem>`_
|
||||
|
||||
Java 8+
|
||||
-------
|
||||
|
||||
* `fdb-java-5.2.5.jar <https://www.foundationdb.org/downloads/5.2.5/bindings/java/fdb-java-5.2.5.jar>`_
|
||||
* `fdb-java-5.2.5-javadoc.jar <https://www.foundationdb.org/downloads/5.2.5/bindings/java/fdb-java-5.2.5-javadoc.jar>`_
|
||||
* `fdb-java-5.2.6.jar <https://www.foundationdb.org/downloads/5.2.6/bindings/java/fdb-java-5.2.6.jar>`_
|
||||
* `fdb-java-5.2.6-javadoc.jar <https://www.foundationdb.org/downloads/5.2.6/bindings/java/fdb-java-5.2.6-javadoc.jar>`_
|
||||
|
||||
Go 1.1+
|
||||
-------
|
||||
|
|
|
@ -2,6 +2,21 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
5.2.6
|
||||
=====
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* Improved backup error specificity regarding timeouts and active connection failures. `(PR #581) <https://github.com/apple/foundationdb/pull/581>`_
|
||||
|
||||
Fixes
|
||||
-----
|
||||
|
||||
* A memory leak was fixed in connection closing. `(PR #574) <https://github.com/apple/foundationdb/pull/574>`_
|
||||
* A memory leak was fixed in the coordinator's handling of disconnected clients. `(PR #579) <https://github.com/apple/foundationdb/pull/579>`_
|
||||
* Aligned memory allocation on MacOS was sometimes failing to allocate memory, causing a crash. `(PR #547) <https://github.com/apple/foundationdb/pull/547>`_
|
||||
|
||||
5.2.5
|
||||
=====
|
||||
|
||||
|
|
|
@ -460,6 +460,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
loop {
|
||||
state Optional<Error> err;
|
||||
state Optional<NetworkAddress> remoteAddress;
|
||||
state bool connectionEstablished = false;
|
||||
|
||||
try {
|
||||
// Start connecting
|
||||
|
@ -481,6 +482,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
|
||||
// Finish connecting, do request
|
||||
state BlobStoreEndpoint::ReusableConnection rconn = wait(timeoutError(frconn, bstore->knobs.connect_timeout));
|
||||
connectionEstablished = true;
|
||||
|
||||
// Finish/update the request headers (which includes Date header)
|
||||
// This must be done AFTER the connection is ready because if credentials are coming from disk they are refreshed
|
||||
|
@ -519,6 +521,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
retryable = retryable && (thisTry < maxTries);
|
||||
|
||||
TraceEvent event(SevWarn, retryable ? "BlobStoreEndpointRequestFailedRetryable" : "BlobStoreEndpointRequestFailed");
|
||||
event.detail("ConnectionEstablished", connectionEstablished);
|
||||
|
||||
if(remoteAddress.present())
|
||||
event.detail("RemoteEndpoint", remoteAddress.get());
|
||||
|
@ -575,6 +578,21 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
if(r && r->code == 401)
|
||||
throw http_auth_failed();
|
||||
|
||||
// Recognize and throw specific errors
|
||||
if(err.present()) {
|
||||
int code = err.get().code();
|
||||
|
||||
// If we get a timed_out error during the connect() phase, we'll call that connection_failed despite the fact that
|
||||
// there was technically never a 'connection' to begin with. It differentiates between an active connection
|
||||
// timing out vs a connection timing out, though not between an active connection failing vs connection attempt failing.
|
||||
// TODO: Add more error types?
|
||||
if(code == error_code_timed_out && !connectionEstablished)
|
||||
throw connection_failed();
|
||||
|
||||
if(code == error_code_timed_out || code == error_code_connection_failed || code == error_code_lookup_failed)
|
||||
throw err.get();
|
||||
}
|
||||
|
||||
throw http_request_failed();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -157,7 +157,7 @@ public:
|
|||
countConnClosedWithoutError.init(LiteralStringRef("Net2.CountConnClosedWithoutError"));
|
||||
}
|
||||
|
||||
struct Peer* getPeer( NetworkAddress const& address, bool doConnect = true );
|
||||
struct Peer* getPeer( NetworkAddress const& address, bool openConnection = true );
|
||||
|
||||
NetworkAddress localAddress;
|
||||
std::map<NetworkAddress, struct Peer*> peers;
|
||||
|
@ -212,11 +212,9 @@ static_assert( sizeof(ConnectPacket) == CONNECT_PACKET_V2_SIZE, "ConnectPacket p
|
|||
|
||||
static Future<Void> connectionReader( TransportData* const& transport, Reference<IConnection> const& conn, Peer* const& peer, Promise<Peer*> const& onConnected );
|
||||
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable );
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable, bool openConnection );
|
||||
|
||||
struct Peer : NonCopyable {
|
||||
// FIXME: Peers don't die!
|
||||
|
||||
TransportData* transport;
|
||||
NetworkAddress destination;
|
||||
UnsentPacketQueue unsent;
|
||||
|
@ -229,12 +227,10 @@ struct Peer : NonCopyable {
|
|||
double lastConnectTime;
|
||||
double reconnectionDelay;
|
||||
|
||||
explicit Peer( TransportData* transport, NetworkAddress const& destination, bool doConnect = true )
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(!doConnect), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true)
|
||||
explicit Peer( TransportData* transport, NetworkAddress const& destination )
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true)
|
||||
{
|
||||
if(doConnect) {
|
||||
connect = connectionKeeper(this);
|
||||
}
|
||||
connect = connectionKeeper(this);
|
||||
}
|
||||
|
||||
void send(PacketBuffer* pb, ReliablePacket* rp, bool firstUnsent) {
|
||||
|
@ -424,6 +420,13 @@ struct Peer : NonCopyable {
|
|||
IFailureMonitor::failureMonitor().notifyDisconnect( self->destination ); //< Clients might send more packets in response, which needs to go out on the next connection
|
||||
if (e.code() == error_code_actor_cancelled) throw;
|
||||
// Try to recover, even from serious errors, by retrying
|
||||
|
||||
if(self->reliable.empty() && self->unsent.empty()) {
|
||||
self->connect.cancel();
|
||||
self->transport->peers.erase(self->destination);
|
||||
delete self;
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -454,7 +457,7 @@ ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReade
|
|||
sendPacket( self,
|
||||
SerializeSource<Endpoint>( Endpoint( self->localAddress, destination.token ) ),
|
||||
Endpoint( destination.address, WLTOKEN_ENDPOINT_NOT_FOUND),
|
||||
false );
|
||||
false, true );
|
||||
}
|
||||
|
||||
if( inReadSocket )
|
||||
|
@ -736,10 +739,17 @@ ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr
|
|||
}
|
||||
}
|
||||
|
||||
Peer* TransportData::getPeer( NetworkAddress const& address, bool doConnect ) {
|
||||
auto& peer = peers[address];
|
||||
if (!peer) peer = new Peer(this, address, doConnect);
|
||||
return peer;
|
||||
Peer* TransportData::getPeer( NetworkAddress const& address, bool openConnection ) {
|
||||
auto peer = peers.find(address);
|
||||
if (peer != peers.end()) {
|
||||
return peer->second;
|
||||
}
|
||||
if(!openConnection) {
|
||||
return NULL;
|
||||
}
|
||||
Peer* newPeer = new Peer(this, address);
|
||||
peers[address] = newPeer;
|
||||
return newPeer;
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> multiVersionCleanupWorker( TransportData* self ) {
|
||||
|
@ -823,7 +833,7 @@ void FlowTransport::addWellKnownEndpoint( Endpoint& endpoint, NetworkMessageRece
|
|||
ASSERT( endpoint.token == otoken );
|
||||
}
|
||||
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable ) {
|
||||
static PacketID sendPacket( TransportData* self, ISerializeSource const& what, const Endpoint& destination, bool reliable, bool openConnection ) {
|
||||
if (destination.address == self->localAddress) {
|
||||
TEST(true); // "Loopback" delivery
|
||||
// SOMEDAY: Would it be better to avoid (de)serialization by doing this check in flow?
|
||||
|
@ -846,10 +856,10 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
|
|||
|
||||
++self->countPacketsGenerated;
|
||||
|
||||
Peer* peer = self->getPeer(destination.address);
|
||||
Peer* peer = self->getPeer(destination.address, openConnection);
|
||||
|
||||
// If there isn't an open connection, a public address, or the peer isn't compatible, we can't send
|
||||
if ((peer->outgoingConnectionIdle && !destination.address.isPublic()) || (!peer->compatible && destination.token != WLTOKEN_PING_PACKET)) {
|
||||
if (!peer || (peer->outgoingConnectionIdle && !destination.address.isPublic()) || (!peer->compatible && destination.token != WLTOKEN_PING_PACKET)) {
|
||||
TEST(true); // Can't send to private address without a compatible open connection
|
||||
return (PacketID)NULL;
|
||||
}
|
||||
|
@ -937,7 +947,7 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
|
|||
}
|
||||
|
||||
PacketID FlowTransport::sendReliable( ISerializeSource const& what, const Endpoint& destination ) {
|
||||
return sendPacket( self, what, destination, true );
|
||||
return sendPacket( self, what, destination, true, true );
|
||||
}
|
||||
|
||||
void FlowTransport::cancelReliable( PacketID pid ) {
|
||||
|
@ -946,8 +956,8 @@ void FlowTransport::cancelReliable( PacketID pid ) {
|
|||
// SOMEDAY: Call reliable.compact() if a lot of memory is wasted in PacketBuffers by formerly reliable packets mixed with a few reliable ones. Don't forget to delref the new PacketBuffers since they are unsent.
|
||||
}
|
||||
|
||||
void FlowTransport::sendUnreliable( ISerializeSource const& what, const Endpoint& destination ) {
|
||||
sendPacket( self, what, destination, false );
|
||||
void FlowTransport::sendUnreliable( ISerializeSource const& what, const Endpoint& destination, bool openConnection ) {
|
||||
sendPacket( self, what, destination, false, openConnection );
|
||||
}
|
||||
|
||||
int FlowTransport::getEndpointCount() {
|
||||
|
|
|
@ -102,7 +102,7 @@ public:
|
|||
// Makes PacketID "unreliable" (either the data or a connection close event will be delivered
|
||||
// eventually). It can still be used safely to send a reply to a "reliable" request.
|
||||
|
||||
void sendUnreliable( ISerializeSource const& what, const Endpoint& destination );// { cancelReliable(sendReliable(what,destination)); }
|
||||
void sendUnreliable( ISerializeSource const& what, const Endpoint& destination, bool openConnection = true );// { cancelReliable(sendReliable(what,destination)); }
|
||||
|
||||
int getEndpointCount();
|
||||
// for tracing only
|
||||
|
|
|
@ -97,11 +97,11 @@ ACTOR template <class T>
|
|||
void networkSender( Future<T> input, Endpoint endpoint ) {
|
||||
try {
|
||||
T value = wait( input );
|
||||
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<T>(true, value), endpoint );
|
||||
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<T>(true, value), endpoint, false );
|
||||
} catch (Error& err) {
|
||||
//if (err.code() == error_code_broken_promise) return;
|
||||
ASSERT( err.code() != error_code_actor_cancelled );
|
||||
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<Error>(false, err), endpoint );
|
||||
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<Error>(false, err), endpoint, false );
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -368,29 +368,22 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
ACTOR Future<bool> getKeyLocations(Database cx, vector<pair<KeyRange, vector<StorageServerInterface>>> shards, ConsistencyCheckWorkload *self, Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise)
|
||||
{
|
||||
state Standalone<VectorRef<KeyValueRef>> keyLocations;
|
||||
state Key beginKey = allKeys.begin;
|
||||
state Key beginKey = allKeys.begin.withPrefix(keyServersPrefix);
|
||||
state Key endKey = allKeys.end.withPrefix(keyServersPrefix);
|
||||
state int i = 0;
|
||||
|
||||
//If the responses are too big, we may use multiple requests to get the key locations. Each request begins where the last left off
|
||||
for ( ; i < shards.size(); i++)
|
||||
{
|
||||
// skip serverList shards
|
||||
if (!shards[i].first.begin.startsWith(keyServersPrefix)) {
|
||||
break;
|
||||
}
|
||||
|
||||
state Key endKey = shards[i].first.end.startsWith(keyServersPrefix) ? shards[i].first.end.removePrefix(keyServersPrefix) : allKeys.end;
|
||||
|
||||
while(beginKey < endKey)
|
||||
while(beginKey < std::min<KeyRef>(shards[i].first.end, endKey))
|
||||
{
|
||||
try
|
||||
{
|
||||
Version version = wait(self->getVersion(cx, self));
|
||||
|
||||
GetKeyValuesRequest req;
|
||||
Key prefixBegin = beginKey.withPrefix(keyServersPrefix);
|
||||
req.begin = firstGreaterOrEqual(prefixBegin);
|
||||
req.end = firstGreaterOrEqual(keyServersEnd);
|
||||
req.begin = firstGreaterOrEqual(beginKey);
|
||||
req.end = firstGreaterOrEqual(std::min<KeyRef>(shards[i].first.end, endKey));
|
||||
req.limit = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT;
|
||||
req.limitBytes = SERVER_KNOBS->MOVE_KEYS_KRM_LIMIT_BYTES;
|
||||
req.version = version;
|
||||
|
@ -442,17 +435,26 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
}
|
||||
|
||||
auto keyValueResponse = keyValueFutures[firstValidStorageServer].get().get();
|
||||
Standalone<RangeResultRef> currentLocations = krmDecodeRanges( keyServersPrefix, KeyRangeRef(beginKey, endKey), RangeResultRef( keyValueResponse.data, keyValueResponse.more) );
|
||||
Standalone<RangeResultRef> currentLocations = krmDecodeRanges( keyServersPrefix, KeyRangeRef(beginKey.removePrefix(keyServersPrefix), std::min<KeyRef>(shards[i].first.end, endKey).removePrefix(keyServersPrefix)), RangeResultRef( keyValueResponse.data, keyValueResponse.more) );
|
||||
|
||||
//Push all but the last item, which will be pushed as the first item next iteration
|
||||
keyLocations.append_deep(keyLocations.arena(), currentLocations.begin(), currentLocations.size() - 1);
|
||||
if(keyValueResponse.data.size() && beginKey == keyValueResponse.data[0].key) {
|
||||
keyLocations.push_back_deep(keyLocations.arena(), currentLocations[0]);
|
||||
}
|
||||
|
||||
if(currentLocations.size() > 2) {
|
||||
keyLocations.append_deep(keyLocations.arena(), &currentLocations[1], currentLocations.size() - 2);
|
||||
}
|
||||
|
||||
//Next iteration should pick up where we left off
|
||||
ASSERT(currentLocations.size() > 1);
|
||||
beginKey = currentLocations.end()[-1].key;
|
||||
if(!keyValueResponse.more) {
|
||||
beginKey = shards[i].first.end;
|
||||
} else {
|
||||
beginKey = keyValueResponse.data.end()[-1].key;
|
||||
}
|
||||
|
||||
//If this is the last iteration, then push the allKeys.end KV pair
|
||||
if(beginKey == allKeys.end)
|
||||
if(beginKey >= endKey)
|
||||
keyLocations.push_back_deep(keyLocations.arena(), currentLocations.end()[-1]);
|
||||
}
|
||||
catch(Error &e)
|
||||
|
@ -970,7 +972,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
//Min and max shard sizes have a 3 * shardBounds.permittedError.bytes cushion for error since shard sizes are not precise
|
||||
//Shard splits ignore the first key in a shard, so its size shouldn't be considered when checking the upper bound
|
||||
//0xff shards are not checked
|
||||
if( canSplit && self->performQuiescentChecks && !range.begin.startsWith(keyServersPrefix) &&
|
||||
if( canSplit && sampledKeys > 5 && self->performQuiescentChecks && !range.begin.startsWith(keyServersPrefix) &&
|
||||
(sampledBytes < shardBounds.min.bytes - 3 * shardBounds.permittedError.bytes || sampledBytes - firstKeySampledBytes > shardBounds.max.bytes + 3 * shardBounds.permittedError.bytes))
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_InvalidShardSize").detail("Min", shardBounds.min.bytes).detail("Max", shardBounds.max.bytes).detail("Size", shardBytes)
|
||||
|
|
|
@ -98,7 +98,7 @@ ERROR( http_not_accepted, 1519, "HTTP request not accepted" )
|
|||
ERROR( checksum_failed, 1520, "A data checksum failed" )
|
||||
ERROR( io_timeout, 1521, "A disk IO operation failed to complete in a timely manner" )
|
||||
ERROR( file_corrupt, 1522, "A structurally corrupt data file was detected" )
|
||||
ERROR( http_request_failed, 1523, "HTTP response code indicated failure" )
|
||||
ERROR( http_request_failed, 1523, "HTTP response code not received or indicated failure" )
|
||||
ERROR( http_auth_failed, 1524, "HTTP request failed due to bad credentials" )
|
||||
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
|
||||
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
|
||||
<Product Name='$(var.Title)'
|
||||
Id='{E60C53B7-DA5E-49BA-9F5F-8FC668D1DD4C}'
|
||||
Id='{17E755FF-984D-4794-9FB7-F5EF61EFA6B6}'
|
||||
UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
|
||||
Version='$(var.Version)'
|
||||
Manufacturer='$(var.Manufacturer)'
|
||||
|
|
Loading…
Reference in New Issue