From 7ac098dc0d79a17f74e01710519987586698dafe Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 25 Feb 2019 18:39:14 -0500 Subject: [PATCH 01/71] Add Versionstamp support to the Go Tuple layer --- bindings/bindingtester/known_testers.py | 2 +- bindings/go/src/_stacktester/stacktester.go | 26 ++++- bindings/go/src/fdb/tuple/tuple.go | 117 +++++++++++++++++++- 3 files changed, 139 insertions(+), 6 deletions(-) diff --git a/bindings/bindingtester/known_testers.py b/bindings/bindingtester/known_testers.py index 8abe8c5741..fee09f5adf 100644 --- a/bindings/bindingtester/known_testers.py +++ b/bindings/bindingtester/known_testers.py @@ -62,6 +62,6 @@ testers = { 'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION), 'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES), 'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES), - 'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION), + 'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION, types=ALL_TYPES), 'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False), } diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index f76641629e..151f1e766c 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -25,8 +25,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "github.com/apple/foundationdb/bindings/go/src/fdb" - "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" "log" "math/big" "os" @@ -37,6 +35,9 @@ import ( "strings" "sync" "time" + + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" ) const verbose bool = false @@ -661,6 +662,25 @@ func (sm *StackMachine) processInst(idx 
int, inst tuple.Tuple) { t = append(t, sm.waitAndPop().item) } sm.store(idx, []byte(t.Pack())) + case op == "TUPLE_PACK_VERSIONSTAMP": + var t tuple.Tuple + count := sm.waitAndPop().item.(int64) + for i := 0; i < int(count); i++ { + t = append(t, sm.waitAndPop().item) + } + + incomplete, err := t.HasIncompleteVersionstamp() + if incomplete == false { + sm.store(idx, []byte("ERROR: NONE")) + } else { + if err != nil { + sm.store(idx, []byte("ERROR: MULTIPLE")) + } else { + packed := t.Pack() + sm.store(idx, "OK") + sm.store(idx, packed) + } + } case op == "TUPLE_UNPACK": t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte))) if e != nil { @@ -893,7 +913,7 @@ func main() { log.Fatal("API version not equal to value selected") } - db, e = fdb.OpenDatabase(clusterFile) + db, e = fdb.Open(clusterFile, []byte("DB")) if e != nil { log.Fatal(e) } diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index afd959420f..0e0cc732bd 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -39,6 +39,7 @@ package tuple import ( "bytes" "encoding/binary" + "errors" "fmt" "math" "math/big" @@ -72,6 +73,37 @@ type Tuple []TupleElement // an instance of this type. type UUID [16]byte +// Versionstamp . +type Versionstamp struct { + TransactionVersion [10]byte + UserVersion uint16 +} + +var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + +const versionstampLength = 13 + +// IncompleteVersionstamp . +func IncompleteVersionstamp(userVersion uint16) Versionstamp { + return Versionstamp{ + TransactionVersion: incompleteTransactionVersion, + UserVersion: userVersion, + } +} + +// Bytes . 
+func (v Versionstamp) Bytes() []byte { + var scratch [12]byte + + copy(scratch[:], v.TransactionVersion[:]) + + binary.BigEndian.PutUint16(scratch[10:], v.UserVersion) + + fmt.Println(scratch) + + return scratch[:] +} + // Type codes: These prefix the different elements in a packed Tuple // to indicate what type they are. const nilCode = 0x00 @@ -86,6 +118,7 @@ const doubleCode = 0x21 const falseCode = 0x26 const trueCode = 0x27 const uuidCode = 0x30 +const versionstampCode = 0x33 var sizeLimits = []uint64{ 1<<(0*8) - 1, @@ -122,7 +155,15 @@ func adjustFloatBytes(b []byte, encode bool) { } type packer struct { - buf []byte + versionstampPos int32 + buf []byte +} + +func newPacker() *packer { + return &packer{ + versionstampPos: -1, + buf: make([]byte, 0, 64), + } } func (p *packer) putByte(b byte) { @@ -249,6 +290,18 @@ func (p *packer) encodeUUID(u UUID) { p.putBytes(u[:]) } +func (p *packer) encodeVersionstamp(v Versionstamp) { + p.putByte(versionstampCode) + + if p.versionstampPos != 0 && v.TransactionVersion == incompleteTransactionVersion { + panic(fmt.Sprintf("Tuple can only contain one unbound versionstamp")) + } else { + p.versionstampPos = int32(len(p.buf)) + } + + p.putBytes(v.Bytes()) +} + func (p *packer) encodeTuple(t Tuple, nested bool) { if nested { p.putByte(nestedCode) @@ -293,6 +346,8 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { } case UUID: p.encodeUUID(e) + case Versionstamp: + p.encodeVersionstamp(e) default: panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i])) } @@ -314,11 +369,50 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { // call Pack when using a Tuple with a FoundationDB API function that requires a // key. 
func (t Tuple) Pack() []byte { - p := packer{buf: make([]byte, 0, 64)} + p := newPacker() p.encodeTuple(t, false) return p.buf } +// PackWithVersionstamp packs the specified tuple into a key for versionstamp operations +func (t Tuple) PackWithVersionstamp() ([]byte, error) { + hasVersionstamp, err := t.HasIncompleteVersionstamp() + if err != nil { + return nil, err + } + + p := newPacker() + p.encodeTuple(t, false) + + if hasVersionstamp { + var scratch [4]byte + binary.LittleEndian.PutUint32(scratch[:], uint32(p.versionstampPos)) + p.putBytes(scratch[:]) + } + + return p.buf, nil +} + +// HasIncompleteVersionstamp determines if there is at least one incomplete versionstamp in a tuple +func (t Tuple) HasIncompleteVersionstamp() (bool, error) { + incompleteCount := 0 + for _, el := range t { + switch e := el.(type) { + case Versionstamp: + if e.TransactionVersion == incompleteTransactionVersion { + incompleteCount++ + } + } + } + + var err error + if incompleteCount > 1 { + err = errors.New("Tuple can only contain one unbound versionstamp") + } + + return incompleteCount == 1, err +} + func findTerminator(b []byte) int { bp := b var length int @@ -438,6 +532,20 @@ func decodeUUID(b []byte) (UUID, int) { return u, 17 } +func decodeVersionstamp(b []byte) (Versionstamp, int) { + var transactionVersion [10]byte + var userVersion uint16 + + copy(transactionVersion[:], b[1:11]) + + userVersion = binary.BigEndian.Uint16(b[11:]) + + return Versionstamp{ + TransactionVersion: transactionVersion, + UserVersion: userVersion, + }, versionstampLength +} + func decodeTuple(b []byte, nested bool) (Tuple, int, error) { var t Tuple @@ -489,6 +597,11 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) { return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i) } el, off = decodeUUID(b[i:]) + case b[i] == versionstampCode: + if i+versionstampLength > len(b) { + return nil, i, fmt.Errorf("insufficient bytes to decode 
Versionstamp starting at position %d of byte array for tuple", i) + } + el, off = decodeVersionstamp(b[i:]) case b[i] == nestedCode: var err error el, off, err = decodeTuple(b[i+1:], true) From b2f26224b9a46a6f96a3bcccd6a7bbdee66ac198 Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 25 Feb 2019 18:41:57 -0500 Subject: [PATCH 02/71] Revert unintentional change back to old API --- bindings/go/src/_stacktester/stacktester.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index 151f1e766c..6a1100ffe7 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -913,7 +913,7 @@ func main() { log.Fatal("API version not equal to value selected") } - db, e = fdb.Open(clusterFile, []byte("DB")) + db, e = fdb.OpenDatabase(clusterFile) if e != nil { log.Fatal(e) } From 292bb6ab0f564a4649f20b2e4d60ab5e918758ea Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 25 Feb 2019 18:57:28 -0500 Subject: [PATCH 03/71] Make `versionstampLength` constant equal Versionstamp actual length. --- bindings/go/src/fdb/tuple/tuple.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index 0e0cc732bd..2c30705ba0 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -81,7 +81,7 @@ type Versionstamp struct { var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} -const versionstampLength = 13 +const versionstampLength = 12 // IncompleteVersionstamp . 
func IncompleteVersionstamp(userVersion uint16) Versionstamp { @@ -543,7 +543,7 @@ func decodeVersionstamp(b []byte) (Versionstamp, int) { return Versionstamp{ TransactionVersion: transactionVersion, UserVersion: userVersion, - }, versionstampLength + }, versionstampLength + 1 } func decodeTuple(b []byte, nested bool) (Tuple, int, error) { @@ -598,7 +598,7 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) { } el, off = decodeUUID(b[i:]) case b[i] == versionstampCode: - if i+versionstampLength > len(b) { + if i+versionstampLength+1 > len(b) { return nil, i, fmt.Errorf("insufficient bytes to decode Versionstamp starting at position %d of byte array for tuple", i) } el, off = decodeVersionstamp(b[i:]) From 4dd04862c7037a5ba246eab3ea0aa4a40f644942 Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 25 Feb 2019 19:05:45 -0500 Subject: [PATCH 04/71] Flatten if statements --- bindings/go/src/_stacktester/stacktester.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index 6a1100ffe7..8ef2c3d78f 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -672,14 +672,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) { incomplete, err := t.HasIncompleteVersionstamp() if incomplete == false { sm.store(idx, []byte("ERROR: NONE")) + } else if err != nil { + sm.store(idx, []byte("ERROR: MULTIPLE")) } else { - if err != nil { - sm.store(idx, []byte("ERROR: MULTIPLE")) - } else { - packed := t.Pack() - sm.store(idx, "OK") - sm.store(idx, packed) - } + packed := t.Pack() + sm.store(idx, "OK") + sm.store(idx, packed) } case op == "TUPLE_UNPACK": t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte))) From 05d347e194d0de66f635b61b72477ead4b75c7da Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 25 Feb 2019 19:08:29 -0500 Subject: [PATCH 05/71] Push byte slice instead of 
string onto the stack --- bindings/go/src/_stacktester/stacktester.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index 8ef2c3d78f..b2012546e7 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -676,7 +676,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) { sm.store(idx, []byte("ERROR: MULTIPLE")) } else { packed := t.Pack() - sm.store(idx, "OK") + sm.store(idx, []byte("OK")) sm.store(idx, packed) } case op == "TUPLE_UNPACK": From 7ef189701e3ec2496dd080f57fec124ae469ceaa Mon Sep 17 00:00:00 2001 From: Alec Grieser Date: Thu, 28 Feb 2019 14:33:59 -0800 Subject: [PATCH 06/71] Resolves #719: Support `.setReadVersion()` on `ReadTransaction` --- .../apple/foundationdb/FDBTransaction.java | 40 ++++++++++++ .../apple/foundationdb/ReadTransaction.java | 61 ++++++++++++++++++- .../com/apple/foundationdb/Transaction.java | 27 +------- documentation/sphinx/source/release-notes.rst | 1 + 4 files changed, 102 insertions(+), 27 deletions(-) diff --git a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java index 49354b993d..b0e904f725 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java @@ -40,11 +40,26 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC public final ReadTransaction snapshot; class ReadSnapshot implements ReadTransaction { + @Override + public boolean isSnapshot() { + return true; + } + + @Override + public ReadTransaction snapshot() { + return this; + } + @Override public CompletableFuture getReadVersion() { return FDBTransaction.this.getReadVersion(); } + @Override + public void setReadVersion(long version) { + FDBTransaction.this.setReadVersion(version); + } + 
@Override public CompletableFuture get(byte[] key) { return get_internal(key, true); @@ -126,6 +141,16 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC return getRange(range, ReadTransaction.ROW_LIMIT_UNLIMITED); } + @Override + public void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { + // Do nothing + } + + @Override + public void addReadConflictKeyIfNotSnapshot(byte[] key) { + // Do nothing + } + @Override public TransactionOptions options() { return FDBTransaction.this.options(); @@ -157,6 +182,11 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC transactionOwner = true; } + @Override + public boolean isSnapshot() { + return false; + } + @Override public ReadTransaction snapshot() { return snapshot; @@ -321,11 +351,21 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC } } + @Override + public void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { + addReadConflictRange(keyBegin, keyEnd); + } + @Override public void addReadConflictRange(byte[] keyBegin, byte[] keyEnd) { addConflictRange(keyBegin, keyEnd, ConflictRangeType.READ); } + @Override + public void addReadConflictKeyIfNotSnapshot(byte[] key) { + addReadConflictKey(key); + } + @Override public void addReadConflictKey(byte[] key) { addConflictRange(key, ByteArrayUtil.join(key, new byte[]{(byte) 0}), ConflictRangeType.READ); diff --git a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java index 5130ce01ff..b7af660e9a 100644 --- a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java @@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Tuple; *
* Note: Client must call {@link Transaction#commit()} and wait on the result on all transactions, * even ones that only read. This is done automatically when using the retry loops from - * {@link Database#run(Function)}. This is explained more in the intro to {@link Transaction}. + * {@link Database#run(java.util.function.Function)}. This is explained more in the intro to {@link Transaction}. * * @see Transaction */ @@ -43,12 +43,71 @@ public interface ReadTransaction extends ReadTransactionContext { */ int ROW_LIMIT_UNLIMITED = 0; + /** + * Gets whether this transaction is a snapshot view of the database. In other words, this returns + * whether read conflict ranges are omitted for any reads done through this {@code ReadTransaction}. + *
+ * For more information about how to use snapshot reads correctly, see + * Using snapshot reads. + * + * @return whether this is a snapshot view of the database with relaxed isolation properties + * @see #snapshot() + */ + boolean isSnapshot(); + + /** + * Return a special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads". + * Snapshot reads selectively relax FoundationDB's isolation property, reducing + * Transaction conflicts + * but making reasoning about concurrency harder.
+ *
+ * For more information about how to use snapshot reads correctly, see + * Using snapshot reads. + * + * @return a read-only view of this {@code ReadTransaction} with relaxed isolation properties + */ + ReadTransaction snapshot(); + /** * Gets the version at which the reads for this {@code Transaction} will access the database. * @return the version for database reads */ CompletableFuture getReadVersion(); + /** + * Directly sets the version of the database at which to execute reads. The + * normal operation of a transaction is to determine an appropriately recent + * version; this call overrides that behavior. If the version is set too + * far in the past, {@code past_version} errors will be thrown from read operations. + * Infrequently used. + * + * @param version the version at which to read from the database + */ + void setReadVersion(long version); + + /** + * Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read + * the given key range. If this is a {@linkplain #snapshot() snapshot} view of the database, this will + * not add the conflict range. This mirrors how reading a range through a snapshot view + * of the database does not add a conflict range for the read keys. + * + * @param keyBegin the first key in the range (inclusive) + * @param keyEnd the last key in the range (exclusive) + * @see Transaction#addReadConflictRange(byte[], byte[]) + */ + void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd); + + /** + * Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read + * the given key. If this is a {@linkplain #snapshot() snapshot} view of the database, this will + * not add the conflict range. This mirrors how reading a key through a snapshot view + * of the database does not add a conflict range for the read key. 
+ * + * @param key the key to add to the read conflict range set (it this is not a snapshot view of the database) + * @see Transaction#addReadConflictKey(byte[]) + */ + void addReadConflictKeyIfNotSnapshot(byte[] key); + /** * Gets a value from the database. The call will return {@code null} if the key is not * present in the database. diff --git a/bindings/java/src/main/com/apple/foundationdb/Transaction.java b/bindings/java/src/main/com/apple/foundationdb/Transaction.java index 2b87736d7b..c3a8c6b671 100644 --- a/bindings/java/src/main/com/apple/foundationdb/Transaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/Transaction.java @@ -76,31 +76,6 @@ import com.apple.foundationdb.tuple.Tuple; */ public interface Transaction extends AutoCloseable, ReadTransaction, TransactionContext { - /** - * Return special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads". - * Snapshot reads selectively relax FoundationDB's isolation property, reducing - * Transaction conflicts - * but making reasoning about concurrency harder.
- *
- * For more information about how to use snapshot reads correctly, see - * Using snapshot reads. - * - * @return a read-only view of this {@code Transaction} with relaxed isolation properties - */ - ReadTransaction snapshot(); - - /** - * Directly sets the version of the database at which to execute reads. The - * normal operation of a transaction is to determine an appropriately recent - * version; this call overrides that behavior. If the version is set too - * far in the past, {@code past_version} errors will be thrown from read operations. - * Infrequently used. - * - * @param version the version at which to read from the database - */ - void setReadVersion(long version); - - /** * Adds a range of keys to the transaction's read conflict ranges as if you * had read the range. As a result, other transactions that write a key in @@ -116,7 +91,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction * the key. As a result, other transactions that concurrently write this key * could cause the transaction to fail with a conflict. * - * @param key the key to be added to the range + * @param key the key to be added to the read conflict range set */ void addReadConflictKey(byte[] key); diff --git a/documentation/sphinx/source/release-notes.rst b/documentation/sphinx/source/release-notes.rst index fa83fc2875..a9d11f4d56 100644 --- a/documentation/sphinx/source/release-notes.rst +++ b/documentation/sphinx/source/release-notes.rst @@ -35,6 +35,7 @@ Bindings * Python: Removed ``fdb.init``, ``fdb.create_cluster``, and ``fdb.Cluster``. ``fdb.open`` no longer accepts a ``database_name`` parameter. `(PR #942) `_ * Java: Deprecated ``FDB.createCluster`` and ``Cluster``. The preferred way to get a ``Database`` is by using ``FDB.open``, which should work in both new and old API versions. `(PR #942) `_ * Java: Removed ``Cluster(long cPtr, Executor executor)`` constructor. 
This is API breaking for any code that has subclassed the ``Cluster`` class and is not protected by API versioning. `(PR #942) `_ +* Java: Several methods relevant to read-only transactions have been moved into the ``ReadTransaction`` interface. * Ruby: Removed ``FDB.init``, ``FDB.create_cluster``, and ``FDB.Cluster``. ``FDB.open`` no longer accepts a ``database_name`` parameter. `(PR #942) `_ * Golang: Deprecated ``fdb.StartNetwork``, ``fdb.Open``, ``fdb.MustOpen``, and ``fdb.CreateCluster`` and added ``fdb.OpenDatabase`` and ``fdb.MustOpenDatabase``. The preferred way to start the network and get a ``Database`` is by using ``FDB.OpenDatabase`` or ``FDB.OpenDefault``. `(PR #942) `_ * Flow: Deprecated ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The preferred way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) `_ From eb8a085cf9cf9588d2bc5ec80a912c0b58ba4cc3 Mon Sep 17 00:00:00 2001 From: Alec Grieser Date: Sat, 2 Mar 2019 09:44:15 -0800 Subject: [PATCH 07/71] conditional add read conflict methods now return whether they added the conflict range ; test added for snapshot transactions --- .../apple/foundationdb/FDBTransaction.java | 16 +- .../apple/foundationdb/ReadTransaction.java | 6 +- .../test/SnapshotTransactionTest.java | 209 ++++++++++++++++++ 3 files changed, 223 insertions(+), 8 deletions(-) create mode 100644 bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java diff --git a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java index b0e904f725..0e507c914d 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java @@ -142,13 +142,15 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC } @Override - public void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { - // 
Do nothing + public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { + // This is a snapshot transaction; do not add the conflict range. + return false; } @Override - public void addReadConflictKeyIfNotSnapshot(byte[] key) { - // Do nothing + public boolean addReadConflictKeyIfNotSnapshot(byte[] key) { + // This is a snapshot transaction; do not add the conflict key. + return false; } @Override @@ -352,8 +354,9 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC } @Override - public void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { + public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) { addReadConflictRange(keyBegin, keyEnd); + return true; } @Override @@ -362,8 +365,9 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC } @Override - public void addReadConflictKeyIfNotSnapshot(byte[] key) { + public boolean addReadConflictKeyIfNotSnapshot(byte[] key) { addReadConflictKey(key); + return true; } @Override diff --git a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java index b7af660e9a..3f44e2ba36 100644 --- a/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java +++ b/bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java @@ -93,9 +93,10 @@ public interface ReadTransaction extends ReadTransactionContext { * * @param keyBegin the first key in the range (inclusive) * @param keyEnd the last key in the range (exclusive) + * @return {@code true} if the read conflict range was added and {@code false} otherwise * @see Transaction#addReadConflictRange(byte[], byte[]) */ - void addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd); + boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd); /** * Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read @@ 
-104,9 +105,10 @@ public interface ReadTransaction extends ReadTransactionContext { * of the database does not add a conflict range for the read key. * * @param key the key to add to the read conflict range set (it this is not a snapshot view of the database) + * @return {@code true} if the read conflict key was added and {@code false} otherwise * @see Transaction#addReadConflictKey(byte[]) */ - void addReadConflictKeyIfNotSnapshot(byte[] key); + boolean addReadConflictKeyIfNotSnapshot(byte[] key); /** * Gets a value from the database. The call will return {@code null} if the key is not diff --git a/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java b/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java new file mode 100644 index 0000000000..d7532f6bfe --- /dev/null +++ b/bindings/java/src/test/com/apple/foundationdb/test/SnapshotTransactionTest.java @@ -0,0 +1,209 @@ +/* + * SnapshotTransactionTest.java + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.apple.foundationdb.test; + +import java.util.UUID; +import java.util.concurrent.CompletionException; + +import com.apple.foundationdb.Database; +import com.apple.foundationdb.FDB; +import com.apple.foundationdb.FDBException; +import com.apple.foundationdb.ReadTransaction; +import com.apple.foundationdb.Transaction; +import com.apple.foundationdb.subspace.Subspace; +import com.apple.foundationdb.tuple.Tuple; + +/** + * Some tests regarding conflict ranges to make sure they do what we expect. + */ +public class SnapshotTransactionTest { + private static final int CONFLICT_CODE = 1020; + private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges")); + + public static void main(String[] args) { + FDB fdb = FDB.selectAPIVersion(610); + try(Database db = fdb.open()) { + snapshotReadShouldNotConflict(db); + snapshotShouldNotAddConflictRange(db); + snapshotOnSnapshot(db); + } + } + + // Adding a random write conflict key makes it so the transaction conflicts are actually resolved. + public static void addUUIDConflicts(Transaction... 
trs) { + for(Transaction tr : trs) { + tr.options().setTimeout(1000); + tr.getReadVersion().join(); + byte[] key = SUBSPACE.pack(Tuple.from("uuids", UUID.randomUUID())); + tr.addReadConflictKey(key); + tr.addWriteConflictKey(key); + } + } + + public static void validateConflict(E e) throws E { + FDBException fdbE = null; + Throwable current = e; + while(current != null && fdbE == null) { + if(current instanceof FDBException) { + fdbE = (FDBException)current; + } + else { + current = current.getCause(); + } + } + if(fdbE == null) { + System.err.println("Error was not caused by FDBException"); + throw e; + } + else { + int errorCode = fdbE.getCode(); + if(errorCode != CONFLICT_CODE) { + System.err.println("FDB error was not caused by a transaction conflict"); + throw e; + } + } + } + + public static void snapshotReadShouldNotConflict(Database db) { + try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) { + addUUIDConflicts(tr1, tr2, tr3); + + // Verify reading a *range* causes a conflict + tr1.addWriteConflictKey(SUBSPACE.pack(Tuple.from("foo", 0L))); + tr2.snapshot().getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join(); + tr3.getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join(); + + // Two successful commits + tr1.commit().join(); + tr2.commit().join(); + + // Read from tr3 should conflict with update from tr1. 
+ try { + tr3.commit().join(); + throw new RuntimeException("tr3 did not conflict"); + } catch(CompletionException e) { + validateConflict(e); + } + } + try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) { + addUUIDConflicts(tr1, tr2, tr3); + + // Verify reading a *key* causes a conflict + byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L)); + tr1.addWriteConflictKey(key); + tr2.snapshot().get(key); + tr3.get(key).join(); + + tr1.commit().join(); + tr2.commit().join(); + + try { + tr3.commit().join(); + throw new RuntimeException("tr3 did not conflict"); + } + catch(CompletionException e) { + validateConflict(e); + } + } + } + + public static void snapshotShouldNotAddConflictRange(Database db) { + try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) { + addUUIDConflicts(tr1, tr2, tr3); + + // Verify adding a read conflict *range* causes a conflict. + Subspace fooSubspace = SUBSPACE.subspace(Tuple.from("foo")); + tr1.addWriteConflictKey(fooSubspace.pack(Tuple.from(0L))); + byte[] beginKey = fooSubspace.range().begin; + byte[] endKey = fooSubspace.range().end; + if(tr2.snapshot().addReadConflictRangeIfNotSnapshot(beginKey, endKey)) { + throw new RuntimeException("snapshot read said it added a conflict range"); + } + if(!tr3.addReadConflictRangeIfNotSnapshot(beginKey, endKey)) { + throw new RuntimeException("non-snapshot read said it did not add a conflict range"); + } + + // Two successful commits + tr1.commit().join(); + tr2.commit().join(); + + // Read from tr3 should conflict with update from tr1. 
+ try { + tr3.commit().join(); + throw new RuntimeException("tr3 did not conflict"); + } + catch(CompletionException e) { + validateConflict(e); + } + } + try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) { + addUUIDConflicts(tr1, tr2, tr3); + + // Verify adding a read conflict *key* causes a conflict. + byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L)); + tr1.addWriteConflictKey(key); + if(tr2.snapshot().addReadConflictKeyIfNotSnapshot(key)) { + throw new RuntimeException("snapshot read said it added a conflict range"); + } + if(!tr3.addReadConflictKeyIfNotSnapshot(key)) { + throw new RuntimeException("non-snapshot read said it did not add a conflict range"); + } + + // Two successful commits + tr1.commit().join(); + tr2.commit().join(); + + // Read from tr3 should conflict with update from tr1. + try { + tr3.commit().join(); + throw new RuntimeException("tr3 did not conflict"); + } + catch(CompletionException e) { + validateConflict(e); + } + } + } + + private static void snapshotOnSnapshot(Database db) { + try(Transaction tr = db.createTransaction()) { + if(tr.isSnapshot()) { + throw new RuntimeException("new transaction is a snapshot transaction"); + } + ReadTransaction snapshotTr = tr.snapshot(); + if(!snapshotTr.isSnapshot()) { + throw new RuntimeException("snapshot transaction is not a snapshot transaction"); + } + if(snapshotTr == tr) { + throw new RuntimeException("snapshot and regular transaction are pointer-equal"); + } + ReadTransaction snapshotSnapshotTr = snapshotTr.snapshot(); + if(!snapshotSnapshotTr.isSnapshot()) { + throw new RuntimeException("snapshot transaction is not a snapshot transaction"); + } + if(snapshotSnapshotTr != snapshotTr) { + throw new RuntimeException("calling snapshot on a snapshot transaction produced a different transaction"); + } + } + } + + private SnapshotTransactionTest() {} +} + From 1a550712cb893c5fa5b01d5517deadb5fc0c284a Mon Sep 17 
00:00:00 2001 From: Vishesh Yadav Date: Wed, 20 Feb 2019 15:37:13 -0800 Subject: [PATCH 08/71] Add serialization support for std::array --- flow/serialize.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/flow/serialize.h b/flow/serialize.h index ce486ff85c..4f505e2157 100644 --- a/flow/serialize.h +++ b/flow/serialize.h @@ -23,6 +23,7 @@ #pragma once #include +#include #include #include "flow/Error.h" #include "flow/Arena.h" @@ -150,6 +151,20 @@ inline void load( Archive& ar, std::vector& value ) { ASSERT( ar.protocolVersion() != 0 ); } +template +inline void save( Archive& ar, const std::array& value ) { + for(int ii = 0; ii < N; ++ii) + ar << value[ii]; + ASSERT( ar.protocolVersion() != 0 ); +} +template +inline void load( Archive& ar, std::array& value ) { + for (int ii = 0; ii < N; ii++) { + ar >> value[ii]; + } + ASSERT( ar.protocolVersion() != 0 ); +} + template inline void save( Archive& ar, const std::set& value ) { ar << (int)value.size(); From 57832e625d956135644221e571f4a6405b208266 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Tue, 26 Feb 2019 18:04:03 -0800 Subject: [PATCH 09/71] net: Support IPv6 #963 - NetworkAddress now contains IPAddress object which can be either IPv4 or IPv6 address. 128bits are used even for IPv4 addresses, however only 32bits are used when using/serializing IPv4 address. - ConnectPacket is updated to store IPv6 address. Backward compatible with old format since the first 32bits of IP address field is used for serialization of IPv4. - Mainly updates rest of the code to use IPAddress structure instead of plain uint32_t. - IPv6 address/pair ports should be represented as `[ip]:port` as per convention. This applies to both cluster files and command line arguments. 
--- fdbcli/fdbcli.actor.cpp | 4 +- fdbclient/AutoPublicAddress.cpp | 20 +++- fdbclient/FDBTypes.h | 34 ++---- fdbclient/ManagementAPI.actor.cpp | 6 +- fdbclient/MonitorLeader.actor.cpp | 22 ++++ fdbclient/NativeAPI.actor.cpp | 23 ++-- fdbclient/SystemData.cpp | 2 +- fdbrpc/FlowTransport.actor.cpp | 79 +++++++++---- fdbrpc/TLSConnection.actor.cpp | 2 +- fdbrpc/sim2.actor.cpp | 32 ++--- fdbrpc/simulator.h | 12 +- fdbserver/Status.actor.cpp | 16 ++- fdbserver/fdbserver.actor.cpp | 4 +- fdbserver/workloads/CpuProfiler.actor.cpp | 3 +- .../workloads/RemoveServersSafely.actor.cpp | 2 +- fdbserver/workloads/SaveAndKill.actor.cpp | 6 +- flow/Net2.actor.cpp | 28 +++-- flow/Platform.cpp | 24 ++-- flow/Platform.h | 4 +- flow/SystemMonitor.cpp | 11 +- flow/SystemMonitor.h | 9 +- flow/TDMetric.actor.h | 3 +- flow/Trace.cpp | 9 +- flow/network.cpp | 110 ++++++++++++++++-- flow/network.h | 81 +++++++++++-- 25 files changed, 387 insertions(+), 159 deletions(-) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index 59f964eae8..3cb37d6e06 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -2031,7 +2031,7 @@ ACTOR Future exclude( Database db, std::vector tokens, Referenc wait( makeInterruptable(waitForExcludedServers(db,addresses)) ); std::vector workers = wait( makeInterruptable(getWorkers(db)) ); - std::map> workerPorts; + std::map> workerPorts; for(auto addr : workers) workerPorts[addr.address.ip].insert(addr.address.port); @@ -2050,7 +2050,7 @@ ACTOR Future exclude( Database db, std::vector tokens, Referenc "excluded the correct machines or processes before removing them from the cluster:\n"); for(auto addr : absentExclusions) { if(addr.port == 0) - printf(" %s\n", toIPString(addr.ip).c_str()); + printf(" %s\n", addr.ip.toString().c_str()); else printf(" %s\n", addr.toString().c_str()); } diff --git a/fdbclient/AutoPublicAddress.cpp b/fdbclient/AutoPublicAddress.cpp index 11d536a181..93e5b25f92 100644 --- a/fdbclient/AutoPublicAddress.cpp +++ 
b/fdbclient/AutoPublicAddress.cpp @@ -28,13 +28,21 @@ #include "fdbclient/CoordinationInterface.h" -uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) { +IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs) { try { - boost::asio::io_service ioService; - boost::asio::ip::udp::socket socket(ioService); - boost::asio::ip::udp::endpoint endpoint(boost::asio::ip::address_v4(ccs.coordinators()[0].ip), ccs.coordinators()[0].port); + using namespace boost::asio; + + io_service ioService; + ip::udp::socket socket(ioService); + + const auto& coordAddr = ccs.coordinators()[0]; + const auto boostIp = coordAddr.ip.isV6() ? ip::address(ip::address_v6(coordAddr.ip.toV6())) + : ip::address(ip::address_v4(coordAddr.ip.toV4())); + + ip::udp::endpoint endpoint(boostIp, coordAddr.port); socket.connect(endpoint); - auto ip = socket.local_endpoint().address().to_v4().to_ulong(); + IPAddress ip = coordAddr.ip.isV6() ? IPAddress(socket.local_endpoint().address().to_v6().to_bytes()) + : IPAddress(socket.local_endpoint().address().to_v4().to_ulong()); socket.close(); return ip; @@ -43,4 +51,4 @@ uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) { fprintf(stderr, "Error determining public address: %s\n", e.what()); throw bind_failed(); } -} \ No newline at end of file +} diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index 090802472d..0dc1bccb83 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -633,33 +633,21 @@ struct LogMessageVersion { }; struct AddressExclusion { - uint32_t ip; + IPAddress ip; int port; AddressExclusion() : ip(0), port(0) {} - explicit AddressExclusion( uint32_t ip ) : ip(ip), port(0) {} - explicit AddressExclusion( uint32_t ip, int port ) : ip(ip), port(port) {} + explicit AddressExclusion(const IPAddress& ip) : ip(ip), port(0) {} + explicit AddressExclusion(const IPAddress& ip, int port) : ip(ip), port(port) {} - explicit AddressExclusion (std::string s) { - int 
a,b,c,d,p,count=-1; - if (sscanf(s.c_str(), "%d.%d.%d.%d:%d%n", &a,&b,&c,&d, &p, &count) == 5 && count == s.size()) { - ip = (a<<24)+(b<<16)+(c<<8)+d; - port = p; - } - else if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count) == 4 && count == s.size()) { - ip = (a<<24)+(b<<16)+(c<<8)+d; - port = 0; - } - else { - throw connection_string_invalid(); - } + bool operator<(AddressExclusion const& r) const { + if (ip != r.ip) return ip < r.ip; + return port < r.port; } - - bool operator< (AddressExclusion const& r) const { if (ip != r.ip) return ip < r.ip; return port>24)&0xff, (ip>>16)&0xff, (ip>>8)&0xff, ip&0xff ); - if (!isWholeMachine()) - as += format(":%d", port); + std::string as = format("%s", ip.toString().c_str()); + const char* formatPatt = ip.isV6() ? "[%s]:%d" : "%s:%d"; + if (!isWholeMachine()) return format(formatPatt, as.c_str(), port); return as; } diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index df17d6cc93..fa54b5b391 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ -1730,7 +1730,7 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") { data.locality.set(LiteralStringRef("rack"), StringRef(rack)); data.locality.set(LiteralStringRef("zoneid"), StringRef(rack)); data.locality.set(LiteralStringRef("machineid"), StringRef(machineId)); - data.address.ip = i; + data.address.ip = IPAddress(i); workers.push_back(data); } @@ -1749,8 +1749,8 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") { LiteralStringRef("machineid") }); for(auto worker = chosen.begin(); worker != chosen.end(); worker++) { - ASSERT(worker->ip < workers.size()); - LocalityData data = workers[worker->ip].locality; + ASSERT(worker->ip.toV4() < workers.size()); + LocalityData data = workers[worker->ip.toV4()].locality; for(auto field = fields.begin(); field != fields.end(); field++) { chosenValues[*field].insert(data.get(*field).get()); } diff --git a/fdbclient/MonitorLeader.actor.cpp 
b/fdbclient/MonitorLeader.actor.cpp index 64482da09d..c07ed5ef73 100644 --- a/fdbclient/MonitorLeader.actor.cpp +++ b/fdbclient/MonitorLeader.actor.cpp @@ -214,6 +214,28 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/basic") { ASSERT( input == cs.toString() ); } + { + input = "0xxdeadbeef:100100100@[::1]:1234,[::1]:1235"; + std::string commented("#start of comment\n"); + commented += input; + commented += "\n"; + commented += "# asdfasdf ##"; + + ClusterConnectionString cs(commented); + ASSERT(input == cs.toString()); + } + + { + input = "0xxdeadbeef:100100100@[abcd:dcba::1]:1234,[abcd:dcba::abcd:1]:1234"; + std::string commented("#start of comment\n"); + commented += input; + commented += "\n"; + commented += "# asdfasdf ##"; + + ClusterConnectionString cs(commented); + ASSERT(input == cs.toString()); + } + return Void(); } diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 61e44640c5..f4c982523d 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -749,7 +749,7 @@ Database Database::createDatabase( std::string connFileName, int apiVersion, Loc return Database::createDatabase(rccf, apiVersion, clientLocality); } -extern uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ); +extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs); Cluster::Cluster( Reference connFile, int apiVersion ) : clusterInterface(new AsyncVar>()) @@ -791,7 +791,7 @@ void Cluster::init( Reference connFile, bool startClientI .detailf("ImageOffset", "%p", platform::getImageOffset()) .trackLatest("ClientStart"); - initializeSystemMonitorMachineState(SystemMonitorMachineState(publicIP)); + initializeSystemMonitorMachineState(SystemMonitorMachineState(IPAddress(publicIP))); systemMonitor(); uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskFlushTrace ) ); @@ -1066,24 +1066,15 @@ bool GetRangeLimits::hasSatisfiedMinRows() { return 
hasByteLimit() && minRows == 0; } - AddressExclusion AddressExclusion::parse( StringRef const& key ) { //Must not change: serialized to the database! - std::string s = key.toString(); - int a,b,c,d,port,count=-1; - if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count)<4) { + try { + auto addr = NetworkAddress::parse(key.toString()); + return AddressExclusion(addr.ip, addr.port); + } catch (Error& e) { TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key)); return AddressExclusion(); } - s = s.substr(count); - uint32_t ip = (a<<24)+(b<<16)+(c<<8)+d; - if (!s.size()) - return AddressExclusion( ip ); - if (sscanf( s.c_str(), ":%d%n", &port, &count ) < 1 || count != s.size()) { - TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key)); - return AddressExclusion(); - } - return AddressExclusion( ip, port ); } Future> getRange( @@ -2038,7 +2029,7 @@ ACTOR Future< Standalone< VectorRef< const char*>>> getAddressesForKeyActor( Key Standalone> addresses; for (auto i : ssi) { - std::string ipString = toIPString(i.address().ip); + std::string ipString = i.address().ip.toString(); char* c_string = new (addresses.arena()) char[ipString.length()+1]; strcpy(c_string, ipString.c_str()); addresses.push_back(addresses.arena(), c_string); diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index ea4be9c8d8..cb54165469 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -374,7 +374,7 @@ const AddressExclusion decodeExcludedServersKey( KeyRef const& key ) { } std::string encodeExcludedServersKey( AddressExclusion const& addr ) { //FIXME: make sure what's persisted here is not affected by innocent changes elsewhere - std::string as = format( "%d.%d.%d.%d", (addr.ip>>24)&0xff, (addr.ip>>16)&0xff, (addr.ip>>8)&0xff, addr.ip&0xff ); + std::string as = format("%s", addr.ip.toString().c_str()); //ASSERT( StringRef(as).endsWith(LiteralStringRef(":0")) == (addr.port == 0) ); if 
(!addr.isWholeMachine()) as += format(":%d", addr.port); diff --git a/fdbrpc/FlowTransport.actor.cpp b/fdbrpc/FlowTransport.actor.cpp index 3e015d2995..930d5113ee 100644 --- a/fdbrpc/FlowTransport.actor.cpp +++ b/fdbrpc/FlowTransport.actor.cpp @@ -194,10 +194,7 @@ public: }; #define CONNECT_PACKET_V0 0x0FDB00A444020001LL -#define CONNECT_PACKET_V1 0x0FDB00A446030001LL #define CONNECT_PACKET_V0_SIZE 14 -#define CONNECT_PACKET_V1_SIZE 22 -#define CONNECT_PACKET_V2_SIZE 26 #pragma pack( push, 1 ) struct ConnectPacket { @@ -205,16 +202,44 @@ struct ConnectPacket { uint64_t protocolVersion; // Expect currentProtocolVersion uint16_t canonicalRemotePort; // Port number to reconnect to the originating process uint64_t connectionId; // Multi-version clients will use the same Id for both connections, other connections will set this to zero. Added at protocol Version 0x0FDB00A444020001. - uint32_t canonicalRemoteIp; // IP Address to reconnect to the originating process + union { + uint32_t v4; + uint8_t v6[16]; + } canonicalRemoteIp46; // IP Address to reconnect to the originating process - size_t minimumSize() { - if (protocolVersion < CONNECT_PACKET_V0) return CONNECT_PACKET_V0_SIZE; - if (protocolVersion < CONNECT_PACKET_V1) return CONNECT_PACKET_V1_SIZE; - return CONNECT_PACKET_V2_SIZE; + IPAddress canonicalRemoteIp() const { + if (isIPv6()) { + IPAddress::IPAddressStore ip; + memcpy(ip.data(), &canonicalRemoteIp46.v6, ip.size()); + return IPAddress(ip); + } else { + return IPAddress(canonicalRemoteIp46.v4); + } + } + + void setCanonicalRemoteIp(const IPAddress& ip) { + if (ip.isV6()) { + memcpy(&canonicalRemoteIp46.v6, ip.toV6().data(), 16); + } else { + canonicalRemoteIp46.v4 = ip.toV4(); + } + } + + bool isIPv6() const { return connectPacketLength == (sizeof(ConnectPacket) - sizeof(connectPacketLength)); } + + uint32_t totalPacketSize() const { return connectPacketLength + sizeof(connectPacketLength); } + + template + void serialize(Ar& ar) { + serializer(ar, 
connectPacketLength, protocolVersion, canonicalRemotePort, connectionId); + if (isIPv6()) { + ar.serializeBytes(&canonicalRemoteIp46.v6, sizeof(canonicalRemoteIp46.v6)); + } else { + serializer(ar, canonicalRemoteIp46.v4); + } } }; -static_assert( sizeof(ConnectPacket) == CONNECT_PACKET_V2_SIZE, "ConnectPacket packed incorrectly" ); #pragma pack( pop ) ACTOR static Future connectionReader(TransportData* transport, Reference conn, Peer* peer, @@ -256,23 +281,24 @@ struct Peer : NonCopyable { for(auto& addr : transport->localAddresses) { if(addr.isTLS() == destination.isTLS()) { pkt.canonicalRemotePort = addr.port; - pkt.canonicalRemoteIp = addr.ip; + pkt.setCanonicalRemoteIp(addr.ip); + pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength) - (addr.isV6() ? 0 : 12); found = true; break; } } if (!found) { pkt.canonicalRemotePort = 0; // a "mixed" TLS/non-TLS connection is like a client/server connection - there's no way to reverse it - pkt.canonicalRemoteIp = 0; + pkt.setCanonicalRemoteIp(IPAddress(0)); + pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength); } - pkt.connectPacketLength = sizeof(pkt)-sizeof(pkt.connectPacketLength); pkt.protocolVersion = currentProtocolVersion; pkt.connectionId = transport->transportId; PacketBuffer* pb_first = new PacketBuffer; PacketWriter wr( pb_first, NULL, Unversioned() ); - wr.serializeBinaryItem(pkt); + pkt.serialize(wr); unsent.prependWriteBuffer(pb_first, wr.finish()); } @@ -647,7 +673,7 @@ ACTOR static Future connectionReader( ConnectPacket* p = (ConnectPacket*)unprocessed_begin; uint64_t connectionId = 0; - int32_t connectPacketSize = p->minimumSize(); + int32_t connectPacketSize = p->totalPacketSize(); if ( unprocessed_end-unprocessed_begin >= connectPacketSize ) { if(p->protocolVersion >= 0x0FDB00A444020001) { connectionId = p->connectionId; @@ -655,18 +681,22 @@ ACTOR static Future connectionReader( if( (p->protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & 
compatibleProtocolVersionMask) ) { incompatibleProtocolVersionNewer = p->protocolVersion > currentProtocolVersion; - NetworkAddress addr = p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress(); + NetworkAddress addr = p->canonicalRemotePort + ? NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort) + : conn->getPeerAddress(); if(connectionId != 1) addr.port = 0; if(!transport->multiVersionConnections.count(connectionId)) { if(now() - transport->lastIncompatibleMessage > FLOW_KNOBS->CONNECTION_REJECTED_MESSAGE_DELAY) { TraceEvent(SevWarn, "ConnectionRejected", conn->getDebugID()) - .detail("Reason", "IncompatibleProtocolVersion") - .detail("LocalVersion", currentProtocolVersion) - .detail("RejectedVersion", p->protocolVersion) - .detail("VersionMask", compatibleProtocolVersionMask) - .detail("Peer", p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress()) - .detail("ConnectionId", connectionId); + .detail("Reason", "IncompatibleProtocolVersion") + .detail("LocalVersion", currentProtocolVersion) + .detail("RejectedVersion", p->protocolVersion) + .detail("VersionMask", compatibleProtocolVersionMask) + .detail("Peer", p->canonicalRemotePort ? 
NetworkAddress(p->canonicalRemoteIp(), + p->canonicalRemotePort) + : conn->getPeerAddress()) + .detail("ConnectionId", connectionId); transport->lastIncompatibleMessage = now(); } if(!transport->incompatiblePeers.count(addr)) { @@ -699,7 +729,9 @@ ACTOR static Future connectionReader( peerProtocolVersion = p->protocolVersion; if (peer != nullptr) { // Outgoing connection; port information should be what we expect - TraceEvent("ConnectedOutgoing").suppressFor(1.0).detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) ); + TraceEvent("ConnectedOutgoing") + .suppressFor(1.0) + .detail("PeerAddr", NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort)); peer->compatible = compatible; peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer; if (!compatible) { @@ -709,7 +741,8 @@ ACTOR static Future connectionReader( ASSERT( p->canonicalRemotePort == peerAddress.port ); } else { if (p->canonicalRemotePort) { - peerAddress = NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort, true, peerAddress.isTLS() ); + peerAddress = NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort, true, + peerAddress.isTLS()); } peer = transport->getPeer(peerAddress); peer->compatible = compatible; diff --git a/fdbrpc/TLSConnection.actor.cpp b/fdbrpc/TLSConnection.actor.cpp index 0b49e6ea3d..83f0a6cd2e 100644 --- a/fdbrpc/TLSConnection.actor.cpp +++ b/fdbrpc/TLSConnection.actor.cpp @@ -177,7 +177,7 @@ Future> TLSNetworkConnections::connect( NetworkAddress to // addresses against certificates, so we have our own peer verifying logic // to use. For FDB<->external system connections, we can use the standard // hostname-based certificate verification logic. 
- if (host.empty() || host == toIPString(toAddr.ip)) + if (host.empty() || host == toAddr.ip.toString()) return wrap(options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), true, network->connect(clearAddr), std::string("")); else return wrap( options->get_policy(TLSOptions::POLICY_NO_VERIFY_PEERS), true, network->connect( clearAddr ), host ); diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 4d63b5496d..c729675769 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -135,28 +135,29 @@ struct SimClogging { return t - tnow; } - void clogPairFor( uint32_t from, uint32_t to, double t ) { + void clogPairFor(const IPAddress& from, const IPAddress& to, double t) { auto& u = clogPairUntil[ std::make_pair( from, to ) ]; u = std::max(u, now() + t); } - void clogSendFor( uint32_t from, double t ) { + void clogSendFor(const IPAddress& from, double t) { auto& u = clogSendUntil[from]; u = std::max(u, now() + t); } - void clogRecvFor( uint32_t from, double t ) { + void clogRecvFor(const IPAddress& from, double t) { auto& u = clogRecvUntil[from]; u = std::max(u, now() + t); } - double setPairLatencyIfNotSet( uint32_t from, uint32_t to, double t ) { + double setPairLatencyIfNotSet(const IPAddress& from, const IPAddress& to, double t) { auto i = clogPairLatency.find( std::make_pair(from,to) ); if (i == clogPairLatency.end()) i = clogPairLatency.insert( std::make_pair( std::make_pair(from,to), t ) ).first; return i->second; } + private: - std::map< uint32_t, double > clogSendUntil, clogRecvUntil; - std::map< std::pair, double > clogPairUntil; - std::map< std::pair, double > clogPairLatency; + std::map clogSendUntil, clogRecvUntil; + std::map, double> clogPairUntil; + std::map, double> clogPairLatency; double halfLatency() { double a = g_random->random01(); const double pFast = 0.999; @@ -789,9 +790,10 @@ public: Reference myc( new Sim2Conn( getCurrentProcess() ) ); Reference peerc( new Sim2Conn( peerp ) ); + // TODO Support IPv6 myc->connect(peerc, toAddr); 
- peerc->connect(myc, NetworkAddress( getCurrentProcess()->address.ip + g_random->randomInt(0,256), - g_random->randomInt(40000, 60000) )); + IPAddress localIp(getCurrentProcess()->address.ip.toV4() + g_random->randomInt(0, 256)); + peerc->connect(myc, NetworkAddress(localIp, g_random->randomInt(40000, 60000))); ((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*g_random->random01(), Reference(peerc) ); return onConnect( ::delay(0.5*g_random->random01()), myc ); @@ -1499,22 +1501,24 @@ public: return (kt == ktMin); } - virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) { + virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) { if (mode == ClogDefault) { double a = g_random->random01(); if ( a < 0.3 ) mode = ClogSend; else if (a < 0.6 ) mode = ClogReceive; else mode = ClogAll; } - TraceEvent("ClogInterface").detail("IP", toIPString(ip)).detail("Delay", seconds) - .detail("Queue", mode==ClogSend?"Send":mode==ClogReceive?"Receive":"All"); + TraceEvent("ClogInterface") + .detail("IP", ip.toString()) + .detail("Delay", seconds) + .detail("Queue", mode == ClogSend ? "Send" : mode == ClogReceive ? 
"Receive" : "All"); if (mode == ClogSend || mode==ClogAll) g_clogging.clogSendFor( ip, seconds ); if (mode == ClogReceive || mode==ClogAll) g_clogging.clogRecvFor( ip, seconds ); } - virtual void clogPair( uint32_t from, uint32_t to, double seconds ) { + virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) { g_clogging.clogPairFor( from, to, seconds ); } virtual std::vector getAllProcesses() const { @@ -1653,7 +1657,7 @@ public: INetwork *net2; //Map from machine IP -> machine disk space info - std::map diskSpaceMap; + std::map diskSpaceMap; //Whether or not yield has returned true during the current iteration of the run loop bool yielded; diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 8b76289f0a..829b2c3554 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -114,8 +114,12 @@ public: std::string toString() const { const NetworkAddress& address = addresses[0]; - return format("name: %s address: %d.%d.%d.%d:%d zone: %s datahall: %s class: %s excluded: %d cleared: %d", - name, (address.ip>>24)&0xff, (address.ip>>16)&0xff, (address.ip>>8)&0xff, address.ip&0xff, address.port, (locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"), (locality.dataHallId().present() ? locality.dataHallId().get().printable().c_str() : "[unset]"), startingClass.toString().c_str(), excluded, cleared); + return format( + "name: %s address: %s:%d zone: %s datahall: %s class: %s excluded: %d cleared: %d", name, + address.ip.toString().c_str(), address.port, + (locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"), + (locality.dataHallId().present() ? 
locality.dataHallId().get().printable().c_str() : "[unset]"), + startingClass.toString().c_str(), excluded, cleared); } // Members not for external use @@ -256,8 +260,8 @@ public: allSwapsDisabled = true; } - virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) = 0; - virtual void clogPair( uint32_t from, uint32_t to, double seconds ) = 0; + virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) = 0; + virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) = 0; virtual std::vector getAllProcesses() const = 0; virtual ProcessInfo* getProcessByAddress( NetworkAddress const& address ) = 0; virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) = 0; diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index d27358fa33..c14f18f5f0 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -289,7 +289,7 @@ static JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, vectorsecond; try { - std::string address = toIPString(it->first.ip); + std::string address = it->first.ip.toString(); // We will use the "physical" caluculated machine ID here to limit exposure to machineID repurposing std::string machineId = event.getValue("MachineID"); @@ -1254,9 +1254,15 @@ namespace std { size_t operator()(const NetworkAddress& na) const { - return (na.ip << 16) + na.port; - } - }; + int result = 0; + if (na.ip.isV6()) { + result = hashlittle(na.ip.toV6().data(), 16, 0); + } else { + result = na.ip.toV4(); + } + return (result << 16) + na.port; + } + }; } ACTOR template @@ -1667,7 +1673,7 @@ static JsonBuilderArray getClientIssuesAsMessages( ProcessIssuesMap const& _issu std::map> deduplicatedIssues; for(auto i : issues) { - deduplicatedIssues[i.second.first].push_back(format("%s:%d", toIPString(i.first.ip).c_str(), i.first.port)); + deduplicatedIssues[i.second.first].push_back(format("%s:%d", i.first.ip.toString().c_str(), 
i.first.port)); } for (auto i : deduplicatedIssues) { diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index abb80c817b..0ed260e8bc 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -170,7 +170,7 @@ extern void copyTest(); extern void versionedMapTest(); extern void createTemplateDatabase(); // FIXME: this really belongs in a header somewhere since it is actually used. -extern uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ); +extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs); extern const char* getHGVersion(); @@ -776,7 +776,7 @@ std::pair buildNetworkAddresses(const Cl if (autoPublicAddress) { try { const NetworkAddress& parsedAddress = NetworkAddress::parse("0.0.0.0:" + publicAddressStr.substr(5)); - uint32_t publicIP = determinePublicIPAutomatically(connectionFile.getConnectionString()); + const IPAddress publicIP = determinePublicIPAutomatically(connectionFile.getConnectionString()); publicNetworkAddresses.emplace_back(publicIP, parsedAddress.port, true, parsedAddress.isTLS()); } catch (Error& e) { fprintf(stderr, "ERROR: could not determine public address automatically from `%s': %s\n", publicAddressStr.c_str(), e.what()); diff --git a/fdbserver/workloads/CpuProfiler.actor.cpp b/fdbserver/workloads/CpuProfiler.actor.cpp index a4d072203d..a8208d61d3 100644 --- a/fdbserver/workloads/CpuProfiler.actor.cpp +++ b/fdbserver/workloads/CpuProfiler.actor.cpp @@ -90,7 +90,8 @@ struct CpuProfilerWorkload : TestWorkload req.duration = 0; //unused //The profiler output name will be the ip.port.prof - req.outputFile = StringRef(toIPString(self->profilingWorkers[i].address().ip) + "." + format("%d", self->profilingWorkers[i].address().port) + ".profile.bin"); + req.outputFile = StringRef(self->profilingWorkers[i].address().ip.toString() + "." 
+ + format("%d", self->profilingWorkers[i].address().port) + ".profile.bin"); replies.push_back(self->profilingWorkers[i].clientInterface.profiler.tryGetReply(req)); } diff --git a/fdbserver/workloads/RemoveServersSafely.actor.cpp b/fdbserver/workloads/RemoveServersSafely.actor.cpp index 6f6cda1f64..d852db53b6 100644 --- a/fdbserver/workloads/RemoveServersSafely.actor.cpp +++ b/fdbserver/workloads/RemoveServersSafely.actor.cpp @@ -65,7 +65,7 @@ struct RemoveServersSafelyWorkload : TestWorkload { std::map>, AddressExclusion> machinesMap; // Locality Zone Id -> ip address std::vector processAddrs; // IF (killProcesses) THEN ip:port ELSE ip addresses unique list of the machines - std::map>> ip_dcid; + std::map>> ip_dcid; auto processes = getServers(); for(auto& it : processes) { AddressExclusion machineIp(it->address.ip); diff --git a/fdbserver/workloads/SaveAndKill.actor.cpp b/fdbserver/workloads/SaveAndKill.actor.cpp index b9ab47063b..3a6a8fdd69 100644 --- a/fdbserver/workloads/SaveAndKill.actor.cpp +++ b/fdbserver/workloads/SaveAndKill.actor.cpp @@ -91,13 +91,15 @@ struct SaveAndKillWorkload : TestWorkload { ini.SetValue(machineIdString, "dcUID", (process->locality.dcId().present()) ? process->locality.dcId().get().printable().c_str() : ""); ini.SetValue(machineIdString, "zoneId", (process->locality.zoneId().present()) ? 
process->locality.zoneId().get().printable().c_str() : ""); ini.SetValue(machineIdString, "mClass", format("%d", process->startingClass.classType()).c_str()); - ini.SetValue(machineIdString, format("ipAddr%d", process->address.port-1).c_str(), format("%d", process->address.ip).c_str()); + ini.SetValue(machineIdString, format("ipAddr%d", process->address.port - 1).c_str(), + format("%d", process->address.ip.toV4()).c_str()); ini.SetValue(machineIdString, format("%d", process->address.port-1).c_str(), process->dataFolder); ini.SetValue(machineIdString, format("c%d", process->address.port-1).c_str(), process->coordinationFolder); j++; } else { - ini.SetValue(machineIdString, format("ipAddr%d", process->address.port-1).c_str(), format("%d", process->address.ip).c_str()); + ini.SetValue(machineIdString, format("ipAddr%d", process->address.port - 1).c_str(), + format("%d", process->address.ip.toV4()).c_str()); int oldValue = machines.find(machineId)->second; ini.SetValue(machineIdString, format("%d", process->address.port-1).c_str(), process->dataFolder); ini.SetValue(machineIdString, format("c%d", process->address.port-1).c_str(), process->coordinationFolder); diff --git a/flow/Net2.actor.cpp b/flow/Net2.actor.cpp index 1ea35070c4..9afea3140c 100644 --- a/flow/Net2.actor.cpp +++ b/flow/Net2.actor.cpp @@ -172,7 +172,7 @@ public: TDMetricCollection tdmetrics; double currentTime; bool stopped; - std::map< uint32_t, bool > addressOnHostCache; + std::map addressOnHostCache; uint64_t numYields; @@ -226,8 +226,16 @@ public: std::vector blobCredentialFiles; }; +static boost::asio::ip::address tcpAddress(IPAddress const& n) { + if (n.isV6()) { + return boost::asio::ip::address_v6(n.toV6()); + } else { + return boost::asio::ip::address_v4(n.toV4()); + } +} + static tcp::endpoint tcpEndpoint( NetworkAddress const& n ) { - return tcp::endpoint( boost::asio::ip::address_v4( n.ip ), n.port ); + return tcp::endpoint(tcpAddress(n.ip), n.port); } class BindPromise { @@ -458,7 +466,9 @@ 
private: auto f = p.getFuture(); self->acceptor.async_accept( conn->getSocket(), peer_endpoint, std::move(p) ); wait( f ); - conn->accept( NetworkAddress(peer_endpoint.address().to_v4().to_ulong(), peer_endpoint.port()) ); + auto peer_address = peer_endpoint.address().is_v6() ? IPAddress(peer_endpoint.address().to_v6().to_bytes()) + : IPAddress(peer_endpoint.address().to_v4().to_ulong()); + conn->accept(NetworkAddress(peer_address, peer_endpoint.port())); return conn; } catch (...) { @@ -850,13 +860,14 @@ ACTOR static Future> resolveTCPEndpoint_impl( Net2 * } std::vector addrs; - + tcp::resolver::iterator end; while(iter != end) { auto endpoint = iter->endpoint(); - // Currently only ipv4 is supported by NetworkAddress auto addr = endpoint.address(); - if(addr.is_v4()) { + if (addr.is_v6()) { + addrs.push_back(NetworkAddress(IPAddress(addr.to_v6().to_bytes()), endpoint.port())); + } else { addrs.push_back(NetworkAddress(addr.to_v4().to_ulong(), endpoint.port())); } ++iter; @@ -890,9 +901,10 @@ bool Net2::isAddressOnThisHost( NetworkAddress const& addr ) { try { boost::asio::io_service ioService; boost::asio::ip::udp::socket socket(ioService); - boost::asio::ip::udp::endpoint endpoint(boost::asio::ip::address_v4(addr.ip), 1); + boost::asio::ip::udp::endpoint endpoint(tcpAddress(addr.ip), 1); socket.connect(endpoint); - bool local = socket.local_endpoint().address().to_v4().to_ulong() == addr.ip; + bool local = addr.ip.isV6() ? 
socket.local_endpoint().address().to_v6().to_bytes() == addr.ip.toV6() + : socket.local_endpoint().address().to_v4().to_ulong() == addr.ip.toV4(); socket.close(); if (local) TraceEvent(SevInfo, "AddressIsOnHost").detail("Address", addr); return addressOnHostCache[ addr.ip ] = local; diff --git a/flow/Platform.cpp b/flow/Platform.cpp index 51e2e492c5..c0c9aecb44 100644 --- a/flow/Platform.cpp +++ b/flow/Platform.cpp @@ -499,7 +499,7 @@ void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) { } #ifdef __unixish__ -const char* getInterfaceName(uint32_t _ip) { +const char* getInterfaceName(const IPAddress& _ip) { INJECT_FAULT( platform_error, "getInterfaceName" ); static char iname[20]; @@ -514,9 +514,15 @@ const char* getInterfaceName(uint32_t _ip) { for (struct ifaddrs* iter = interfaces; iter; iter = iter->ifa_next) { if(!iter->ifa_addr) continue; - if (iter->ifa_addr->sa_family == AF_INET) { + if (iter->ifa_addr->sa_family == AF_INET && _ip.isV4()) { uint32_t ip = ntohl(((struct sockaddr_in*)iter->ifa_addr)->sin_addr.s_addr); - if (ip == _ip) { + if (ip == _ip.toV4()) { + ifa_name = iter->ifa_name; + break; + } + } else if (iter->ifa_addr->sa_family == AF_INET6 && _ip.isV6()) { + struct sockaddr_in6* ifa_addr = (struct sockaddr_in6*)iter->ifa_addr; + if (memcmp(_ip.toV6().data(), &ifa_addr->sin6_addr, 16) == 0) { ifa_name = iter->ifa_name; break; } @@ -538,8 +544,8 @@ const char* getInterfaceName(uint32_t _ip) { #endif #if defined(__linux__) -void getNetworkTraffic(uint32_t ip, uint64_t& bytesSent, uint64_t& bytesReceived, - uint64_t& outSegs, uint64_t& retransSegs) { +void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs, + uint64_t& retransSegs) { INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux... 
const char* ifa_name = nullptr; try { @@ -746,8 +752,8 @@ dev_t getDeviceId(std::string path) { #endif #ifdef __APPLE__ -void getNetworkTraffic(uint32_t ip, uint64_t& bytesSent, uint64_t& bytesReceived, - uint64_t& outSegs, uint64_t& retransSegs) { +void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs, + uint64_t& retransSegs) { INJECT_FAULT( platform_error, "getNetworkTraffic" ); const char* ifa_name = nullptr; @@ -1095,7 +1101,7 @@ void initPdhStrings(SystemStatisticsState *state, std::string dataFolder) { } #endif -SystemStatistics getSystemStatistics(std::string dataFolder, uint32_t ip, SystemStatisticsState **statState) { +SystemStatistics getSystemStatistics(std::string dataFolder, const IPAddress* ip, SystemStatisticsState** statState) { if( (*statState) == NULL ) (*statState) = new SystemStatisticsState(); SystemStatistics returnStats; @@ -1189,7 +1195,7 @@ SystemStatistics getSystemStatistics(std::string dataFolder, uint32_t ip, System uint64_t machineOutSegs = (*statState)->machineLastOutSegs; uint64_t machineRetransSegs = (*statState)->machineLastRetransSegs; - getNetworkTraffic(ip, machineNowSent, machineNowReceived, machineOutSegs, machineRetransSegs); + getNetworkTraffic(*ip, machineNowSent, machineNowReceived, machineOutSegs, machineRetransSegs); if( returnStats.initialized ) { returnStats.machineMegabitsSent = ((machineNowSent - (*statState)->machineLastSent) * 8e-6); returnStats.machineMegabitsReceived = ((machineNowReceived - (*statState)->machineLastReceived) * 8e-6); diff --git a/flow/Platform.h b/flow/Platform.h index 56b6fa9cdf..7cfbeab75f 100644 --- a/flow/Platform.h +++ b/flow/Platform.h @@ -245,7 +245,9 @@ struct SystemStatistics { struct SystemStatisticsState; -SystemStatistics getSystemStatistics(std::string dataFolder, uint32_t ip, SystemStatisticsState **statState); +class IPAddress; + +SystemStatistics getSystemStatistics(std::string dataFolder, const IPAddress* ip, 
SystemStatisticsState **statState); double getProcessorTimeThread(); diff --git a/flow/SystemMonitor.cpp b/flow/SystemMonitor.cpp index bc7a21081a..207e5c30d7 100644 --- a/flow/SystemMonitor.cpp +++ b/flow/SystemMonitor.cpp @@ -43,19 +43,18 @@ void systemMonitor() { SystemStatistics getSystemStatistics() { static StatisticsState statState = StatisticsState(); + const IPAddress ipAddr = machineState.ip.present() ? machineState.ip.get() : IPAddress(); return getSystemStatistics( - machineState.folder.present() ? machineState.folder.get() : "", - machineState.ip.present() ? machineState.ip.get() : 0, - &statState.systemState); + machineState.folder.present() ? machineState.folder.get() : "", &ipAddr, &statState.systemState); } #define TRACEALLOCATOR( size ) TraceEvent("MemSample").detail("Count", FastAllocator::getApproximateMemoryUnused()/size).detail("TotalSize", FastAllocator::getApproximateMemoryUnused()).detail("SampleCount", 1).detail("Hash", "FastAllocatedUnused" #size ).detail("Bt", "na") #define DETAILALLOCATORMEMUSAGE( size ) detail("TotalMemory"#size, FastAllocator::getTotalMemory()).detail("ApproximateUnusedMemory"#size, FastAllocator::getApproximateMemoryUnused()).detail("ActiveThreads"#size, FastAllocator::getActiveThreads()) SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *statState, bool machineMetrics) { - SystemStatistics currentStats = getSystemStatistics(machineState.folder.present() ? machineState.folder.get() : "", - machineState.ip.present() ? machineState.ip.get() : 0, - &statState->systemState); + const IPAddress ipAddr = machineState.ip.present() ? machineState.ip.get() : IPAddress(); + SystemStatistics currentStats = getSystemStatistics(machineState.folder.present() ? 
machineState.folder.get() : "", + &ipAddr, &statState->systemState); NetworkData netData; netData.init(); if (!DEBUG_DETERMINISM && currentStats.initialized) { diff --git a/flow/SystemMonitor.h b/flow/SystemMonitor.h index a99d00b0cb..8c7d41c5ba 100644 --- a/flow/SystemMonitor.h +++ b/flow/SystemMonitor.h @@ -29,14 +29,15 @@ struct SystemMonitorMachineState { Optional folder; Optional> zoneId; Optional> machineId; - Optional ip; + Optional ip; double monitorStartTime; SystemMonitorMachineState() : monitorStartTime(0) {} - SystemMonitorMachineState(uint32_t ip) : ip(ip), monitorStartTime(0) {} - SystemMonitorMachineState(std::string folder, Optional> zoneId, Optional> machineId, uint32_t ip) - : folder(folder), zoneId(zoneId), machineId(machineId), ip(ip), monitorStartTime(0) {} + explicit SystemMonitorMachineState(const IPAddress& ip) : ip(ip), monitorStartTime(0) {} + SystemMonitorMachineState(std::string folder, Optional> zoneId, + Optional> machineId, const IPAddress& ip) + : folder(folder), zoneId(zoneId), machineId(machineId), ip(ip), monitorStartTime(0) {} }; void initializeSystemMonitorMachineState(SystemMonitorMachineState machineState); diff --git a/flow/TDMetric.actor.h b/flow/TDMetric.actor.h index dfe153393d..84862aefcd 100755 --- a/flow/TDMetric.actor.h +++ b/flow/TDMetric.actor.h @@ -183,8 +183,7 @@ public: // Get and store the local address in the metric collection, but only if it is not 0.0.0.0:0 if( address.size() == 0 ) { NetworkAddress addr = g_network->getLocalAddress(); - if(addr.ip != 0 && addr.port != 0) - address = StringRef(addr.toString()); + if (addr.ip.isValid() && addr.port != 0) address = StringRef(addr.toString()); } return address.size() != 0; } diff --git a/flow/Trace.cpp b/flow/Trace.cpp index 0fb6bcdfcd..ebbb22259a 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -333,7 +333,8 @@ public: void annotateEvent( TraceEventFields &fields ) { if(localAddress.present()) { - fields.addField("Machine", format("%d.%d.%d.%d:%d", 
(localAddress.get().ip>>24)&0xff, (localAddress.get().ip>>16)&0xff, (localAddress.get().ip>>8)&0xff, localAddress.get().ip&0xff, localAddress.get().port)); + fields.addField("Machine", + format("%s:%d", localAddress.get().ip.toString().c_str(), localAddress.get().port)); } fields.addField("LogGroup", logGroup); @@ -624,7 +625,7 @@ void openTraceFile(const NetworkAddress& na, uint64_t rollsize, uint64_t maxLogs if (baseOfBase.empty()) baseOfBase = "trace"; - std::string baseName = format("%s.%03d.%03d.%03d.%03d.%d", baseOfBase.c_str(), (na.ip>>24)&0xff, (na.ip>>16)&0xff, (na.ip>>8)&0xff, na.ip&0xff, na.port); + std::string baseName = format("%s.%s.%d", baseOfBase.c_str(), na.ip.toString().c_str(), na.port); g_traceLog.open( directory, baseName, logGroup, format("%lld", time(NULL)), rollsize, maxLogsSize, !g_network->isSimulated() ? na : Optional()); uncancellable(recurring(&flushTraceFile, FLOW_KNOBS->TRACE_FLUSH_INTERVAL, TaskFlushTrace)); @@ -716,7 +717,7 @@ bool TraceEvent::init() { detail("Type", type); if(g_network && g_network->isSimulated()) { NetworkAddress local = g_network->getLocalAddress(); - detailf("Machine", "%d.%d.%d.%d:%d", (local.ip>>24)&0xff, (local.ip>>16)&0xff, (local.ip>>8)&0xff, local.ip&0xff, local.port); + detailf("Machine", "%s:%d", local.ip.toString().c_str(), local.port); } detail("ID", id); if(err.isValid()) { @@ -1015,7 +1016,7 @@ void TraceBatch::dump() { std::string machine; if(g_network->isSimulated()) { NetworkAddress local = g_network->getLocalAddress(); - machine = format("%d.%d.%d.%d:%d", (local.ip>>24)&0xff,(local.ip>>16)&0xff,(local.ip>>8)&0xff,local.ip&0xff,local.port); + machine = format("%s:%d", local.ip.toString().c_str(), local.port); } for(int i = 0; i < attachBatch.size(); i++) { diff --git a/flow/network.cpp b/flow/network.cpp index 496537eacc..9e484d1dce 100644 --- a/flow/network.cpp +++ b/flow/network.cpp @@ -18,8 +18,54 @@ * limitations under the License. 
*/ +#include "boost/asio.hpp" + #include "flow/network.h" #include "flow/flow.h" +#include "flow/UnitTest.h" + +IPAddress::IPAddress() : store({}), isV6addr(false) {} + +IPAddress::IPAddress(const IPAddressStore& v6addr) : store(v6addr), isV6addr(true) {} + +IPAddress::IPAddress(uint32_t v4addr) : store({}), isV6addr(false) { + uint32_t* parts = (uint32_t*)store.data(); + parts[0] = v4addr; +} + +uint32_t IPAddress::toV4() const { + const uint32_t* parts = (uint32_t*)store.data(); + return parts[0]; +} + +bool IPAddress::operator==(const IPAddress& addr) const { + return isV6addr == addr.isV6addr && store == addr.store; +} + +bool IPAddress::operator!=(const IPAddress& addr) const { + return !(*this == addr); +} + +bool IPAddress::operator<(const IPAddress& addr) const { + return isV6() == addr.isV6() ? store < addr.store : isV6() < addr.isV6(); +} + +std::string IPAddress::toString() const { + if (isV6()) { + return boost::asio::ip::address_v6(store).to_string(); + } else { + const uint32_t ip = toV4(); + return format("%d.%d.%d.%d", (ip >> 24) & 0xff, (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff); + } +} + +bool IPAddress::isValid() const { + if (!isV6()) { + return toV4() != 0; + } + + return std::any_of(store.begin(), store.end(), [](uint8_t part) { return part > 0; }); +} NetworkAddress NetworkAddress::parse( std::string const& s ) { bool isTLS = false; @@ -27,12 +73,31 @@ NetworkAddress NetworkAddress::parse( std::string const& s ) { if( s.size() > 4 && strcmp(s.c_str() + s.size() - 4, ":tls") == 0 ) { isTLS = true; f = s.substr(0, s.size() - 4); - } else + } else { f = s; - int a,b,c,d,port,count=-1; - if (sscanf(f.c_str(), "%d.%d.%d.%d:%d%n", &a,&b,&c,&d, &port, &count)<5 || count != f.size()) - throw connection_string_invalid(); - return NetworkAddress( (a<<24)+(b<<16)+(c<<8)+d, port, true, isTLS ); + } + + if (f[0] == '[') { + // IPv6 address/port pair is represented as "[ip]:port" + auto addrEnd = f.find_first_of(']'); + if (addrEnd == 
std::string::npos || f[addrEnd + 1] != ':') { + throw connection_string_invalid(); + } + + try { + auto port = std::stoi(f.substr(addrEnd + 2)); + auto addr = boost::asio::ip::address::from_string(f.substr(1, addrEnd - 1)); + ASSERT(addr.is_v6()); + return NetworkAddress(IPAddress(addr.to_v6().to_bytes()), port, true, isTLS); + } catch (...) { + throw connection_string_invalid(); + } + } else { + int a, b, c, d, port, count = -1; + if (sscanf(f.c_str(), "%d.%d.%d.%d:%d%n", &a, &b, &c, &d, &port, &count) < 5 || count != f.size()) + throw connection_string_invalid(); + return NetworkAddress((a << 24) + (b << 16) + (c << 8) + d, port, true, isTLS); + } } std::vector NetworkAddress::parseList( std::string const& addrs ) { @@ -49,11 +114,13 @@ std::vector NetworkAddress::parseList( std::string const& addrs } std::string NetworkAddress::toString() const { - return format( "%d.%d.%d.%d:%d%s", (ip>>24)&0xff, (ip>>16)&0xff, (ip>>8)&0xff, ip&0xff, port, isTLS() ? ":tls" : "" ); -} - -std::string toIPString(uint32_t ip) { - return format( "%d.%d.%d.%d", (ip>>24)&0xff, (ip>>16)&0xff, (ip>>8)&0xff, ip&0xff ); + const char* patt; + if (isV6()) { + patt = "[%s]:%d%s"; + } else { + patt = "%s:%d%s"; + } + return format(patt, ip.toString().c_str(), port, isTLS() ? 
":tls" : ""); } std::string toIPVectorString(std::vector ips) { @@ -82,3 +149,26 @@ Future> INetworkConnections::connect( std::string host, s return connect(addr, host); }); } + +TEST_CASE("/flow/network/ipaddress") { + ASSERT(NetworkAddress::parse("[::1]:4800").toString() == "[::1]:4800"); + + { + auto addr = "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:4800"; + auto addrParsed = NetworkAddress::parse(addr); + auto addrCompressed = "[2001:db8:85a3::8a2e:370:7334]:4800"; + ASSERT(addrParsed.isV6()); + ASSERT(!addrParsed.isTLS()); + ASSERT(addrParsed.toString() == addrCompressed); + } + + { + auto addr = "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:4800:tls"; + auto addrParsed = NetworkAddress::parse(addr); + auto addrCompressed = "[2001:db8:85a3::8a2e:370:7334]:4800:tls"; + ASSERT(addrParsed.isV6()); + ASSERT(addrParsed.isTLS()); + ASSERT(addrParsed.toString() == addrCompressed); + return Void(); + } +} diff --git a/flow/network.h b/flow/network.h index 4c3a2404df..cf4689fb84 100644 --- a/flow/network.h +++ b/flow/network.h @@ -22,6 +22,7 @@ #define FLOW_OPENNETWORK_H #pragma once +#include #include #include #include "flow/serialize.h" @@ -75,25 +76,78 @@ enum { class Void; +struct IPAddress { + // Represents both IPv4 and IPv6 address. For IPv4 addresses, + // only the first 32bits are relevant and rest are initialized to + // 0. + typedef std::array IPAddressStore; + + IPAddress(); + explicit IPAddress(const IPAddressStore& v6addr); + explicit IPAddress(uint32_t v4addr); + + bool isV6() const { return isV6addr; } + bool isV4() const { return !isV6addr; } + bool isValid() const; + + // Returns raw v4/v6 representation of address. Caller is responsible + // to call these functions safely. 
+ uint32_t toV4() const; + const IPAddressStore& toV6() const { return store; } + + std::string toString() const; + + bool operator==(const IPAddress& addr) const; + bool operator!=(const IPAddress& addr) const; + bool operator<(const IPAddress& addr) const; + + template + void serialize(Ar& ar) { + serializer(ar, isV6addr); + if (isV6addr) { + serializer(ar, store); + } else { + uint32_t* parts = (uint32_t*)store.data(); + serializer(ar, parts[0]); + } + } + +private: + bool isV6addr; + IPAddressStore store; +}; + struct NetworkAddress { // A NetworkAddress identifies a particular running server (i.e. a TCP endpoint). - uint32_t ip; + IPAddress ip; uint16_t port; uint16_t flags; enum { FLAG_PRIVATE = 1, FLAG_TLS = 2 }; - NetworkAddress() : ip(0), port(0), flags(FLAG_PRIVATE) {} - NetworkAddress( uint32_t ip, uint16_t port ) : ip(ip), port(port), flags(FLAG_PRIVATE) {} - NetworkAddress( uint32_t ip, uint16_t port, bool isPublic, bool isTLS ) : ip(ip), port(port), - flags( (isPublic ? 0 : FLAG_PRIVATE) | (isTLS ? FLAG_TLS : 0 ) ) {} + NetworkAddress() : ip(IPAddress(0)), port(0), flags(FLAG_PRIVATE) {} + NetworkAddress(const IPAddress& address, uint16_t port, bool isPublic, bool isTLS) + : ip(address), port(port), flags((isPublic ? 0 : FLAG_PRIVATE) | (isTLS ? 
FLAG_TLS : 0)) {} + NetworkAddress(uint32_t ip, uint16_t port, bool isPublic, bool isTLS) + : NetworkAddress(IPAddress(ip), port, isPublic, isTLS) {} - bool operator == (NetworkAddress const& r) const { return ip==r.ip && port==r.port && flags==r.flags; } - bool operator != (NetworkAddress const& r) const { return ip!=r.ip || port!=r.port || flags!=r.flags; } - bool operator< (NetworkAddress const& r) const { if (flags != r.flags) return flags < r.flags; if (ip != r.ip) return ip < r.ip; return port parseList( std::string const& ); @@ -101,13 +155,18 @@ struct NetworkAddress { template void serialize(Ar& ar) { - ar.serializeBinaryItem(*this); + if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) { + uint32_t ipV4; + serializer(ar, ipV4, port, flags); + ip = IPAddress(ipV4); + } else { + serializer(ar, ip, port, flags); + } } }; typedef std::vector NetworkAddressList; -std::string toIPString(uint32_t ip); std::string toIPVectorString(std::vector ips); template class Future; From 25daabdc0253beb3187196b0fe3231d7e269032e Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Wed, 27 Feb 2019 17:53:38 -0800 Subject: [PATCH 10/71] net: TraceEvent and toIPVectorString for new IPAddress structure #963 --- flow/Trace.cpp | 3 +++ flow/Trace.h | 1 + flow/network.cpp | 10 ++++++++++ flow/network.h | 1 + 4 files changed, 15 insertions(+) diff --git a/flow/Trace.cpp b/flow/Trace.cpp index ebbb22259a..55b178349d 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -828,6 +828,9 @@ TraceEvent& TraceEvent::detail( std::string key, long long unsigned int value ) TraceEvent& TraceEvent::detail( std::string key, const NetworkAddress& value ) { return detailImpl( std::move(key), value.toString() ); } +TraceEvent& TraceEvent::detail( std::string key, const IPAddress& value ) { + return detailImpl( std::move(key), value.toString() ); +} TraceEvent& TraceEvent::detail( std::string key, const UID& value ) { return detailf( std::move(key), "%016llx", value.first() ); // 
SOMEDAY: Log entire value? We also do this explicitly in some "lists" in various individual TraceEvent calls } diff --git a/flow/Trace.h b/flow/Trace.h index d3e62223d6..e0bdee46d6 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -158,6 +158,7 @@ struct TraceEvent { TraceEvent& detail( std::string key, int value ); TraceEvent& detail( std::string key, unsigned value ); TraceEvent& detail( std::string key, const struct NetworkAddress& value ); + TraceEvent& detail( std::string key, const IPAddress& value ); TraceEvent& detailf( std::string key, const char* valueFormat, ... ); TraceEvent& detailext( std::string key, const StringRef& value ); TraceEvent& detailext( std::string key, const Optional>& value ); diff --git a/flow/network.cpp b/flow/network.cpp index 9e484d1dce..93d34115d8 100644 --- a/flow/network.cpp +++ b/flow/network.cpp @@ -133,6 +133,16 @@ std::string toIPVectorString(std::vector ips) { return output; } +std::string toIPVectorString(const std::vector& ips) { + std::string output; + const char* space = ""; + for (auto ip : ips) { + output += format("%s%s", space, ip.toString().c_str()); + space = " "; + } + return output; +} + Future> INetworkConnections::connect( std::string host, std::string service, bool useTLS ) { // Use map to create an actor that returns an endpoint or throws Future pickEndpoint = map(resolveTCPEndpoint(host, service), [=](std::vector const &addresses) -> NetworkAddress { diff --git a/flow/network.h b/flow/network.h index cf4689fb84..2c9b14b6c7 100644 --- a/flow/network.h +++ b/flow/network.h @@ -168,6 +168,7 @@ struct NetworkAddress { typedef std::vector NetworkAddressList; std::string toIPVectorString(std::vector ips); +std::string toIPVectorString(const std::vector& ips); template class Future; template class Promise; From 82b2da4b786316815a3ee36e01205cb1ab183788 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Wed, 27 Feb 2019 18:45:34 -0800 Subject: [PATCH 11/71] net: Add IPAddress::parse() util #963 --- flow/network.cpp | 
20 ++++++++++++++------ flow/network.h | 1 + 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/flow/network.cpp b/flow/network.cpp index 93d34115d8..f9ae15fa57 100644 --- a/flow/network.cpp +++ b/flow/network.cpp @@ -59,6 +59,15 @@ std::string IPAddress::toString() const { } } +Optional IPAddress::parse(std::string str) { + try { + auto addr = boost::asio::ip::address::from_string(str); + return addr.is_v6() ? IPAddress(addr.to_v6().to_bytes()) : IPAddress(addr.to_v4().to_ulong()); + } catch (...) { + return Optional(); + } +} + bool IPAddress::isValid() const { if (!isV6()) { return toV4() != 0; @@ -84,15 +93,14 @@ NetworkAddress NetworkAddress::parse( std::string const& s ) { throw connection_string_invalid(); } - try { - auto port = std::stoi(f.substr(addrEnd + 2)); - auto addr = boost::asio::ip::address::from_string(f.substr(1, addrEnd - 1)); - ASSERT(addr.is_v6()); - return NetworkAddress(IPAddress(addr.to_v6().to_bytes()), port, true, isTLS); - } catch (...) { + auto port = std::stoi(f.substr(addrEnd + 2)); + auto addr = IPAddress::parse(f.substr(1, addrEnd - 1)); + if (!addr.present()) { throw connection_string_invalid(); } + return NetworkAddress(addr.get(), port, true, isTLS); } else { + // TODO: Use IPAddress::parse int a, b, c, d, port, count = -1; if (sscanf(f.c_str(), "%d.%d.%d.%d:%d%n", &a, &b, &c, &d, &port, &count) < 5 || count != f.size()) throw connection_string_invalid(); diff --git a/flow/network.h b/flow/network.h index 2c9b14b6c7..8ecd37f7b1 100644 --- a/flow/network.h +++ b/flow/network.h @@ -96,6 +96,7 @@ struct IPAddress { const IPAddressStore& toV6() const { return store; } std::string toString() const; + static Optional parse(std::string str); bool operator==(const IPAddress& addr) const; bool operator!=(const IPAddress& addr) const; From cc9ad0e2021e006286977f00629c4526d102206e Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Thu, 28 Feb 2019 00:09:53 -0800 Subject: [PATCH 12/71] net: Use IPv6 in simulation testing #963 
25% times we will use IPv6 addresses --- fdbrpc/sim2.actor.cpp | 25 +++- fdbrpc/simulator.h | 4 +- fdbserver/SimulatedCluster.actor.cpp | 146 ++++++++++++------- fdbserver/workloads/MemoryLifetime.actor.cpp | 5 +- fdbserver/workloads/SaveAndKill.actor.cpp | 4 +- 5 files changed, 116 insertions(+), 68 deletions(-) diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index c729675769..3ee1a07b46 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -790,9 +790,16 @@ public: Reference myc( new Sim2Conn( getCurrentProcess() ) ); Reference peerc( new Sim2Conn( peerp ) ); - // TODO Support IPv6 myc->connect(peerc, toAddr); - IPAddress localIp(getCurrentProcess()->address.ip.toV4() + g_random->randomInt(0, 256)); + IPAddress localIp; + if (getCurrentProcess()->address.ip.isV6()) { + IPAddress::IPAddressStore store = getCurrentProcess()->address.ip.toV6(); + uint16_t* ipParts = (uint16_t*)store.data(); + ipParts[7] += g_random->randomInt(0, 256); + localIp = IPAddress(store); + } else { + localIp = IPAddress(getCurrentProcess()->address.ip.toV4() + g_random->randomInt(0, 256)); + } peerc->connect(myc, NetworkAddress(localIp, g_random->randomInt(40000, 60000))); ((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*g_random->random01(), Reference(peerc) ); @@ -968,17 +975,21 @@ public: virtual void run() { _run(this); } - virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess, - LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) { + virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess, + LocalityData locality, ProcessClass startingClass, const char* dataFolder, + const char* coordinationFolder) { ASSERT( locality.machineId().present() ); MachineInfo& machine = machines[ locality.machineId().get() ]; if (!machine.machineId.present()) machine.machineId = locality.machineId(); for( 
int i = 0; i < machine.processes.size(); i++ ) { if( machine.processes[i]->locality.machineId() != locality.machineId() ) { // SOMEDAY: compute ip from locality to avoid this check - TraceEvent("Sim2Mismatch").detail("IP", format("%x", ip)) - .detailext("MachineId", locality.machineId()).detail("NewName", name) - .detailext("ExistingMachineId", machine.processes[i]->locality.machineId()).detail("ExistingName", machine.processes[i]->name); + TraceEvent("Sim2Mismatch") + .detail("IP", format("%s", ip.toString().c_str())) + .detailext("MachineId", locality.machineId()) + .detail("NewName", name) + .detailext("ExistingMachineId", machine.processes[i]->locality.machineId()) + .detail("ExistingName", machine.processes[i]->name); ASSERT( false ); } ASSERT( machine.processes[i]->address.port != port ); diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index 829b2c3554..a4834126a0 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -142,7 +142,9 @@ public: virtual Future onProcess( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0; virtual Future onMachine( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0; - virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess, LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) = 0; + virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess, + LocalityData locality, ProcessClass startingClass, const char* dataFolder, + const char* coordinationFolder) = 0; virtual void killProcess( ProcessInfo* machine, KillType ) = 0; virtual void rebootProcess(Optional> zoneId, bool allProcesses ) = 0; virtual void rebootProcess( ProcessInfo* process, KillType kt ) = 0; diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index f5d1bb7402..724cc49c20 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ 
-191,22 +191,13 @@ ACTOR Future runDr( Reference connFile ) { // SOMEDAY: when a process can be rebooted in isolation from the other on that machine, // a loop{} will be needed around the waiting on simulatedFDBD(). For now this simply // takes care of house-keeping such as context switching and file closing. -ACTOR Future simulatedFDBDRebooter( - Reference connFile, - uint32_t ip, - bool sslEnabled, - Reference tlsOptions, - uint16_t port, - uint16_t listenPerProcess, - LocalityData localities, - ProcessClass processClass, - std::string* dataFolder, - std::string* coordFolder, - std::string baseFolder, - ClusterConnectionString connStr, - bool useSeedFile, - bool runBackupAgents) -{ +ACTOR Future simulatedFDBDRebooter(Reference connFile, IPAddress ip, + bool sslEnabled, Reference tlsOptions, + uint16_t port, uint16_t listenPerProcess, + LocalityData localities, ProcessClass processClass, + std::string* dataFolder, std::string* coordFolder, + std::string baseFolder, ClusterConnectionString connStr, + bool useSeedFile, bool runBackupAgents) { state ISimulator::ProcessInfo *simProcess = g_simulator.getCurrentProcess(); state UID randomId = g_nondeterministic_random->randomUniqueID(); state int cycles = 0; @@ -363,19 +354,10 @@ std::string describe(int const& val) { // Since a datacenter kill is considered to be the same as killing a machine, files cannot be swapped across datacenters std::map< Optional>, std::vector< std::vector< std::string > > > availableFolders; // process count is no longer needed because it is now the length of the vector of ip's, because it was one ip per process -ACTOR Future simulatedMachine( - ClusterConnectionString connStr, - std::vector ips, - bool sslEnabled, - Reference tlsOptions, - LocalityData localities, - ProcessClass processClass, - std::string baseFolder, - bool restarting, - bool useSeedFile, - bool runBackupAgents, - bool sslOnly) -{ +ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector ips, bool 
sslEnabled, + Reference tlsOptions, LocalityData localities, + ProcessClass processClass, std::string baseFolder, bool restarting, + bool useSeedFile, bool runBackupAgents, bool sslOnly) { state int bootCount = 0; state std::vector myFolders; state std::vector coordFolders; @@ -603,6 +585,20 @@ ACTOR Future simulatedMachine( } } +IPAddress makeIPAddressForSim(bool isIPv6, std::array parts) { + if (isIPv6) { + IPAddress::IPAddressStore addrStore{ 0xAB, 0xCD }; + uint16_t* ptr = (uint16_t*)addrStore.data(); + ptr[4] = (uint16_t)(parts[0] << 8); + ptr[5] = (uint16_t)(parts[1] << 8); + ptr[6] = (uint16_t)(parts[2] << 8); + ptr[7] = (uint16_t)(parts[3] << 8); + return IPAddress(addrStore); + } else { + return IPAddress(parts[0] << 24 | parts[1] << 16 | parts[2] << 8 | parts[3]); + } +} + #include "fdbclient/MonitorLeader.h" ACTOR Future restartSimulatedSystem( @@ -658,21 +654,40 @@ ACTOR Future restartSimulatedSystem( dcIds.push_back(dcUIDini); } - std::vector ipAddrs; + std::vector ipAddrs; int processes = atoi(ini.GetValue(machineIdString.c_str(), "processes")); auto ip = ini.GetValue(machineIdString.c_str(), "ipAddr"); + auto parseIp = [](const char* ipStr) -> IPAddress { + Optional parsedIp = IPAddress::parse(ipStr); + if (parsedIp.present()) { + return parsedIp.get(); + } else { + return IPAddress(strtoul(ipStr, NULL, 10)); + } + }; + if( ip == NULL ) { - for (int i = 0; i < processes; i++){ - ipAddrs.push_back(strtoul(ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i*listenersPerProcess).c_str()), NULL, 10)); + for (int i = 0; i < processes; i++) { + const char* val = + ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i * listenersPerProcess).c_str()); + ipAddrs.push_back(parseIp(val)); } } else { // old way - ipAddrs.push_back(strtoul(ip, NULL, 10)); + ipAddrs.push_back(parseIp(ip)); + for (int i = 1; i < processes; i++){ - ipAddrs.push_back(ipAddrs.back() + 1); + if (ipAddrs.back().isV6()) { + IPAddress::IPAddressStore store = 
ipAddrs.back().toV6(); + uint16_t* ptr = (uint16_t*)store.data(); + ptr[7] += 1; + ipAddrs.push_back(IPAddress(store)); + } else { + ipAddrs.push_back(IPAddress(ipAddrs.back().toV4() + 1)); + } } } @@ -1058,10 +1073,9 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR } } -void setupSimulatedSystem( vector> *systemActors, std::string baseFolder, - int* pTesterCount, Optional *pConnString, - Standalone *pStartingConfiguration, int extraDB, int minimumReplication, int minimumRegions, Reference tlsOptions) -{ +void setupSimulatedSystem(vector>* systemActors, std::string baseFolder, int* pTesterCount, + Optional* pConnString, Standalone* pStartingConfiguration, + int extraDB, int minimumReplication, int minimumRegions, Reference tlsOptions) { // SOMEDAY: this does not test multi-interface configurations SimulationConfig simconfig(extraDB, minimumReplication, minimumRegions); StatusObject startingConfigJSON = simconfig.db.toJSON(true); @@ -1138,6 +1152,11 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF TEST( sslEnabled ); // SSL enabled TEST( !sslEnabled ); // SSL disabled + // Use IPv6 25% of the time + bool useIPv6 = g_random->random01() < 0.25; + TEST( useIPv6 ); + TEST( !useIPv6 ); + vector coordinatorAddresses; if(minimumRegions > 1) { //do not put coordinators in the primary region so that we can kill that region safely @@ -1145,7 +1164,7 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF for( int dc = 1; dc < dataCenters; dc+=2 ) { int dcCoordinators = coordinatorCount / nonPrimaryDcs + ((dc-1)/2 < coordinatorCount%nonPrimaryDcs); for(int m = 0; m < dcCoordinators; m++) { - uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m; + auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m }); coordinatorAddresses.push_back(NetworkAddress(ip, sslEnabled && !sslOnly ? 
2 : 1, true, sslEnabled && sslOnly)); TraceEvent("SelectedCoordinator").detail("Address", coordinatorAddresses.back()); } @@ -1161,10 +1180,16 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF int machines = machineCount / dataCenters + (dc < machineCount % dataCenters); for(int m = 0; m < dcCoordinators; m++) { if(coordinatorCount>4 && (assignedMachines==4 || (m+1==dcCoordinators && assignedMachines<4 && assignedMachines+machines-dcCoordinators>=4))) { - uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m; - TraceEvent("SkippedCoordinator").detail("Address", ip).detail("M", m).detail("Machines", machines).detail("Assigned", assignedMachines).detail("DcCoord", dcCoordinators).detail("CoordinatorCount", coordinatorCount); + auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m }); + TraceEvent("SkippedCoordinator") + .detail("Address", ip.toString()) + .detail("M", m) + .detail("Machines", machines) + .detail("Assigned", assignedMachines) + .detail("DcCoord", dcCoordinators) + .detail("CoordinatorCount", coordinatorCount); } else { - uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m; + auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m }); coordinatorAddresses.push_back(NetworkAddress(ip, sslEnabled && !sslOnly ? 
2 : 1, true, sslEnabled && sslOnly)); TraceEvent("SelectedCoordinator").detail("Address", coordinatorAddresses.back()).detail("M", m).detail("Machines", machines).detail("Assigned", assignedMachines).detail("DcCoord", dcCoordinators).detail("P1", (m+1==dcCoordinators)).detail("P2", (assignedMachines<4)).detail("P3", (assignedMachines+machines-dcCoordinators>=4)).detail("CoordinatorCount", coordinatorCount); } @@ -1176,10 +1201,13 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF g_random->randomShuffle(coordinatorAddresses); for(int i = 0; i < (coordinatorAddresses.size()/2)+1; i++) { - TraceEvent("ProtectCoordinator").detail("Address", coordinatorAddresses[i]).detail("Coordinators", describe(coordinatorAddresses)).backtrace(); - g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip,coordinatorAddresses[i].port,true,false)); + TraceEvent("ProtectCoordinator") + .detail("Address", coordinatorAddresses[i]) + .detail("Coordinators", describe(coordinatorAddresses)); + g_simulator.protectedAddresses.insert( + NetworkAddress(coordinatorAddresses[i].ip, coordinatorAddresses[i].port, true, false)); if(coordinatorAddresses[i].port==2) { - g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip,1,true,false)); + g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip, 1, true, false)); } } g_random->randomShuffle(coordinatorAddresses); @@ -1235,9 +1263,9 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF nonVersatileMachines++; } - std::vector ips; - for (int i = 0; i < processesPerMachine; i++){ - ips.push_back(2 << 24 | dc << 16 | g_random->randomInt(1, i+2) << 8 | machine); + std::vector ips; + for (int i = 0; i < processesPerMachine; i++) { + ips.push_back(makeIPAddressForSim(useIPv6, { 2, dc, g_random->randomInt(1, i + 2), machine })); } // check the sslEnablementMap using only one ip( LocalityData localities(Optional>(), zoneId, machineId, dcUID); @@ 
-1246,9 +1274,9 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF localities, processClass, baseFolder, false, machine == useSeedForMachine, true, sslOnly ), "SimulatedMachine")); if (extraDB && g_simulator.extraDB->toString() != conn.toString()) { - std::vector extraIps; + std::vector extraIps; for (int i = 0; i < processesPerMachine; i++){ - extraIps.push_back(4 << 24 | dc << 16 | g_random->randomInt(1, i + 2) << 8 | machine); + extraIps.push_back(makeIPAddressForSim(useIPv6, { 4, dc, g_random->randomInt(1, i + 2), machine })); } Standalone newMachineId(g_random->randomUniqueID().toString()); @@ -1278,8 +1306,8 @@ void setupSimulatedSystem( vector> *systemActors, std::string baseF int testerCount = *pTesterCount = g_random->randomInt(4, 9); int useSeedForMachine = g_random->randomInt(0, testerCount); for(int i=0; i ips; - ips.push_back(0x03040301 + i); + std::vector ips; + ips.push_back(makeIPAddressForSim(useIPv6, { 3, 4, 3, i + 1 })); Standalone newZoneId = Standalone(g_random->randomUniqueID().toString()); LocalityData localities(Optional>(), newZoneId, newZoneId, Optional>()); systemActors->push_back( reportErrors( simulatedMachine( @@ -1351,8 +1379,15 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot state int minimumRegions = 0; checkExtraDB(testFile, extraDB, minimumReplication, minimumRegions); - wait( g_simulator.onProcess( g_simulator.newProcess( - "TestSystem", 0x01010101, 1, 1, LocalityData(Optional>(), Standalone(g_random->randomUniqueID().toString()), Standalone(g_random->randomUniqueID().toString()), Optional>()), ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource), "", "" ), TaskDefaultYield ) ); + // TODO (IPv6) Use IPv6? 
+ wait(g_simulator.onProcess( + g_simulator.newProcess("TestSystem", IPAddress(0x01010101), 1, 1, + LocalityData(Optional>(), + Standalone(g_random->randomUniqueID().toString()), + Standalone(g_random->randomUniqueID().toString()), + Optional>()), + ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource), "", ""), + TaskDefaultYield)); Sim2FileSystem::newFileSystem(); FlowTransport::createInstance(1); if (tlsOptions->enabled()) { @@ -1368,7 +1403,8 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot } else { g_expect_full_pointermap = 1; - setupSimulatedSystem( &systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, extraDB, minimumReplication, minimumRegions, tlsOptions ); + setupSimulatedSystem(&systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, extraDB, + minimumReplication, minimumRegions, tlsOptions); wait( delay(1.0) ); // FIXME: WHY!!! //wait for machines to boot } std::string clusterFileDir = joinPath( dataFolder, g_random->randomUniqueID().toString() ); diff --git a/fdbserver/workloads/MemoryLifetime.actor.cpp b/fdbserver/workloads/MemoryLifetime.actor.cpp index 6b0cb6345a..cce1b8d34f 100644 --- a/fdbserver/workloads/MemoryLifetime.actor.cpp +++ b/fdbserver/workloads/MemoryLifetime.actor.cpp @@ -152,9 +152,8 @@ struct MemoryLifetime : KVWorkload { tr = ReadYourWritesTransaction(cx); wait( delay(0.01) ); //we cannot check the contents like other operations so just touch all the values to make sure we dont crash - for(int i = 0; i < getAddress_res1.size(); i++) { - int a,b,c,d,count=-1; - ASSERT(sscanf(getAddress_res1[i], "%d.%d.%d.%d%n", &a,&b,&c,&d, &count)==4 && count == strlen(getAddress_res1[i])); + for (int i = 0; i < getAddress_res1.size(); i++) { + ASSERT(IPAddress::parse(getAddress_res1[i]).present()); } } if(now() - startTime > self->testDuration) diff --git a/fdbserver/workloads/SaveAndKill.actor.cpp b/fdbserver/workloads/SaveAndKill.actor.cpp index 
3a6a8fdd69..a2e2a1b761 100644 --- a/fdbserver/workloads/SaveAndKill.actor.cpp +++ b/fdbserver/workloads/SaveAndKill.actor.cpp @@ -92,14 +92,14 @@ struct SaveAndKillWorkload : TestWorkload { ini.SetValue(machineIdString, "zoneId", (process->locality.zoneId().present()) ? process->locality.zoneId().get().printable().c_str() : ""); ini.SetValue(machineIdString, "mClass", format("%d", process->startingClass.classType()).c_str()); ini.SetValue(machineIdString, format("ipAddr%d", process->address.port - 1).c_str(), - format("%d", process->address.ip.toV4()).c_str()); + process->address.ip.toString().c_str()); ini.SetValue(machineIdString, format("%d", process->address.port-1).c_str(), process->dataFolder); ini.SetValue(machineIdString, format("c%d", process->address.port-1).c_str(), process->coordinationFolder); j++; } else { ini.SetValue(machineIdString, format("ipAddr%d", process->address.port - 1).c_str(), - format("%d", process->address.ip.toV4()).c_str()); + process->address.ip.toString().c_str()); int oldValue = machines.find(machineId)->second; ini.SetValue(machineIdString, format("%d", process->address.port-1).c_str(), process->dataFolder); ini.SetValue(machineIdString, format("c%d", process->address.port-1).c_str(), process->coordinationFolder); From 592e2241552d46ec73e7d5f91ec08e3f0956f98d Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Thu, 28 Feb 2019 02:37:21 -0800 Subject: [PATCH 13/71] net: add/use formatIpPort to format IP:PORT pairs #963 --- fdbclient/FDBTypes.h | 9 ++++----- fdbrpc/simulator.h | 4 ++-- fdbserver/Status.actor.cpp | 2 +- flow/Trace.cpp | 7 +++---- flow/network.cpp | 30 ++++++++++++++++++++++-------- flow/network.h | 1 + 6 files changed, 33 insertions(+), 20 deletions(-) diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index 0dc1bccb83..5b7de5e818 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -657,17 +657,16 @@ struct AddressExclusion { // This is for debugging and IS NOT to be used for serialization to 
persistant state std::string toString() const { - std::string as = format("%s", ip.toString().c_str()); - const char* formatPatt = ip.isV6() ? "[%s]:%d" : "%s:%d"; - if (!isWholeMachine()) return format(formatPatt, as.c_str(), port); - return as; + if (!isWholeMachine()) + return formatIpPort(ip, port); + return ip.toString(); } static AddressExclusion parse( StringRef const& ); template void serialize(Ar& ar) { - ar.serializeBinaryItem(*this); + serializer(ar, ip, port); } }; diff --git a/fdbrpc/simulator.h b/fdbrpc/simulator.h index a4834126a0..2bfd34e98f 100644 --- a/fdbrpc/simulator.h +++ b/fdbrpc/simulator.h @@ -115,8 +115,8 @@ public: std::string toString() const { const NetworkAddress& address = addresses[0]; return format( - "name: %s address: %s:%d zone: %s datahall: %s class: %s excluded: %d cleared: %d", name, - address.ip.toString().c_str(), address.port, + "name: %s address: %s zone: %s datahall: %s class: %s excluded: %d cleared: %d", name, + formatIpPort(address.ip, address.port).c_str(), (locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"), (locality.dataHallId().present() ? 
locality.dataHallId().get().printable().c_str() : "[unset]"), startingClass.toString().c_str(), excluded, cleared); diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index c14f18f5f0..165b77c01e 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -1673,7 +1673,7 @@ static JsonBuilderArray getClientIssuesAsMessages( ProcessIssuesMap const& _issu std::map> deduplicatedIssues; for(auto i : issues) { - deduplicatedIssues[i.second.first].push_back(format("%s:%d", i.first.ip.toString().c_str(), i.first.port)); + deduplicatedIssues[i.second.first].push_back(formatIpPort(i.first.ip, i.first.port)); } for (auto i : deduplicatedIssues) { diff --git a/flow/Trace.cpp b/flow/Trace.cpp index 55b178349d..9ebfec24a2 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -333,8 +333,7 @@ public: void annotateEvent( TraceEventFields &fields ) { if(localAddress.present()) { - fields.addField("Machine", - format("%s:%d", localAddress.get().ip.toString().c_str(), localAddress.get().port)); + fields.addField("Machine", formatIpPort(localAddress.get().ip, localAddress.get().port)); } fields.addField("LogGroup", logGroup); @@ -717,7 +716,7 @@ bool TraceEvent::init() { detail("Type", type); if(g_network && g_network->isSimulated()) { NetworkAddress local = g_network->getLocalAddress(); - detailf("Machine", "%s:%d", local.ip.toString().c_str(), local.port); + detail("Machine", formatIpPort(local.ip, local.port)); } detail("ID", id); if(err.isValid()) { @@ -1019,7 +1018,7 @@ void TraceBatch::dump() { std::string machine; if(g_network->isSimulated()) { NetworkAddress local = g_network->getLocalAddress(); - machine = format("%s:%d", local.ip.toString().c_str(), local.port); + machine = formatIpPort(local.ip, local.port); } for(int i = 0; i < attachBatch.size(); i++) { diff --git a/flow/network.cpp b/flow/network.cpp index f9ae15fa57..1fde9ae04f 100644 --- a/flow/network.cpp +++ b/flow/network.cpp @@ -122,13 +122,7 @@ std::vector 
NetworkAddress::parseList( std::string const& addrs } std::string NetworkAddress::toString() const { - const char* patt; - if (isV6()) { - patt = "[%s]:%d%s"; - } else { - patt = "%s:%d%s"; - } - return format(patt, ip.toString().c_str(), port, isTLS() ? ":tls" : ""); + return formatIpPort(ip, port) + (isTLS() ? ":tls" : ""); } std::string toIPVectorString(std::vector ips) { @@ -151,6 +145,11 @@ std::string toIPVectorString(const std::vector& ips) { return output; } +std::string formatIpPort(const IPAddress& ip, uint16_t port) { + const char* patt = ip.isV6() ? "[%s]:%d" : "%s:%d"; + return format(patt, ip.toString().c_str(), port); +} + Future> INetworkConnections::connect( std::string host, std::string service, bool useTLS ) { // Use map to create an actor that returns an endpoint or throws Future pickEndpoint = map(resolveTCPEndpoint(host, service), [=](std::vector const &addresses) -> NetworkAddress { @@ -187,6 +186,21 @@ TEST_CASE("/flow/network/ipaddress") { ASSERT(addrParsed.isV6()); ASSERT(addrParsed.isTLS()); ASSERT(addrParsed.toString() == addrCompressed); - return Void(); } + + { + auto addr = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"; + auto addrCompressed = "2001:db8:85a3::8a2e:370:7334"; + auto addrParsed = IPAddress::parse(addr); + ASSERT(addrParsed.present()); + ASSERT(addrParsed.get().toString() == addrCompressed); + } + + { + auto addr = "2001"; + auto addrParsed = IPAddress::parse(addr); + ASSERT(!addrParsed.present()); + } + + return Void(); } diff --git a/flow/network.h b/flow/network.h index 8ecd37f7b1..88ff73b38f 100644 --- a/flow/network.h +++ b/flow/network.h @@ -170,6 +170,7 @@ typedef std::vector NetworkAddressList; std::string toIPVectorString(std::vector ips); std::string toIPVectorString(const std::vector& ips); +std::string formatIpPort(const IPAddress& ip, uint16_t port); template class Future; template class Promise; From 41d18db7b9f03dc9daaf2eac5feffa6dda58859a Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Thu, 28 Feb 2019 
11:56:37 -0800 Subject: [PATCH 14/71] fix: update the encoding of AddressExclusion in SystemData #963 --- fdbclient/NativeAPI.actor.cpp | 11 +++++++++++ fdbclient/SystemData.cpp | 6 +----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index f4c982523d..887cbf4c4c 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1068,8 +1068,19 @@ bool GetRangeLimits::hasSatisfiedMinRows() { AddressExclusion AddressExclusion::parse( StringRef const& key ) { //Must not change: serialized to the database! + auto parsedIp = IPAddress::parse(key.toString()); + if (parsedIp.present()) { + return AddressExclusion(parsedIp.get()); + } + try { auto addr = NetworkAddress::parse(key.toString()); + if (addr.isTLS()) { + TraceEvent(SevWarnAlways, "AddressExclusionParseError") + .detail("String", printable(key)) + .detail("Description", "Address inclusion string should not include `:tls' suffix."); + return AddressExclusion(); + } return AddressExclusion(addr.ip, addr.port); } catch (Error& e) { TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key)); diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index cb54165469..3eac6d3dbc 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -374,11 +374,7 @@ const AddressExclusion decodeExcludedServersKey( KeyRef const& key ) { } std::string encodeExcludedServersKey( AddressExclusion const& addr ) { //FIXME: make sure what's persisted here is not affected by innocent changes elsewhere - std::string as = format("%s", addr.ip.toString().c_str()); - //ASSERT( StringRef(as).endsWith(LiteralStringRef(":0")) == (addr.port == 0) ); - if (!addr.isWholeMachine()) - as += format(":%d", addr.port); - return excludedServersPrefix.toString() + as; + return excludedServersPrefix.toString() + addr.toString(); } const KeyRangeRef workerListKeys( LiteralStringRef("\xff/worker/"), 
LiteralStringRef("\xff/worker0") ); From e93cd0ff21e4014122470fd59b30ab562ce335c7 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Thu, 28 Feb 2019 12:24:56 -0800 Subject: [PATCH 15/71] Add some checks and comments to IPv6 changes #963 --- fdbclient/NativeAPI.actor.cpp | 1 + fdbrpc/FlowTransport.actor.cpp | 6 +++++- fdbserver/SimulatedCluster.actor.cpp | 3 +++ flow/network.cpp | 10 ++++++++++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 887cbf4c4c..3301c4d8d8 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1073,6 +1073,7 @@ AddressExclusion AddressExclusion::parse( StringRef const& key ) { return AddressExclusion(parsedIp.get()); } + // Not a whole machine, includes `port'. try { auto addr = NetworkAddress::parse(key.toString()); if (addr.isTLS()) { diff --git a/fdbrpc/FlowTransport.actor.cpp b/fdbrpc/FlowTransport.actor.cpp index 930d5113ee..592ca79bc7 100644 --- a/fdbrpc/FlowTransport.actor.cpp +++ b/fdbrpc/FlowTransport.actor.cpp @@ -198,10 +198,14 @@ public: #pragma pack( push, 1 ) struct ConnectPacket { - uint32_t connectPacketLength; // sizeof(ConnectPacket)-sizeof(uint32_t), or perhaps greater in later protocol versions + // The size of ConnectPacket depends on whether it is carrying IPv6 or IPv6 + // address. The value does not inclueds the size of `connectPacketLength` itself, + // but only the other fields of this structure. + uint32_t connectPacketLength; uint64_t protocolVersion; // Expect currentProtocolVersion uint16_t canonicalRemotePort; // Port number to reconnect to the originating process uint64_t connectionId; // Multi-version clients will use the same Id for both connections, other connections will set this to zero. Added at protocol Version 0x0FDB00A444020001. 
+ union { uint32_t v4; uint8_t v6[16]; diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 724cc49c20..67a762e485 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -659,6 +659,9 @@ ACTOR Future restartSimulatedSystem( auto ip = ini.GetValue(machineIdString.c_str(), "ipAddr"); + // Helper to translate the IP address stored in INI file to out IPAddress representation. + // After IPv6 work, we store the actual string representation of IP address, however earlier, it was + // instead the 32 bit integer value. auto parseIp = [](const char* ipStr) -> IPAddress { Optional parsedIp = IPAddress::parse(ipStr); if (parsedIp.present()) { diff --git a/flow/network.cpp b/flow/network.cpp index 1fde9ae04f..69dc8c4509 100644 --- a/flow/network.cpp +++ b/flow/network.cpp @@ -77,6 +77,10 @@ bool IPAddress::isValid() const { } NetworkAddress NetworkAddress::parse( std::string const& s ) { + if (s.empty()) { + throw connection_string_invalid(); + } + bool isTLS = false; std::string f; if( s.size() > 4 && strcmp(s.c_str() + s.size() - 4, ":tls") == 0 ) { @@ -202,5 +206,11 @@ TEST_CASE("/flow/network/ipaddress") { ASSERT(!addrParsed.present()); } + { + auto addr = "8.8.8.8:12"; + auto addrParsed = IPAddress::parse(addr); + ASSERT(!addrParsed.present()); + } + return Void(); } From 1d3e62c4e3a24ad6bc357cb1d1e4577244d79f0f Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Thu, 28 Feb 2019 16:07:49 -0800 Subject: [PATCH 16/71] net: Don't use a union of IP in ConnectPacket #963 Since keeping a union and using the packet size to figure out whether the ConnectPacket is using IPv6 to IPv4 address is not easily maintainable. For simplicity, we just serialize everything in ConnectPacket and be backward compatible with older format. However, some code for some much older stuff is removed. 
--- fdbrpc/FlowTransport.actor.cpp | 88 ++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/fdbrpc/FlowTransport.actor.cpp b/fdbrpc/FlowTransport.actor.cpp index 592ca79bc7..2737119fb0 100644 --- a/fdbrpc/FlowTransport.actor.cpp +++ b/fdbrpc/FlowTransport.actor.cpp @@ -198,48 +198,59 @@ public: #pragma pack( push, 1 ) struct ConnectPacket { - // The size of ConnectPacket depends on whether it is carrying IPv6 or IPv6 - // address. The value does not inclueds the size of `connectPacketLength` itself, + // The value does not inclueds the size of `connectPacketLength` itself, // but only the other fields of this structure. uint32_t connectPacketLength; uint64_t protocolVersion; // Expect currentProtocolVersion + uint16_t canonicalRemotePort; // Port number to reconnect to the originating process uint64_t connectionId; // Multi-version clients will use the same Id for both connections, other connections will set this to zero. Added at protocol Version 0x0FDB00A444020001. - union { - uint32_t v4; - uint8_t v6[16]; - } canonicalRemoteIp46; // IP Address to reconnect to the originating process + // IP Address to reconnect to the originating process. Only one of these must be populated. 
+ uint32_t canonicalRemoteIp4; + + enum ConnectPacketFlags { + FLAG_IPV6 = 1 + }; + uint16_t flags; + uint8_t canonicalRemoteIp6[16]; IPAddress canonicalRemoteIp() const { if (isIPv6()) { - IPAddress::IPAddressStore ip; - memcpy(ip.data(), &canonicalRemoteIp46.v6, ip.size()); - return IPAddress(ip); + IPAddress::IPAddressStore store; + memcpy(store.data(), canonicalRemoteIp6, sizeof(canonicalRemoteIp6)); + return IPAddress(store); } else { - return IPAddress(canonicalRemoteIp46.v4); + return IPAddress(canonicalRemoteIp4); } } void setCanonicalRemoteIp(const IPAddress& ip) { if (ip.isV6()) { - memcpy(&canonicalRemoteIp46.v6, ip.toV6().data(), 16); + flags = flags | FLAG_IPV6; + memcpy(&canonicalRemoteIp6, ip.toV6().data(), 16); } else { - canonicalRemoteIp46.v4 = ip.toV4(); + flags = flags & ~FLAG_IPV6; + canonicalRemoteIp4 = ip.toV4(); } } - bool isIPv6() const { return connectPacketLength == (sizeof(ConnectPacket) - sizeof(connectPacketLength)); } + bool isIPv6() const { return flags & FLAG_IPV6; } uint32_t totalPacketSize() const { return connectPacketLength + sizeof(connectPacketLength); } template void serialize(Ar& ar) { - serializer(ar, connectPacketLength, protocolVersion, canonicalRemotePort, connectionId); - if (isIPv6()) { - ar.serializeBytes(&canonicalRemoteIp46.v6, sizeof(canonicalRemoteIp46.v6)); + serializer(ar, connectPacketLength); + ASSERT(connectPacketLength <= sizeof(ConnectPacket)); + serializer(ar, protocolVersion, canonicalRemotePort, connectionId, canonicalRemoteIp4); + if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) { + flags = 0; } else { - serializer(ar, canonicalRemoteIp46.v4); + // We can send everything in serialized packet, since the current version of ConnectPacket + // is backward compatible with CONNECT_PACKET_V0. 
+ serializer(ar, flags); + ar.serializeBytes(&canonicalRemoteIp6, sizeof(canonicalRemoteIp6)); } } }; @@ -286,7 +297,6 @@ struct Peer : NonCopyable { if(addr.isTLS() == destination.isTLS()) { pkt.canonicalRemotePort = addr.port; pkt.setCanonicalRemoteIp(addr.ip); - pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength) - (addr.isV6() ? 0 : 12); found = true; break; } @@ -294,9 +304,9 @@ struct Peer : NonCopyable { if (!found) { pkt.canonicalRemotePort = 0; // a "mixed" TLS/non-TLS connection is like a client/server connection - there's no way to reverse it pkt.setCanonicalRemoteIp(IPAddress(0)); - pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength); } + pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength); pkt.protocolVersion = currentProtocolVersion; pkt.connectionId = transport->transportId; @@ -674,19 +684,18 @@ ACTOR static Future connectionReader( if (expectConnectPacket && unprocessed_end-unprocessed_begin>=CONNECT_PACKET_V0_SIZE) { // At the beginning of a connection, we expect to receive a packet containing the protocol version and the listening port of the remote process - ConnectPacket* p = (ConnectPacket*)unprocessed_begin; - - uint64_t connectionId = 0; - int32_t connectPacketSize = p->totalPacketSize(); + int32_t connectPacketSize = ((ConnectPacket*)unprocessed_begin)->totalPacketSize(); if ( unprocessed_end-unprocessed_begin >= connectPacketSize ) { - if(p->protocolVersion >= 0x0FDB00A444020001) { - connectionId = p->connectionId; - } + uint64_t protocolVersion = ((ConnectPacket*)unprocessed_begin)->protocolVersion; + BinaryReader pktReader(unprocessed_begin, connectPacketSize, AssumeVersion(protocolVersion)); + ConnectPacket pkt; + serializer(pktReader, pkt); - if( (p->protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) { - incompatibleProtocolVersionNewer = p->protocolVersion > currentProtocolVersion; - NetworkAddress addr = 
p->canonicalRemotePort - ? NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort) + uint64_t connectionId = pkt.connectionId; + if( (pkt.protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) { + incompatibleProtocolVersionNewer = pkt.protocolVersion > currentProtocolVersion; + NetworkAddress addr = pkt.canonicalRemotePort + ? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort) : conn->getPeerAddress(); if(connectionId != 1) addr.port = 0; @@ -695,11 +704,10 @@ ACTOR static Future connectionReader( TraceEvent(SevWarn, "ConnectionRejected", conn->getDebugID()) .detail("Reason", "IncompatibleProtocolVersion") .detail("LocalVersion", currentProtocolVersion) - .detail("RejectedVersion", p->protocolVersion) + .detail("RejectedVersion", pkt.protocolVersion) .detail("VersionMask", compatibleProtocolVersionMask) - .detail("Peer", p->canonicalRemotePort ? NetworkAddress(p->canonicalRemoteIp(), - p->canonicalRemotePort) - : conn->getPeerAddress()) + .detail("Peer", pkt.canonicalRemotePort ? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort) + : conn->getPeerAddress()) .detail("ConnectionId", connectionId); transport->lastIncompatibleMessage = now(); } @@ -711,7 +719,7 @@ ACTOR static Future connectionReader( } compatible = false; - if(p->protocolVersion < 0x0FDB00A551000000LL) { + if(protocolVersion < 0x0FDB00A551000000LL) { // Older versions expected us to hang up. It may work even if we don't hang up here, but it's safer to keep the old behavior. 
throw incompatible_protocol_version(); } @@ -730,22 +738,22 @@ ACTOR static Future connectionReader( unprocessed_begin += connectPacketSize; expectConnectPacket = false; - peerProtocolVersion = p->protocolVersion; + peerProtocolVersion = protocolVersion; if (peer != nullptr) { // Outgoing connection; port information should be what we expect TraceEvent("ConnectedOutgoing") .suppressFor(1.0) - .detail("PeerAddr", NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort)); + .detail("PeerAddr", NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)); peer->compatible = compatible; peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer; if (!compatible) { peer->transport->numIncompatibleConnections++; incompatiblePeerCounted = true; } - ASSERT( p->canonicalRemotePort == peerAddress.port ); + ASSERT( pkt.canonicalRemotePort == peerAddress.port ); } else { - if (p->canonicalRemotePort) { - peerAddress = NetworkAddress(p->canonicalRemoteIp(), p->canonicalRemotePort, true, + if (pkt.canonicalRemotePort) { + peerAddress = NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort, true, peerAddress.isTLS()); } peer = transport->getPeer(peerAddress); From 89e5be7d706038992e5040726201653c9297d01a Mon Sep 17 00:00:00 2001 From: Bhaskar Muppana Date: Tue, 5 Mar 2019 13:09:26 -0800 Subject: [PATCH 17/71] Resolves #1027: Adding interfaces to Flow bindings (#1215) * Resolves #1027: Adding interfaces Database, ReadTransaction and Transaction. * Moving implementations into DatabaseImpl and TransactionImpl. * Also removed deprecated class Cluster. As we changed the interfaces, especially how we create transaction, its going to break the compilation of any app upgrades to this version of flow bindings. It doesn't change any of the fdb_c APIs. So, backward compatilbity with old servers is preserved through multi-version client. * Fixing indentation * Fixing format according to review comments. 
--- bindings/flow/fdb_flow.actor.cpp | 141 ++++++++++++----- bindings/flow/fdb_flow.h | 214 +++++++++++--------------- bindings/flow/tester/Tester.actor.cpp | 61 +++++--- bindings/flow/tester/Tester.actor.h | 2 +- 4 files changed, 231 insertions(+), 187 deletions(-) diff --git a/bindings/flow/fdb_flow.actor.cpp b/bindings/flow/fdb_flow.actor.cpp index ae79d7f303..43d6e5796f 100644 --- a/bindings/flow/fdb_flow.actor.cpp +++ b/bindings/flow/fdb_flow.actor.cpp @@ -35,7 +35,7 @@ THREAD_FUNC networkThread(void* fdb) { ACTOR Future _test() { API *fdb = FDB::API::selectAPIVersion(610); auto db = fdb->createDatabase(); - state Reference tr( new Transaction(db) ); + state Reference tr = db->createTransaction(); // tr->setVersion(1); @@ -98,6 +98,81 @@ void fdb_flow_test() { } namespace FDB { + class DatabaseImpl : public Database, NonCopyable { + public: + virtual ~DatabaseImpl() { fdb_database_destroy(db); } + + Reference createTransaction() override; + void setDatabaseOption(FDBDatabaseOption option, Optional value = Optional()) override; + + private: + FDBDatabase* db; + explicit DatabaseImpl(FDBDatabase* db) : db(db) {} + + friend class API; + }; + + class TransactionImpl : public Transaction, private NonCopyable, public FastAllocated { + friend class DatabaseImpl; + + public: + virtual ~TransactionImpl() { + if (tr) { + fdb_transaction_destroy(tr); + } + } + + void setReadVersion(Version v) override; + Future getReadVersion() override; + + Future>> get(const Key& key, bool snapshot = false) override; + Future> getKey(const KeySelector& key, bool snapshot = false) override; + + Future watch(const Key& key) override; + + using Transaction::getRange; + Future> getRange(const KeySelector& begin, const KeySelector& end, + GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, + bool reverse = false, + FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override; + + void addReadConflictRange(KeyRangeRef const& keys) override; + void 
addReadConflictKey(KeyRef const& key) override; + void addWriteConflictRange(KeyRangeRef const& keys) override; + void addWriteConflictKey(KeyRef const& key) override; + + void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) override; + void set(const KeyRef& key, const ValueRef& value) override; + void clear(const KeyRangeRef& range) override; + void clear(const KeyRef& key) override; + + Future commit() override; + Version getCommittedVersion() override; + Future> getVersionstamp() override; + + void setOption(FDBTransactionOption option, Optional value = Optional()) override; + + Future onError(Error const& e) override; + + void cancel() override; + void reset() override; + + TransactionImpl() : tr(NULL) {} + TransactionImpl(TransactionImpl&& r) noexcept(true) { + tr = r.tr; + r.tr = NULL; + } + TransactionImpl& operator=(TransactionImpl&& r) noexcept(true) { + tr = r.tr; + r.tr = NULL; + return *this; + } + + private: + FDBTransaction* tr; + + explicit TransactionImpl(FDBDatabase* db); + }; static inline void throw_on_error( fdb_error_t e ) { if (e) @@ -187,40 +262,36 @@ namespace FDB { return fdb_error_predicate( pred, e.code() ); } - Reference API::createCluster( std::string const& connFilename ) { - return Reference(new Cluster(connFilename)); - } - - Reference API::createDatabase(std::string const& connFilename) { + Reference API::createDatabase(std::string const& connFilename) { FDBDatabase *db; throw_on_error(fdb_create_database(connFilename.c_str(), &db)); - return Reference(new DatabaseContext(db)); + return Reference(new DatabaseImpl(db)); } int API::getAPIVersion() const { return version; } - Reference Cluster::createDatabase() { - return API::getInstance()->createDatabase(connFilename.c_str()); + Reference DatabaseImpl::createTransaction() { + return Reference(new TransactionImpl(db)); } - void DatabaseContext::setDatabaseOption(FDBDatabaseOption option, Optional value) { + void 
DatabaseImpl::setDatabaseOption(FDBDatabaseOption option, Optional value) { if (value.present()) throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size())); else throw_on_error(fdb_database_set_option(db, option, NULL, 0)); } - Transaction::Transaction( Reference const& db ) { - throw_on_error( fdb_database_create_transaction( db->db, &tr ) ); + TransactionImpl::TransactionImpl(FDBDatabase* db) { + throw_on_error(fdb_database_create_transaction(db, &tr)); } - void Transaction::setVersion( Version v ) { + void TransactionImpl::setReadVersion(Version v) { fdb_transaction_set_read_version( tr, v ); } - Future Transaction::getReadVersion() { + Future TransactionImpl::getReadVersion() { return backToFuture( fdb_transaction_get_read_version( tr ), [](Reference f){ Version value; @@ -230,7 +301,7 @@ namespace FDB { } ); } - Future< Optional> > Transaction::get( const Key& key, bool snapshot ) { + Future>> TransactionImpl::get(const Key& key, bool snapshot) { return backToFuture< Optional> >( fdb_transaction_get( tr, key.begin(), key.size(), snapshot ), [](Reference f) { fdb_bool_t present; uint8_t const* value; @@ -246,14 +317,14 @@ namespace FDB { } ); } - Future< Void > Transaction::watch( const Key& key ) { + Future TransactionImpl::watch(const Key& key) { return backToFuture< Void >( fdb_transaction_watch( tr, key.begin(), key.size() ), [](Reference f) { throw_on_error( fdb_future_get_error( f->f ) ); return Void(); } ); } - Future< FDBStandalone > Transaction::getKey( const KeySelector& key, bool snapshot ) { + Future> TransactionImpl::getKey(const KeySelector& key, bool snapshot) { return backToFuture< FDBStandalone >( fdb_transaction_get_key( tr, key.key.begin(), key.key.size(), key.orEqual, key.offset, snapshot ), [](Reference f) { uint8_t const* key; int key_length; @@ -264,7 +335,7 @@ namespace FDB { } ); } - Future< FDBStandalone > Transaction::getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, 
bool snapshot, bool reverse, FDBStreamingMode streamingMode ) { + Future> TransactionImpl::getRange(const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode) { // FIXME: iteration return backToFuture< FDBStandalone >( fdb_transaction_get_range( tr, begin.key.begin(), begin.key.size(), begin.orEqual, begin.offset, end.key.begin(), end.key.size(), end.orEqual, end.offset, limits.rows, limits.bytes, streamingMode, 1, snapshot, reverse ), [](Reference f) { FDBKeyValue const* kv; @@ -277,64 +348,64 @@ namespace FDB { } ); } - void Transaction::addReadConflictRange( KeyRangeRef const& keys ) { + void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) { throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) ); } - void Transaction::addReadConflictKey( KeyRef const& key ) { + void TransactionImpl::addReadConflictKey(KeyRef const& key) { return addReadConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key)))); } - void Transaction::addWriteConflictRange( KeyRangeRef const& keys ) { + void TransactionImpl::addWriteConflictRange(KeyRangeRef const& keys) { throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE ) ); } - void Transaction::addWriteConflictKey( KeyRef const& key ) { + void TransactionImpl::addWriteConflictKey(KeyRef const& key) { return addWriteConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key)))); } - void Transaction::atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType ) { + void TransactionImpl::atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) { fdb_transaction_atomic_op( tr, key.begin(), key.size(), operand.begin(), operand.size(), operationType ); } - void Transaction::set( const KeyRef& key, const 
ValueRef& value ) { + void TransactionImpl::set(const KeyRef& key, const ValueRef& value) { fdb_transaction_set( tr, key.begin(), key.size(), value.begin(), value.size() ); } - void Transaction::clear( const KeyRangeRef& range ) { + void TransactionImpl::clear(const KeyRangeRef& range) { fdb_transaction_clear_range( tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size() ); } - void Transaction::clear( const KeyRef& key ) { + void TransactionImpl::clear(const KeyRef& key) { fdb_transaction_clear( tr, key.begin(), key.size() ); } - Future Transaction::commit() { + Future TransactionImpl::commit() { return backToFuture< Void >( fdb_transaction_commit( tr ), [](Reference f) { throw_on_error( fdb_future_get_error( f->f ) ); return Void(); } ); } - Version Transaction::getCommittedVersion() { + Version TransactionImpl::getCommittedVersion() { Version v; throw_on_error( fdb_transaction_get_committed_version( tr, &v ) ); return v; } - Future> Transaction::getVersionstamp() { - return backToFuture< FDBStandalone >( fdb_transaction_get_versionstamp( tr ), [](Reference f) { + Future> TransactionImpl::getVersionstamp() { + return backToFuture>(fdb_transaction_get_versionstamp(tr), [](Reference f) { uint8_t const* key; int key_length; throw_on_error( fdb_future_get_key( f->f, &key, &key_length ) ); return FDBStandalone( f, StringRef( key, key_length ) ); - } ); + }); } - void Transaction::setOption( FDBTransactionOption option, Optional value ) { + void TransactionImpl::setOption(FDBTransactionOption option, Optional value) { if ( value.present() ) { throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) ); } else { @@ -342,18 +413,18 @@ namespace FDB { } } - Future Transaction::onError( Error const& e ) { + Future TransactionImpl::onError(Error const& e) { return backToFuture< Void >( fdb_transaction_on_error( tr, e.code() ), [](Reference f) { throw_on_error( fdb_future_get_error( f->f ) ); return Void(); } 
); } - void Transaction::cancel() { + void TransactionImpl::cancel() { fdb_transaction_cancel( tr ); } - void Transaction::reset() { + void TransactionImpl::reset() { fdb_transaction_reset( tr ); } diff --git a/bindings/flow/fdb_flow.h b/bindings/flow/fdb_flow.h index 790062cc09..3e84daecad 100644 --- a/bindings/flow/fdb_flow.h +++ b/bindings/flow/fdb_flow.h @@ -30,68 +30,9 @@ #include "FDBLoanerTypes.h" namespace FDB { - - class DatabaseContext : public ReferenceCounted, NonCopyable { - friend class Cluster; - friend class Transaction; - public: - ~DatabaseContext() { - fdb_database_destroy( db ); - } - - void setDatabaseOption(FDBDatabaseOption option, Optional value = Optional()); - - private: - FDBDatabase* db; - explicit DatabaseContext( FDBDatabase* db ) : db(db) {} - - friend class API; - }; - - // Deprecated: Use createDatabase instead. - class Cluster : public ReferenceCounted, NonCopyable { - public: - ~Cluster() {} - - Reference createDatabase(); - - private: - explicit Cluster( std::string connFilename ) : connFilename(connFilename) {} - - std::string connFilename; - friend class API; - }; - - class API { - public: - static API* selectAPIVersion(int apiVersion); - static API* getInstance(); - static bool isAPIVersionSelected(); - - void setNetworkOption(FDBNetworkOption option, Optional value = Optional()); - - void setupNetwork(); - void runNetwork(); - void stopNetwork(); - - // Deprecated: Use createDatabase instead. 
- Reference createCluster( std::string const& connFilename ); - - Reference createDatabase( std::string const& connFilename="" ); - - bool evaluatePredicate(FDBErrorPredicate pred, Error const& e); - int getAPIVersion() const; - - private: - static API* instance; - - API(int version); - int version; - }; - struct CFuture : NonCopyable, ReferenceCounted, FastAllocated { CFuture() : f(NULL) {} - explicit CFuture( FDBFuture* f ) : f(f) {} + explicit CFuture(FDBFuture* f) : f(f) {} ~CFuture() { if (f) { fdb_future_destroy(f); @@ -107,83 +48,102 @@ namespace FDB { class FDBStandalone : public T { public: FDBStandalone() {} - FDBStandalone( Reference f, T const& t ) : T(t), f(f) {} - FDBStandalone( FDBStandalone const& o ) : T((T const&)o), f(o.f) {} + FDBStandalone(Reference f, T const& t) : T(t), f(f) {} + FDBStandalone(FDBStandalone const& o) : T((T const&)o), f(o.f) {} + private: Reference f; }; - class Transaction : public ReferenceCounted, private NonCopyable, public FastAllocated { + class ReadTransaction : public ReferenceCounted { public: - explicit Transaction( Reference const& db ); - ~Transaction() { - if (tr) { - fdb_transaction_destroy(tr); - } + virtual ~ReadTransaction(){}; + virtual void setReadVersion(Version v) = 0; + virtual Future getReadVersion() = 0; + + virtual Future>> get(const Key& key, bool snapshot = false) = 0; + virtual Future> getKey(const KeySelector& key, bool snapshot = false) = 0; + virtual Future watch(const Key& key) = 0; + + virtual Future> getRange( + const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(), + bool snapshot = false, bool reverse = false, + FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) = 0; + virtual Future> getRange( + const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false, + FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) { + return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse, 
streamingMode); + } + virtual Future> getRange( + const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false, + FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) { + return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), + KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limit, snapshot, reverse, + streamingMode); + } + virtual Future> getRange( + const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, + FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) { + return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()), + KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limits, snapshot, reverse, + streamingMode); } - void setVersion( Version v ); - Future getReadVersion(); + virtual void addReadConflictRange(KeyRangeRef const& keys) = 0; + virtual void addReadConflictKey(KeyRef const& key) = 0; - Future< Optional> > get( const Key& key, bool snapshot = false ); - Future< Void > watch( const Key& key ); - Future< FDBStandalone > getKey( const KeySelector& key, bool snapshot = false ); - Future< FDBStandalone > getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL); - Future< FDBStandalone > getRange( const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) { - return getRange( begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode ); - } - Future< FDBStandalone > getRange( const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) { - return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ), - KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ), - 
limit, snapshot, reverse, streamingMode ); - } - Future< FDBStandalone > getRange( const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) { - return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ), - KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ), - limits, snapshot, reverse, streamingMode ); - } + virtual void setOption(FDBTransactionOption option, Optional value = Optional()) = 0; - // Future< Standalone> > getAddressesForKey(const Key& key); + virtual Future onError(Error const& e) = 0; - void addReadConflictRange( KeyRangeRef const& keys ); - void addReadConflictKey( KeyRef const& key ); - void addWriteConflictRange( KeyRangeRef const& keys ); - void addWriteConflictKey( KeyRef const& key ); - // void makeSelfConflicting() { tr.makeSelfConflicting(); } - - void atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType ); - void set( const KeyRef& key, const ValueRef& value ); - void clear( const KeyRangeRef& range ); - void clear( const KeyRef& key ); - - Future commit(); - Version getCommittedVersion(); - Future> getVersionstamp(); - - void setOption( FDBTransactionOption option, Optional value = Optional() ); - - Future onError( Error const& e ); - - void cancel(); - void reset(); - // double getBackoff() { return tr.getBackoff(); } - // void debugTransaction(UID dID) { tr.debugTransaction(dID); } - - Transaction() : tr(NULL) {} - Transaction( Transaction&& r ) noexcept(true) { - tr = r.tr; - r.tr = NULL; - } - Transaction& operator=( Transaction&& r ) noexcept(true) { - tr = r.tr; - r.tr = NULL; - return *this; - } - - private: - FDBTransaction* tr; + virtual void cancel() = 0; + virtual void reset() = 0; }; -} + class Transaction : public ReadTransaction { + public: + virtual void addWriteConflictRange(KeyRangeRef const& keys) = 0; + virtual void addWriteConflictKey(KeyRef const& key) 
= 0; -#endif + virtual void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) = 0; + virtual void set(const KeyRef& key, const ValueRef& value) = 0; + virtual void clear(const KeyRangeRef& range) = 0; + virtual void clear(const KeyRef& key) = 0; + + virtual Future commit() = 0; + virtual Version getCommittedVersion() = 0; + virtual Future> getVersionstamp() = 0; + }; + + class Database : public ReferenceCounted { + public: + virtual ~Database(){}; + virtual Reference createTransaction() = 0; + virtual void setDatabaseOption(FDBDatabaseOption option, Optional value = Optional()) = 0; + }; + + class API { + public: + static API* selectAPIVersion(int apiVersion); + static API* getInstance(); + static bool isAPIVersionSelected(); + + void setNetworkOption(FDBNetworkOption option, Optional value = Optional()); + + void setupNetwork(); + void runNetwork(); + void stopNetwork(); + + Reference createDatabase(std::string const& connFilename = ""); + + bool evaluatePredicate(FDBErrorPredicate pred, Error const& e); + int getAPIVersion() const; + + private: + static API* instance; + + API(int version); + int version; + }; + } // namespace FDB +#endif // FDB_FLOW_FDB_FLOW_H diff --git a/bindings/flow/tester/Tester.actor.cpp b/bindings/flow/tester/Tester.actor.cpp index eed67c6e5a..29b2d419e4 100644 --- a/bindings/flow/tester/Tester.actor.cpp +++ b/bindings/flow/tester/Tester.actor.cpp @@ -41,7 +41,8 @@ std::map, Reference> trMap; const int ITERATION_PROGRESSION[] = { 256, 1000, 4096, 6144, 9216, 13824, 20736, 31104, 46656, 69984, 80000 }; const int MAX_ITERATION = sizeof(ITERATION_PROGRESSION)/sizeof(int); -static Future runTest(Reference const& data, Reference const& db, StringRef const& prefix); +static Future runTest(Reference const& data, Reference const& db, + StringRef const& prefix); THREAD_FUNC networkThread( void* api ) { // This is the fdb_flow network we're running on a thread @@ -388,7 +389,7 @@ struct LogStackFunc : 
InstructionFunc { ACTOR static Future logStack(Reference data, std::map entries, Standalone prefix) { loop { - state Reference tr(new Transaction(data->db)); + state Reference tr = data->db->createTransaction(); try { for(auto it : entries) { Tuple tk; @@ -534,7 +535,7 @@ struct NewTransactionFunc : InstructionFunc { static const char* name; static Future call(Reference const& data, Reference const& instruction) { - trMap[data->trName] = Reference(new Transaction(data->db)); + trMap[data->trName] = data->db->createTransaction(); return Void(); } }; @@ -550,7 +551,7 @@ struct UseTransactionFunc : InstructionFunc { data->trName = name; if(trMap.count(data->trName) == 0) { - trMap[data->trName] = Reference(new Transaction(data->db)); + trMap[data->trName] = data->db->createTransaction(); } return Void(); } @@ -681,7 +682,7 @@ struct SetReadVersionFunc : InstructionFunc { static const char* name; static Future call(Reference const& data, Reference const& instruction) { - instruction->tr->setVersion(data->lastVersion); + instruction->tr->setReadVersion(data->lastVersion); return Void(); } }; @@ -1323,6 +1324,20 @@ struct StartThreadFunc : InstructionFunc { const char* StartThreadFunc::name = "START_THREAD"; REGISTER_INSTRUCTION_FUNC(StartThreadFunc); +ACTOR template +Future()(Reference()).getValue())> read(Reference db, + Function func) { + state Reference tr = db->createTransaction(); + loop { + try { + state decltype(fake()(Reference()).getValue()) result = wait(func(tr)); + return result; + } catch (Error& e) { + wait(tr->onError(e)); + } + } +} + // WAIT_EMPTY struct WaitEmptyFunc : InstructionFunc { static const char* name; @@ -1333,25 +1348,23 @@ struct WaitEmptyFunc : InstructionFunc { return Void(); Standalone s1 = wait(items[0].value); - state Standalone prefix = Tuple::unpack(s1).getString(0); + Standalone prefix = Tuple::unpack(s1).getString(0); // printf("=========WAIT_EMPTY:%s\n", printable(prefix).c_str()); - state Reference tr(new Transaction(data->db)); 
- loop { - try { - FDBStandalone results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1)); - if(results.size() > 0) { - throw not_committed(); - } - break; - } - catch(Error &e) { - wait(tr->onError(e)); - } - } + wait(read(data->db, + [=](Reference tr) -> Future { return checkEmptyPrefix(tr, prefix); })); return Void(); } + +private: + ACTOR static Future checkEmptyPrefix(Reference tr, Standalone prefix) { + FDBStandalone results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1)); + if (results.size() > 0) { + throw not_committed(); + } + return Void(); + } }; const char* WaitEmptyFunc::name = "WAIT_EMPTY"; REGISTER_INSTRUCTION_FUNC(WaitEmptyFunc); @@ -1529,7 +1542,7 @@ struct UnitTestsFunc : InstructionFunc { } API::selectAPIVersion(fdb->getAPIVersion()); - state Reference tr(new Transaction(data->db)); + state Reference tr = data->db->createTransaction(); tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE); tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE); tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_BATCH); @@ -1560,7 +1573,7 @@ const char* UnitTestsFunc::name = "UNIT_TESTS"; REGISTER_INSTRUCTION_FUNC(UnitTestsFunc); ACTOR static Future getInstructions(Reference data, StringRef prefix) { - state Reference tr(new Transaction(data->db)); + state Reference tr = data->db->createTransaction(); // get test instructions state Tuple testSpec; @@ -1610,7 +1623,7 @@ ACTOR static Future doInstructions(Reference data) { state Reference instruction = Reference(new InstructionData(isDatabase, isSnapshot, data->instructions[idx].value, Reference())); if (isDatabase) { - state Reference tr(new Transaction(data->db)); + state Reference tr = data->db->createTransaction(); instruction->tr = tr; } else { @@ -1644,7 +1657,7 @@ ACTOR static Future doInstructions(Reference data) { return Void(); } -ACTOR static Future runTest(Reference data, Reference db, StringRef prefix) { +ACTOR static Future 
runTest(Reference data, Reference db, StringRef prefix) { ASSERT(data); try { data->db = db; @@ -1744,7 +1757,7 @@ ACTOR void _test_versionstamp() { startThread(networkThread, fdb); auto db = fdb->createDatabase(); - state Reference tr(new Transaction(db)); + state Reference tr = db->createTransaction(); state Future> ftrVersion = tr->getVersionstamp(); diff --git a/bindings/flow/tester/Tester.actor.h b/bindings/flow/tester/Tester.actor.h index dc4bcff046..e33fae3f91 100644 --- a/bindings/flow/tester/Tester.actor.h +++ b/bindings/flow/tester/Tester.actor.h @@ -199,7 +199,7 @@ struct DirectoryTesterData { struct FlowTesterData : public ReferenceCounted { FDB::API *api; - Reference db; + Reference db; Standalone instructions; Standalone trName; FlowTesterStack stack; From a9562f61bebd6284ffc500a0b4922b6937bda18a Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Mon, 4 Mar 2019 22:13:47 -0800 Subject: [PATCH 18/71] fix: missing argument to printf in fdbserver --- fdbserver/fdbserver.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 0ed260e8bc..0a9f71d899 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -793,7 +793,7 @@ std::pair buildNetworkAddresses(const Cl const NetworkAddress& currentPublicAddress = publicNetworkAddresses.back(); if (!currentPublicAddress.isValid()) { - fprintf(stderr, "ERROR: %s is not valid a public ip address\n"); + fprintf(stderr, "ERROR: %s is not a valid IP address\n", currentPublicAddress.toString().c_str()); flushAndExit(FDB_EXIT_ERROR); } From 96ee95b9ad570f6b37516089360470d4c47f3a95 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Tue, 5 Mar 2019 11:57:49 -0800 Subject: [PATCH 19/71] fix: macOS build #963 Use the boost representation of IPv6 address internally and make sure it uses std::array. 
--- Makefile | 2 ++ flow/network.h | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 95828f5b85..049350b113 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,8 @@ else ifeq ($(PLATFORM),Darwin) .LIBPATTERNS := lib%.dylib lib%.a BOOSTDIR ?= $(HOME)/boost_1_52_0 + CXXFLAGS += -D BOOST_ASIO_HAS_STD_ARRAY # TODO: Remove this after Boost upgrades to 1.67 + TLS_LIBDIR ?= /usr/local/lib DLEXT := dylib java_DLEXT := jnilib diff --git a/flow/network.h b/flow/network.h index 88ff73b38f..3b569da349 100644 --- a/flow/network.h +++ b/flow/network.h @@ -25,6 +25,7 @@ #include #include #include +#include "boost/asio.hpp" #include "flow/serialize.h" #include "flow/IRandom.h" @@ -80,7 +81,9 @@ struct IPAddress { // Represents both IPv4 and IPv6 address. For IPv4 addresses, // only the first 32bits are relevant and rest are initialized to // 0. - typedef std::array IPAddressStore; + typedef boost::asio::ip::address_v6::bytes_type IPAddressStore; + static_assert(std::is_same>::value, + "IPAddressStore must be std::array"); IPAddress(); explicit IPAddress(const IPAddressStore& v6addr); From bdf0fc4ffbdbabb9923c64262d5419cffa2c4d7d Mon Sep 17 00:00:00 2001 From: Jingyu Zhou Date: Fri, 1 Mar 2019 16:43:17 -0800 Subject: [PATCH 20/71] Enable .pdb file generation on Windows Add compiler flag for generating program database file (PDB) for Visual Studio. 
--- cmake/ConfigureCompiler.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/ConfigureCompiler.cmake b/cmake/ConfigureCompiler.cmake index ba56b1b067..9b4795547c 100644 --- a/cmake/ConfigureCompiler.cmake +++ b/cmake/ConfigureCompiler.cmake @@ -44,7 +44,7 @@ set(CMAKE_REQUIRED_LIBRARIES c) if(WIN32) - add_compile_options(/W3 /EHsc /std:c++14 /bigobj) + add_compile_options(/W3 /EHsc /std:c++14 /bigobj /Zi) else() if(USE_GOLD_LINKER) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags") From 590863479bd86e1491962ba94a222aaf135d2706 Mon Sep 17 00:00:00 2001 From: Markus Pilman Date: Tue, 5 Mar 2019 11:04:25 -0800 Subject: [PATCH 21/71] Update cmake/ConfigureCompiler.cmake Co-Authored-By: jzhou77 --- cmake/ConfigureCompiler.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/ConfigureCompiler.cmake b/cmake/ConfigureCompiler.cmake index 9b4795547c..f3cb94406f 100644 --- a/cmake/ConfigureCompiler.cmake +++ b/cmake/ConfigureCompiler.cmake @@ -44,7 +44,7 @@ set(CMAKE_REQUIRED_LIBRARIES c) if(WIN32) - add_compile_options(/W3 /EHsc /std:c++14 /bigobj /Zi) + add_compile_options(/W3 /EHsc /std:c++14 /bigobj $<$:/Zi>) else() if(USE_GOLD_LINKER) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags") From 981426bac9693e11e09c95bd771a4f7f648c2d99 Mon Sep 17 00:00:00 2001 From: anoyes Date: Tue, 5 Mar 2019 10:29:37 -0800 Subject: [PATCH 22/71] More ide fixes --- fdbrpc/dsltest.actor.cpp | 4 ++-- fdbserver/CMakeLists.txt | 2 +- fdbserver/DataDistribution.actor.cpp | 2 +- ...ataDistribution.h => DataDistribution.actor.h} | 15 ++++++++++++--- fdbserver/DataDistributionQueue.actor.cpp | 2 +- fdbserver/DataDistributionTracker.actor.cpp | 2 +- fdbserver/Status.actor.cpp | 2 +- fdbserver/fdbserver.actor.cpp | 2 +- fdbserver/fdbserver.vcxproj | 2 +- fdbserver/fdbserver.vcxproj.filters | 2 +- fdbserver/masterserver.actor.cpp | 2 +- 
fdbserver/workloads/ConsistencyCheck.actor.cpp | 2 +- fdbserver/workloads/Throttling.actor.cpp | 1 + 13 files changed, 25 insertions(+), 15 deletions(-) rename fdbserver/{DataDistribution.h => DataDistribution.actor.h} (94%) diff --git a/fdbrpc/dsltest.actor.cpp b/fdbrpc/dsltest.actor.cpp index bfde5ccee9..6d49c7f100 100644 --- a/fdbrpc/dsltest.actor.cpp +++ b/fdbrpc/dsltest.actor.cpp @@ -868,7 +868,7 @@ template<> Future chain<0>( Future const& x ) { return x; } -Future chain2( Future const& x, int const& i ); +ACTOR Future chain2(Future x, int i); ACTOR Future chain2( Future x, int i ) { if (i>1) { @@ -1017,7 +1017,7 @@ ACTOR void cycle(FutureStream in, PromiseStream out, int* ptotal){ loop{ waitNext(in); (*ptotal)++; - out.send(_); + out.send(Void()); } } diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index 9c6c47d65b..512cfed532 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -10,7 +10,7 @@ set(FDBSERVER_SRCS CoroFlow.actor.cpp CoroFlow.h DataDistribution.actor.cpp - DataDistribution.h + DataDistribution.actor.h DataDistributionQueue.actor.cpp DataDistributionTracker.actor.cpp DataDistributorInterface.h diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index a290e1f4c3..48f5732fca 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -19,7 +19,7 @@ */ #include "flow/ActorCollection.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "fdbclient/SystemData.h" #include "fdbclient/DatabaseContext.h" #include "fdbserver/MoveKeys.actor.h" diff --git a/fdbserver/DataDistribution.h b/fdbserver/DataDistribution.actor.h similarity index 94% rename from fdbserver/DataDistribution.h rename to fdbserver/DataDistribution.actor.h index e348d65fe0..b838217192 100644 --- a/fdbserver/DataDistribution.h +++ b/fdbserver/DataDistribution.actor.h @@ -1,5 +1,5 @@ /* - * DataDistribution.h + * 
DataDistribution.actor.h * * This source file is part of the FoundationDB open source project * @@ -18,10 +18,17 @@ * limitations under the License. */ +#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H) +#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H +#include "fdbserver/DataDistribution.actor.g.h" +#elif !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_H) +#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_H + #include "fdbclient/NativeAPI.actor.h" #include "fdbserver/ClusterRecruitmentInterface.h" #include "fdbserver/MoveKeys.actor.h" #include "fdbserver/LogSystem.h" +#include "flow/actorcompiler.h" // This must be the last #include. struct RelocateShard { KeyRange keys; @@ -244,5 +251,7 @@ ShardSizeBounds getShardSizeBounds(KeyRangeRef shard, int64_t maxShardSize); int64_t getMaxShardSize( double dbSizeEstimate ); class DDTeamCollection; -Future teamRemover(DDTeamCollection* const& self); -Future teamRemoverPeriodic(DDTeamCollection* const& self); \ No newline at end of file +ACTOR Future teamRemover(DDTeamCollection* self); +ACTOR Future teamRemoverPeriodic(DDTeamCollection* self); + +#endif diff --git a/fdbserver/DataDistributionQueue.actor.cpp b/fdbserver/DataDistributionQueue.actor.cpp index 0d2beff8b3..f282ef12bc 100644 --- a/fdbserver/DataDistributionQueue.actor.cpp +++ b/fdbserver/DataDistributionQueue.actor.cpp @@ -25,7 +25,7 @@ #include "flow/Util.h" #include "fdbrpc/sim_validation.h" #include "fdbclient/SystemData.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "fdbclient/DatabaseContext.h" #include "fdbserver/MoveKeys.actor.h" #include "fdbserver/Knobs.h" diff --git a/fdbserver/DataDistributionTracker.actor.cpp b/fdbserver/DataDistributionTracker.actor.cpp index f4d6e589f2..cb4d5dde5b 100644 --- a/fdbserver/DataDistributionTracker.actor.cpp +++ b/fdbserver/DataDistributionTracker.actor.cpp @@ -20,7 +20,7 @@ #include "fdbrpc/FailureMonitor.h" #include "fdbclient/SystemData.h" -#include 
"fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "fdbserver/Knobs.h" #include "fdbclient/DatabaseContext.h" #include "flow/ActorCollection.h" diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index 165b77c01e..09cb2cfdf3 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -27,7 +27,7 @@ #include "fdbserver/ClusterRecruitmentInterface.h" #include #include "fdbserver/CoordinationInterface.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "flow/UnitTest.h" #include "fdbserver/QuietDatabase.h" #include "fdbserver/RecoveryState.h" diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 0a9f71d899..f208eb92d8 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -38,7 +38,7 @@ #include "fdbserver/ServerDBInfo.h" #include "fdbserver/MoveKeys.actor.h" #include "fdbserver/ConflictSet.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "fdbserver/NetworkTest.h" #include "fdbserver/IKeyValueStore.h" #include diff --git a/fdbserver/fdbserver.vcxproj b/fdbserver/fdbserver.vcxproj index 4f1ff8ea37..8ec940565a 100644 --- a/fdbserver/fdbserver.vcxproj +++ b/fdbserver/fdbserver.vcxproj @@ -159,7 +159,7 @@ - + diff --git a/fdbserver/fdbserver.vcxproj.filters b/fdbserver/fdbserver.vcxproj.filters index 7fce939368..9c27ac6fad 100644 --- a/fdbserver/fdbserver.vcxproj.filters +++ b/fdbserver/fdbserver.vcxproj.filters @@ -309,7 +309,7 @@ - + diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index 4758f27d43..f1c2181493 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -26,7 +26,7 @@ #include "fdbclient/Notified.h" #include "fdbclient/SystemData.h" #include "fdbserver/ConflictSet.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include 
"fdbserver/Knobs.h" #include #include "fdbserver/WaitFailure.h" diff --git a/fdbserver/workloads/ConsistencyCheck.actor.cpp b/fdbserver/workloads/ConsistencyCheck.actor.cpp index 24eab434fa..4d07ae1063 100644 --- a/fdbserver/workloads/ConsistencyCheck.actor.cpp +++ b/fdbserver/workloads/ConsistencyCheck.actor.cpp @@ -28,7 +28,7 @@ #include "fdbrpc/simulator.h" #include "fdbserver/Knobs.h" #include "fdbserver/StorageMetrics.h" -#include "fdbserver/DataDistribution.h" +#include "fdbserver/DataDistribution.actor.h" #include "fdbserver/QuietDatabase.h" #include "flow/DeterministicRandom.h" #include "fdbclient/ManagementAPI.actor.h" diff --git a/fdbserver/workloads/Throttling.actor.cpp b/fdbserver/workloads/Throttling.actor.cpp index 6d36838e54..0fdaf84be6 100644 --- a/fdbserver/workloads/Throttling.actor.cpp +++ b/fdbserver/workloads/Throttling.actor.cpp @@ -22,6 +22,7 @@ #include "fdbclient/ReadYourWrites.h" #include "fdbserver/workloads/workloads.actor.h" +#include "flow/actorcompiler.h" // This must be the last include struct TokenBucket { static constexpr const double addTokensInterval = 0.1; From ad0aca21b53cc9114c77de7cc564a9b3d5f46242 Mon Sep 17 00:00:00 2001 From: Alex Miller <35046903+alexmiller-apple@users.noreply.github.com> Date: Tue, 5 Mar 2019 16:32:15 -0800 Subject: [PATCH 23/71] Update fdbserver/fdbserver.vcxproj Co-Authored-By: atn34 --- fdbserver/fdbserver.vcxproj | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fdbserver/fdbserver.vcxproj b/fdbserver/fdbserver.vcxproj index 8ec940565a..febadcd23b 100644 --- a/fdbserver/fdbserver.vcxproj +++ b/fdbserver/fdbserver.vcxproj @@ -159,7 +159,9 @@ - + + false + From f12497232f7da41a65af2bf90835ddc475022d02 Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Thu, 24 Jan 2019 17:51:09 -0800 Subject: [PATCH 24/71] Split BOOSTDIR into BOOSTDIR_BASEDIR and BOOSTDIR_BASENAME (optionally). 
This is to give us a way to let which boost to use toggleable in the source code as BOOSTDIR_BASENAME, and let build enviornments configure where to find appropriately named boost folders via BOOSTDIR_BASEDIR. This isn't pretty, and CMake's FindBoost is a far better way to do this, but it'll work for now. --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 049350b113..e3c0846731 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,7 @@ ifeq ($(NIGHTLY),true) CFLAGS += -DFDB_CLEAN_BUILD endif +BOOST_BASENAME ?= boost_1_52_0 ifeq ($(PLATFORM),Linux) PLATFORM := linux @@ -44,7 +45,7 @@ ifeq ($(PLATFORM),Linux) CXXFLAGS += -std=c++0x - BOOSTDIR ?= /opt/boost_1_52_0 + BOOST_BASEDIR ?= /opt TLS_LIBDIR ?= /usr/local/lib DLEXT := so java_DLEXT := so @@ -60,15 +61,14 @@ else ifeq ($(PLATFORM),Darwin) .LIBPATTERNS := lib%.dylib lib%.a - BOOSTDIR ?= $(HOME)/boost_1_52_0 - CXXFLAGS += -D BOOST_ASIO_HAS_STD_ARRAY # TODO: Remove this after Boost upgrades to 1.67 - + BOOST_BASEDIR ?= ${HOME} TLS_LIBDIR ?= /usr/local/lib DLEXT := dylib java_DLEXT := jnilib else $(error Not prepared to compile on platform $(PLATFORM)) endif +BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME} CCACHE := $(shell which ccache) ifneq ($(CCACHE),) From af617d68e6833a8a785531c104f96cc185b68aed Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 25 Jan 2019 10:14:00 -0800 Subject: [PATCH 25/71] boost 1.52.0 -> 1.67.0 in all vcxproj files --- bindings/c/fdb_c.vcxproj | 4 ++-- bindings/flow/fdb_flow.vcxproj | 4 ++-- bindings/flow/tester/fdb_flow_tester.vcxproj | 4 ++-- bindings/java/fdb_java.vcxproj | 2 +- fdbbackup/fdbbackup.vcxproj | 4 ++-- fdbcli/fdbcli.vcxproj | 4 ++-- fdbclient/fdbclient.vcxproj | 4 ++-- fdbrpc/fdbrpc.vcxproj | 4 ++-- fdbserver/fdbserver.vcxproj | 4 ++-- flow/flow.vcxproj | 4 ++-- 10 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bindings/c/fdb_c.vcxproj b/bindings/c/fdb_c.vcxproj index 76d305988d..cad3b4f468 100644 
--- a/bindings/c/fdb_c.vcxproj +++ b/bindings/c/fdb_c.vcxproj @@ -67,14 +67,14 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath) - ..\..\;C:\Program Files\boost_1_52_0;$(IncludePath) + ..\..\;C:\Program Files\boost_1_67_0;$(IncludePath) $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ - ..\..\;C:\Program Files\boost_1_52_0;$(IncludePath) + ..\..\;C:\Program Files\boost_1_67_0;$(IncludePath) diff --git a/bindings/flow/fdb_flow.vcxproj b/bindings/flow/fdb_flow.vcxproj index ed5a85216c..4ced971c16 100755 --- a/bindings/flow/fdb_flow.vcxproj +++ b/bindings/flow/fdb_flow.vcxproj @@ -79,11 +79,11 @@ true - ..\..\;C:\Program Files\boost_1_52_0;$(IncludePath) + ..\..\;C:\Program Files\boost_1_67_0;$(IncludePath) false - ..\..\;C:\Program Files\boost_1_52_0;$(IncludePath) + ..\..\;C:\Program Files\boost_1_67_0;$(IncludePath) diff --git a/bindings/flow/tester/fdb_flow_tester.vcxproj b/bindings/flow/tester/fdb_flow_tester.vcxproj index 7eed24fdbd..9d3ed5f680 100644 --- a/bindings/flow/tester/fdb_flow_tester.vcxproj +++ b/bindings/flow/tester/fdb_flow_tester.vcxproj @@ -58,13 +58,13 @@ true $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ - $(IncludePath);../../../;C:\Program Files\boost_1_52_0 + $(IncludePath);../../../;C:\Program Files\boost_1_67_0 false $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 diff --git a/bindings/java/fdb_java.vcxproj b/bindings/java/fdb_java.vcxproj index 4ffa395971..8d6dd59f2a 100644 --- a/bindings/java/fdb_java.vcxproj +++ b/bindings/java/fdb_java.vcxproj @@ -45,7 +45,7 @@ - ..\..\;C:\Program 
Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_52_0;$(IncludePath) + ..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_67_0;$(IncludePath) $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ diff --git a/fdbbackup/fdbbackup.vcxproj b/fdbbackup/fdbbackup.vcxproj index d21279bc67..dfda99f96b 100644 --- a/fdbbackup/fdbbackup.vcxproj +++ b/fdbbackup/fdbbackup.vcxproj @@ -53,11 +53,11 @@ true - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 PreBuildEvent diff --git a/fdbcli/fdbcli.vcxproj b/fdbcli/fdbcli.vcxproj index a76ce05ac5..7d0db495b7 100644 --- a/fdbcli/fdbcli.vcxproj +++ b/fdbcli/fdbcli.vcxproj @@ -62,13 +62,13 @@ true $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false $(SolutionDir)bin\$(Configuration)\ $(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\ - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 diff --git a/fdbclient/fdbclient.vcxproj b/fdbclient/fdbclient.vcxproj index 7d8c55c412..0cddf0d7ac 100644 --- a/fdbclient/fdbclient.vcxproj +++ b/fdbclient/fdbclient.vcxproj @@ -165,11 +165,11 @@ true - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 diff --git a/fdbrpc/fdbrpc.vcxproj b/fdbrpc/fdbrpc.vcxproj index 3231afcfbe..2d7095b556 100644 --- a/fdbrpc/fdbrpc.vcxproj +++ b/fdbrpc/fdbrpc.vcxproj @@ -154,11 +154,11 @@ true - 
$(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 diff --git a/fdbserver/fdbserver.vcxproj b/fdbserver/fdbserver.vcxproj index febadcd23b..36e73c121a 100644 --- a/fdbserver/fdbserver.vcxproj +++ b/fdbserver/fdbserver.vcxproj @@ -255,11 +255,11 @@ true - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 PreBuildEvent diff --git a/flow/flow.vcxproj b/flow/flow.vcxproj index 764cbb8981..a7f7d6f7cb 100644 --- a/flow/flow.vcxproj +++ b/flow/flow.vcxproj @@ -132,11 +132,11 @@ true - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 false - $(IncludePath);../;C:\Program Files\boost_1_52_0 + $(IncludePath);../;C:\Program Files\boost_1_67_0 From 367bcff70091da74da9b61dcb52fe3ab92075c87 Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 25 Jan 2019 11:40:47 -0800 Subject: [PATCH 26/71] Error if boost != 1.67.0 --- flow/flow.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/flow.h b/flow/flow.h index 21adbaf7fc..5def73ca65 100644 --- a/flow/flow.h +++ b/flow/flow.h @@ -45,6 +45,14 @@ #include "flow/ThreadPrimitives.h" #include "flow/network.h" +#include + +#if BOOST_VERSION == 105200 +#error Boost is still 1.52.0 +#elif BOOST_VERSION != 106700 +#error Boost is not 1.67.0 +#endif + using namespace std::rel_ops; #define TEST( condition ) if (!(condition)); else { static TraceEvent* __test = &(TraceEvent("CodeCoverage").detail("File", __FILE__).detail("Line",__LINE__).detail("Condition", #condition)); } From df30904e27a4e4ea0fee68476d3185b85d179d67 Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 25 Jan 2019 11:41:27 -0800 Subject: [PATCH 27/71] Change default boost to 1.67.0 --- Makefile | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e3c0846731..bf3cd68235 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ ifeq ($(NIGHTLY),true) CFLAGS += -DFDB_CLEAN_BUILD endif -BOOST_BASENAME ?= boost_1_52_0 +BOOST_BASENAME ?= boost_1_67_0 ifeq ($(PLATFORM),Linux) PLATFORM := linux From 224744c473e63823be7b6588ee35e14f99926698 Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 25 Jan 2019 12:09:05 -0800 Subject: [PATCH 28/71] Log BOOSTDIR --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index bf3cd68235..2cecf8104e 100644 --- a/Makefile +++ b/Makefile @@ -69,6 +69,7 @@ else $(error Not prepared to compile on platform $(PLATFORM)) endif BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME} +$(info BOOSTDIR is ${BOOSTDIR}) CCACHE := $(shell which ccache) ifneq ($(CCACHE),) From c6a65389ae86808ff8ac70a36659b4f37414a0ee Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 25 Jan 2019 16:49:59 -0800 Subject: [PATCH 29/71] Remove noexcept macro and replace with BOOST_NOEXCEPT. BOOST_NOEXCEPT does what the noexcept macro was supposed to do, but in a way that is correctly maintained over time. 
--- bindings/flow/fdb_flow.actor.cpp | 4 ++-- fdbclient/BackupAgent.actor.h | 8 ++++---- fdbclient/KeyRangeMap.h | 6 +++--- fdbclient/NativeAPI.actor.cpp | 2 +- fdbclient/NativeAPI.actor.h | 6 +++--- fdbclient/Notified.h | 4 ++-- fdbclient/ReadYourWrites.actor.cpp | 4 ++-- fdbclient/ReadYourWrites.h | 4 ++-- fdbclient/SnapshotCache.h | 4 ++-- fdbclient/ThreadSafeTransaction.actor.cpp | 4 ++-- fdbclient/ThreadSafeTransaction.h | 4 ++-- fdbclient/VersionedMap.h | 4 ++-- fdbclient/WriteMap.h | 4 ++-- fdbrpc/AsyncFileCached.actor.h | 2 +- fdbrpc/RangeMap.h | 2 +- fdbrpc/fdbrpc.h | 8 ++++---- fdbrpc/sim2.actor.cpp | 4 ++-- fdbserver/ClusterController.actor.cpp | 4 ++-- fdbserver/LogRouter.actor.cpp | 4 ++-- fdbserver/OldTLogServer_4_6.actor.cpp | 4 ++-- fdbserver/OldTLogServer_6_0.actor.cpp | 4 ++-- fdbserver/SkipList.cpp | 4 ++-- fdbserver/TLogServer.actor.cpp | 4 ++-- fdbserver/masterserver.actor.cpp | 4 ++-- flow/Arena.h | 8 ++++---- flow/Deque.h | 4 ++-- flow/FastRef.h | 4 ++-- flow/IndexedSet.h | 12 ++++++------ flow/Net2.actor.cpp | 4 ++-- flow/Platform.h | 8 ++------ flow/TDMetric.actor.h | 8 ++++---- flow/ThreadHelper.actor.h | 4 ++-- flow/flow.h | 16 ++++++++-------- flow/genericactors.actor.h | 6 +++--- 34 files changed, 86 insertions(+), 90 deletions(-) diff --git a/bindings/flow/fdb_flow.actor.cpp b/bindings/flow/fdb_flow.actor.cpp index 43d6e5796f..75fb7e7f10 100644 --- a/bindings/flow/fdb_flow.actor.cpp +++ b/bindings/flow/fdb_flow.actor.cpp @@ -158,11 +158,11 @@ namespace FDB { void reset() override; TransactionImpl() : tr(NULL) {} - TransactionImpl(TransactionImpl&& r) noexcept(true) { + TransactionImpl(TransactionImpl&& r) BOOST_NOEXCEPT { tr = r.tr; r.tr = NULL; } - TransactionImpl& operator=(TransactionImpl&& r) noexcept(true) { + TransactionImpl& operator=(TransactionImpl&& r) BOOST_NOEXCEPT { tr = r.tr; r.tr = NULL; return *this; diff --git a/fdbclient/BackupAgent.actor.h b/fdbclient/BackupAgent.actor.h index df2ef54982..d2f6797b51 100644 --- 
a/fdbclient/BackupAgent.actor.h +++ b/fdbclient/BackupAgent.actor.h @@ -199,14 +199,14 @@ class FileBackupAgent : public BackupAgentBase { public: FileBackupAgent(); - FileBackupAgent( FileBackupAgent&& r ) noexcept(true) : + FileBackupAgent( FileBackupAgent&& r ) BOOST_NOEXCEPT : subspace( std::move(r.subspace) ), config( std::move(r.config) ), lastRestorable( std::move(r.lastRestorable) ), taskBucket( std::move(r.taskBucket) ), futureBucket( std::move(r.futureBucket) ) {} - void operator=( FileBackupAgent&& r ) noexcept(true) { + void operator=( FileBackupAgent&& r ) BOOST_NOEXCEPT { subspace = std::move(r.subspace); config = std::move(r.config); lastRestorable = std::move(r.lastRestorable), @@ -315,7 +315,7 @@ public: DatabaseBackupAgent(); explicit DatabaseBackupAgent(Database src); - DatabaseBackupAgent( DatabaseBackupAgent&& r ) noexcept(true) : + DatabaseBackupAgent( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT : subspace( std::move(r.subspace) ), states( std::move(r.states) ), config( std::move(r.config) ), @@ -327,7 +327,7 @@ public: sourceStates( std::move(r.sourceStates) ), sourceTagNames( std::move(r.sourceTagNames) ) {} - void operator=( DatabaseBackupAgent&& r ) noexcept(true) { + void operator=( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT { subspace = std::move(r.subspace); states = std::move(r.states); config = std::move(r.config); diff --git a/fdbclient/KeyRangeMap.h b/fdbclient/KeyRangeMap.h index 3162f512be..aafd92cb69 100644 --- a/fdbclient/KeyRangeMap.h +++ b/fdbclient/KeyRangeMap.h @@ -36,7 +36,7 @@ template class KeyRangeMap : public RangeMap, NonCopyable, public ReferenceCounted> { public: explicit KeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap(endKey, v), mapEnd(endKey) {} - void operator=(KeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } + void operator=(KeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } void insert( const 
KeyRangeRef& keys, const Val& value ) { RangeMap::insert(keys, value); } void insert( const KeyRef& key, const Val& value ) { RangeMap::insert( singleKeyRange(key), value); } std::vector> getAffectedRangesAfterInsertion( const KeyRangeRef& keys, const Val &insertionValue = Val()); @@ -67,7 +67,7 @@ template class CoalescedKeyRefRangeMap : public RangeMap, NonCopyable { public: explicit CoalescedKeyRefRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap(endKey, v), mapEnd(endKey) {} - void operator=(CoalescedKeyRefRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } + void operator=(CoalescedKeyRefRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } void insert( const KeyRangeRef& keys, const Val& value ); void insert( const KeyRef& key, const Val& value, Arena& arena ); Key mapEnd; @@ -77,7 +77,7 @@ template class CoalescedKeyRangeMap : public RangeMap, NonCopyable { public: explicit CoalescedKeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap(endKey, v), mapEnd(endKey) {} - void operator=(CoalescedKeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } + void operator=(CoalescedKeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap::operator=(std::move(r)); } void insert( const KeyRangeRef& keys, const Val& value ); void insert( const KeyRef& key, const Val& value ); Key mapEnd; diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 3061379727..590cce7967 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1929,7 +1929,7 @@ Transaction::~Transaction() { cancelWatches(); } -void Transaction::operator=(Transaction&& r) noexcept(true) { +void Transaction::operator=(Transaction&& r) BOOST_NOEXCEPT { flushTrLogsIfEnabled(); cx = std::move(r.cx); tr = std::move(r.tr); diff --git a/fdbclient/NativeAPI.actor.h b/fdbclient/NativeAPI.actor.h index 
f5ba76e385..51205caa96 100644 --- a/fdbclient/NativeAPI.actor.h +++ b/fdbclient/NativeAPI.actor.h @@ -74,8 +74,8 @@ public: Database() {} // an uninitialized database can be destructed or reassigned safely; that's it void operator= ( Database const& rhs ) { db = rhs.db; } Database( Database const& rhs ) : db(rhs.db) {} - Database(Database&& r) noexcept(true) : db(std::move(r.db)) {} - void operator= (Database&& r) noexcept(true) { db = std::move(r.db); } + Database(Database&& r) BOOST_NOEXCEPT : db(std::move(r.db)) {} + void operator= (Database&& r) BOOST_NOEXCEPT { db = std::move(r.db); } // For internal use by the native client: explicit Database(Reference cx) : db(cx) {} @@ -280,7 +280,7 @@ public: // These are to permit use as state variables in actors: Transaction() : info( TaskDefaultEndpoint ) {} - void operator=(Transaction&& r) noexcept(true); + void operator=(Transaction&& r) BOOST_NOEXCEPT; void reset(); void fullReset(); diff --git a/fdbclient/Notified.h b/fdbclient/Notified.h index bc8fe482f2..3873e343cb 100644 --- a/fdbclient/Notified.h +++ b/fdbclient/Notified.h @@ -66,8 +66,8 @@ struct NotifiedVersion { set( v ); } - NotifiedVersion(NotifiedVersion&& r) noexcept(true) : waiting(std::move(r.waiting)), val(std::move(r.val)) {} - void operator=(NotifiedVersion&& r) noexcept(true) { waiting = std::move(r.waiting); val = std::move(r.val); } + NotifiedVersion(NotifiedVersion&& r) BOOST_NOEXCEPT : waiting(std::move(r.waiting)), val(std::move(r.val)) {} + void operator=(NotifiedVersion&& r) BOOST_NOEXCEPT { waiting = std::move(r.waiting); val = std::move(r.val); } private: typedef std::pair> Item; diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index b7240329db..cc6bce8a86 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -1805,7 +1805,7 @@ void ReadYourWritesTransaction::setOption( FDBTransactionOptions::Option option, tr.setOption( option, value ); } -void 
ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcept(true) { +void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT { cache = std::move( r.cache ); writes = std::move( r.writes ); arena = std::move( r.arena ); @@ -1826,7 +1826,7 @@ void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcep writes.arena = &arena; } -ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true) : +ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT : cache( std::move(r.cache) ), writes( std::move(r.writes) ), arena( std::move(r.arena) ), diff --git a/fdbclient/ReadYourWrites.h b/fdbclient/ReadYourWrites.h index 926745621b..d91b36a270 100644 --- a/fdbclient/ReadYourWrites.h +++ b/fdbclient/ReadYourWrites.h @@ -111,8 +111,8 @@ public: // These are to permit use as state variables in actors: ReadYourWritesTransaction() : cache(&arena), writes(&arena) {} - void operator=(ReadYourWritesTransaction&& r) noexcept(true); - ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true); + void operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT; + ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT; virtual void addref() { ReferenceCounted::addref(); } virtual void delref() { ReferenceCounted::delref(); } diff --git a/fdbclient/SnapshotCache.h b/fdbclient/SnapshotCache.h index bb718e152f..d572b6034c 100644 --- a/fdbclient/SnapshotCache.h +++ b/fdbclient/SnapshotCache.h @@ -277,8 +277,8 @@ public: entries.insert( Entry( allKeys.end, afterAllKeys, VectorRef() ), NoMetric(), true ); } // Visual Studio refuses to generate these, apparently despite the standard - SnapshotCache(SnapshotCache&& r) noexcept(true) : entries(std::move(r.entries)), arena(r.arena) {} - SnapshotCache& operator=(SnapshotCache&& r) noexcept(true) { entries = std::move(r.entries); arena = r.arena; return *this; } + 
SnapshotCache(SnapshotCache&& r) BOOST_NOEXCEPT : entries(std::move(r.entries)), arena(r.arena) {} + SnapshotCache& operator=(SnapshotCache&& r) BOOST_NOEXCEPT { entries = std::move(r.entries); arena = r.arena; return *this; } bool empty() const { // Returns true iff anything is known about the contents of the snapshot diff --git a/fdbclient/ThreadSafeTransaction.actor.cpp b/fdbclient/ThreadSafeTransaction.actor.cpp index 4a9e053f47..41c7ba8879 100644 --- a/fdbclient/ThreadSafeTransaction.actor.cpp +++ b/fdbclient/ThreadSafeTransaction.actor.cpp @@ -286,12 +286,12 @@ ThreadFuture ThreadSafeTransaction::onError( Error const& e ) { return onMainThread( [tr, e](){ return tr->onError(e); } ); } -void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) noexcept(true) { +void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT { tr = r.tr; r.tr = NULL; } -ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true) { +ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT { tr = r.tr; r.tr = NULL; } diff --git a/fdbclient/ThreadSafeTransaction.h b/fdbclient/ThreadSafeTransaction.h index de40b6ae6a..d51ae7f20a 100644 --- a/fdbclient/ThreadSafeTransaction.h +++ b/fdbclient/ThreadSafeTransaction.h @@ -96,8 +96,8 @@ public: // These are to permit use as state variables in actors: ThreadSafeTransaction() : tr(NULL) {} - void operator=(ThreadSafeTransaction&& r) noexcept(true); - ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true); + void operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT; + ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT; void reset(); diff --git a/fdbclient/VersionedMap.h b/fdbclient/VersionedMap.h index 6102e300d9..f0dc5e2b46 100644 --- a/fdbclient/VersionedMap.h +++ b/fdbclient/VersionedMap.h @@ -489,10 +489,10 @@ public: VersionedMap() : oldestVersion(0), latestVersion(0) { latestRoot = &roots[0]; } - VersionedMap( VersionedMap&& v ) 
noexcept(true) : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) { + VersionedMap( VersionedMap&& v ) BOOST_NOEXCEPT : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) { latestRoot = &roots[latestVersion]; } - void operator = (VersionedMap && v) noexcept(true) { + void operator = (VersionedMap && v) BOOST_NOEXCEPT { oldestVersion = v.oldestVersion; latestVersion = v.latestVersion; roots = std::move(v.roots); diff --git a/fdbclient/WriteMap.h b/fdbclient/WriteMap.h index 733455f6d1..f9f4518ee8 100644 --- a/fdbclient/WriteMap.h +++ b/fdbclient/WriteMap.h @@ -128,8 +128,8 @@ public: PTreeImpl::insert( writes, ver, WriteMapEntry( afterAllKeys, OperationStack(), false, false, false, false, false ) ); } - WriteMap(WriteMap&& r) noexcept(true) : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {} - WriteMap& operator=(WriteMap&& r) noexcept(true) { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; } + WriteMap(WriteMap&& r) BOOST_NOEXCEPT : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {} + WriteMap& operator=(WriteMap&& r) BOOST_NOEXCEPT { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; } //a write with addConflict false on top of an existing write with a conflict range will not remove the conflict void mutate( KeyRef key, MutationRef::Type operation, ValueRef param, bool addConflict ) { diff --git a/fdbrpc/AsyncFileCached.actor.h b/fdbrpc/AsyncFileCached.actor.h index 1523565d17..51c04abc2e 100644 --- a/fdbrpc/AsyncFileCached.actor.h +++ b/fdbrpc/AsyncFileCached.actor.h @@ -77,7 +77,7 @@ struct 
OpenFileInfo : NonCopyable { Future> opened; // Only valid until the file is fully opened OpenFileInfo() : f(0) {} - OpenFileInfo(OpenFileInfo && r) noexcept(true) : f(r.f), opened(std::move(r.opened)) { r.f = 0; } + OpenFileInfo(OpenFileInfo && r) BOOST_NOEXCEPT : f(r.f), opened(std::move(r.opened)) { r.f = 0; } Future> get() { if (f) return Reference::addRef(f); diff --git a/fdbrpc/RangeMap.h b/fdbrpc/RangeMap.h index a1ed371a41..700db42d0a 100644 --- a/fdbrpc/RangeMap.h +++ b/fdbrpc/RangeMap.h @@ -149,7 +149,7 @@ public: void coalesce( const Range& k ); void validateCoalesced(); - void operator=(RangeMap&& r) noexcept(true) { map = std::move(r.map); } + void operator=(RangeMap&& r) BOOST_NOEXCEPT { map = std::move(r.map); } //void clear( const Val& value ) { ranges.clear(); ranges.insert(std::make_pair(Key(),value)); } void insert( const Range& keys, const Val& value ); diff --git a/fdbrpc/fdbrpc.h b/fdbrpc/fdbrpc.h index 90530e5bb4..dc5642db87 100644 --- a/fdbrpc/fdbrpc.h +++ b/fdbrpc/fdbrpc.h @@ -112,7 +112,7 @@ public: bool isValid() const { return sav != NULL; } ReplyPromise() : sav(new NetSAV(0, 1)) {} ReplyPromise(const ReplyPromise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); } - ReplyPromise(ReplyPromise&& rhs) noexcept(true) : sav(rhs.sav) { rhs.sav = 0; } + ReplyPromise(ReplyPromise&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; } ~ReplyPromise() { if (sav) sav->delPromiseRef(); } ReplyPromise(const Endpoint& endpoint) : sav(new NetSAV(0, 1, endpoint)) {} @@ -123,7 +123,7 @@ public: if (sav) sav->delPromiseRef(); sav = rhs.sav; } - void operator=(ReplyPromise && rhs) noexcept(true) { + void operator=(ReplyPromise && rhs) BOOST_NOEXCEPT { if (sav != rhs.sav) { if (sav) sav->delPromiseRef(); sav = rhs.sav; @@ -323,13 +323,13 @@ public: FutureStream getFuture() const { queue->addFutureRef(); return FutureStream(queue); } RequestStream() : queue(new NetNotifiedQueue(0, 1)) {} RequestStream(const RequestStream& rhs) : queue(rhs.queue) { 
queue->addPromiseRef(); } - RequestStream(RequestStream&& rhs) noexcept(true) : queue(rhs.queue) { rhs.queue = 0; } + RequestStream(RequestStream&& rhs) BOOST_NOEXCEPT : queue(rhs.queue) { rhs.queue = 0; } void operator=(const RequestStream& rhs) { rhs.queue->addPromiseRef(); if (queue) queue->delPromiseRef(); queue = rhs.queue; } - void operator=(RequestStream&& rhs) noexcept(true) { + void operator=(RequestStream&& rhs) BOOST_NOEXCEPT { if (queue != rhs.queue) { if (queue) queue->delPromiseRef(); queue = rhs.queue; diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 3ee1a07b46..b58bd9e2c7 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -1584,10 +1584,10 @@ public: Promise action; Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Promise&& action ) : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {} Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Future& future ) : time(time), taskID(taskID), stable(stable), machine(machine) { future = action.getFuture(); } - Task(Task&& rhs) noexcept(true) : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {} + Task(Task&& rhs) BOOST_NOEXCEPT : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {} void operator= ( Task const& rhs ) { taskID = rhs.taskID; time = rhs.time; stable = rhs.stable; machine = rhs.machine; action = rhs.action; } Task( Task const& rhs ) : taskID(rhs.taskID), time(rhs.time), stable(rhs.stable), machine(rhs.machine), action(rhs.action) {} - void operator= (Task&& rhs) noexcept(true) { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); } + void operator= (Task&& rhs) BOOST_NOEXCEPT { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); } bool operator < (Task 
const& rhs) const { // Ordering is reversed for priority_queue diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 53962c0340..aadebd93c2 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -60,9 +60,9 @@ struct WorkerInfo : NonCopyable { WorkerInfo( Future watcher, ReplyPromise reply, Generation gen, WorkerInterface interf, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo ) : watcher(watcher), reply(reply), gen(gen), reboots(0), lastAvailableTime(now()), interf(interf), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo) {} - WorkerInfo( WorkerInfo&& r ) noexcept(true) : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen), + WorkerInfo( WorkerInfo&& r ) BOOST_NOEXCEPT : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen), reboots(r.reboots), lastAvailableTime(r.lastAvailableTime), interf(std::move(r.interf)), initialClass(r.initialClass), processClass(r.processClass), priorityInfo(r.priorityInfo) {} - void operator=( WorkerInfo&& r ) noexcept(true) { + void operator=( WorkerInfo&& r ) BOOST_NOEXCEPT { watcher = std::move(r.watcher); reply = std::move(r.reply); gen = r.gen; diff --git a/fdbserver/LogRouter.actor.cpp b/fdbserver/LogRouter.actor.cpp index 8b59203250..2e0adf2c81 100644 --- a/fdbserver/LogRouter.actor.cpp +++ b/fdbserver/LogRouter.actor.cpp @@ -42,8 +42,8 @@ struct LogRouterData { TagData( Tag tag, Version popped, Version durableKnownCommittedVersion ) : tag(tag), popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion) {} - TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {} - void operator= (TagData&& r) noexcept(true) { + TagData(TagData&& r) BOOST_NOEXCEPT : 
version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {} + void operator= (TagData&& r) BOOST_NOEXCEPT { version_messages = std::move(r.version_messages); tag = r.tag; popped = r.popped; diff --git a/fdbserver/OldTLogServer_4_6.actor.cpp b/fdbserver/OldTLogServer_4_6.actor.cpp index 58179dba61..f41fd0b21c 100644 --- a/fdbserver/OldTLogServer_4_6.actor.cpp +++ b/fdbserver/OldTLogServer_4_6.actor.cpp @@ -323,8 +323,8 @@ namespace oldTLog_4_6 { TagData( Version popped, bool nothing_persistent, bool popped_recently, OldTag tag ) : nothing_persistent(nothing_persistent), popped(popped), popped_recently(popped_recently), update_version_sizes(tag != txsTagOld) {} - TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {} - void operator= (TagData&& r) noexcept(true) { + TagData(TagData&& r) BOOST_NOEXCEPT : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {} + void operator= (TagData&& r) BOOST_NOEXCEPT { version_messages = std::move(r.version_messages); nothing_persistent = r.nothing_persistent; popped_recently = r.popped_recently; diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp index 0d4469fe7e..ae077eacf0 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -294,8 +294,8 @@ struct LogData : NonCopyable, public ReferenceCounted { TagData( Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered ) : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently), unpoppedRecovered(unpoppedRecovered) {} - TagData(TagData&& r) 
noexcept(true) : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {} - void operator= (TagData&& r) noexcept(true) { + TagData(TagData&& r) BOOST_NOEXCEPT : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {} + void operator= (TagData&& r) BOOST_NOEXCEPT { versionMessages = std::move(r.versionMessages); nothingPersistent = r.nothingPersistent; poppedRecently = r.poppedRecently; diff --git a/fdbserver/SkipList.cpp b/fdbserver/SkipList.cpp index 1f1f81e292..58c03b5b99 100644 --- a/fdbserver/SkipList.cpp +++ b/fdbserver/SkipList.cpp @@ -494,12 +494,12 @@ public: ~SkipList() { destroy(); } - SkipList(SkipList&& other) noexcept(true) + SkipList(SkipList&& other) BOOST_NOEXCEPT : header(other.header) { other.header = NULL; } - void operator=(SkipList&& other) noexcept(true) { + void operator=(SkipList&& other) BOOST_NOEXCEPT { destroy(); header = other.header; other.header = NULL; diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 064f9626a0..327082f253 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -321,8 +321,8 @@ struct LogData : NonCopyable, public ReferenceCounted { TagData( Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered ) : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently), unpoppedRecovered(unpoppedRecovered) {} - TagData(TagData&& r) noexcept(true) : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {} - void operator= (TagData&& r) noexcept(true) { + TagData(TagData&& r) BOOST_NOEXCEPT : 
versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {} + void operator= (TagData&& r) BOOST_NOEXCEPT { versionMessages = std::move(r.versionMessages); nothingPersistent = r.nothingPersistent; poppedRecently = r.poppedRecently; diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index f1c2181493..6460f49403 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -53,8 +53,8 @@ struct ProxyVersionReplies { std::map replies; NotifiedVersion latestRequestNum; - ProxyVersionReplies(ProxyVersionReplies&& r) noexcept(true) : replies(std::move(r.replies)), latestRequestNum(std::move(r.latestRequestNum)) {} - void operator=(ProxyVersionReplies&& r) noexcept(true) { replies = std::move(r.replies); latestRequestNum = std::move(r.latestRequestNum); } + ProxyVersionReplies(ProxyVersionReplies&& r) BOOST_NOEXCEPT : replies(std::move(r.replies)), latestRequestNum(std::move(r.latestRequestNum)) {} + void operator=(ProxyVersionReplies&& r) BOOST_NOEXCEPT { replies = std::move(r.replies); latestRequestNum = std::move(r.latestRequestNum); } ProxyVersionReplies() : latestRequestNum(0) {} }; diff --git a/flow/Arena.h b/flow/Arena.h index e89dc4dc6a..5433c28490 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -92,9 +92,9 @@ public: inline explicit Arena( size_t reservedSize ); //~Arena(); Arena(const Arena&); - Arena(Arena && r) noexcept(true); + Arena(Arena && r) BOOST_NOEXCEPT; Arena& operator=(const Arena&); - Arena& operator=(Arena&&) noexcept(true); + Arena& operator=(Arena&&) BOOST_NOEXCEPT; inline void dependsOn( const Arena& p ); inline size_t getSize() const; @@ -288,12 +288,12 @@ inline Arena::Arena(size_t reservedSize) : impl( 0 ) { ArenaBlock::create((int)reservedSize,impl); } inline Arena::Arena( const Arena& r ) : impl( r.impl ) {} -inline Arena::Arena(Arena && r) noexcept(true) 
: impl(std::move(r.impl)) {} +inline Arena::Arena(Arena && r) BOOST_NOEXCEPT : impl(std::move(r.impl)) {} inline Arena& Arena::operator=(const Arena& r) { impl = r.impl; return *this; } -inline Arena& Arena::operator=(Arena&& r) noexcept(true) { +inline Arena& Arena::operator=(Arena&& r) BOOST_NOEXCEPT { impl = std::move(r.impl); return *this; } diff --git a/flow/Deque.h b/flow/Deque.h index 9a10e3fadb..224a6e1373 100644 --- a/flow/Deque.h +++ b/flow/Deque.h @@ -65,13 +65,13 @@ public: // FIXME: Specialization for POD types using memcpy? } - Deque(Deque&& r) noexcept(true) : begin(r.begin), end(r.end), mask(r.mask), arr(r.arr) { + Deque(Deque&& r) BOOST_NOEXCEPT : begin(r.begin), end(r.end), mask(r.mask), arr(r.arr) { r.arr = 0; r.begin = r.end = 0; r.mask = -1; } - void operator=(Deque&& r) noexcept(true) { + void operator=(Deque&& r) BOOST_NOEXCEPT { cleanup(); begin = r.begin; diff --git a/flow/FastRef.h b/flow/FastRef.h index a817348c20..3ddcdd55e9 100644 --- a/flow/FastRef.h +++ b/flow/FastRef.h @@ -104,7 +104,7 @@ public: static Reference

addRef( P* ptr ) { ptr->addref(); return Reference(ptr); } Reference(const Reference& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); } - Reference(Reference && r) noexcept(true) : ptr(r.getPtr()) { r.ptr = NULL; } + Reference(Reference && r) BOOST_NOEXCEPT : ptr(r.getPtr()) { r.ptr = NULL; } template Reference(const Reference& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); } @@ -122,7 +122,7 @@ public: } return *this; } - Reference& operator=(Reference&& r) noexcept(true) { + Reference& operator=(Reference&& r) BOOST_NOEXCEPT { P* oldPtr = ptr; P* newPtr = r.ptr; if (oldPtr != newPtr) { diff --git a/flow/IndexedSet.h b/flow/IndexedSet.h index bb1113d464..c85b86bb83 100644 --- a/flow/IndexedSet.h +++ b/flow/IndexedSet.h @@ -96,8 +96,8 @@ public: IndexedSet() : root(NULL) {}; ~IndexedSet() { delete root; } - IndexedSet(IndexedSet&& r) noexcept(true) : root(r.root) { r.root = NULL; } - IndexedSet& operator=(IndexedSet&& r) noexcept(true) { delete root; root = r.root; r.root = 0; return *this; } + IndexedSet(IndexedSet&& r) BOOST_NOEXCEPT : root(r.root) { r.root = NULL; } + IndexedSet& operator=(IndexedSet&& r) BOOST_NOEXCEPT { delete root; root = r.root; r.root = 0; return *this; } iterator begin() const; iterator end() const { return iterator(); } @@ -243,8 +243,8 @@ public: void operator= ( MapPair const& rhs ) { key = rhs.key; value = rhs.value; } MapPair( MapPair const& rhs ) : key(rhs.key), value(rhs.value) {} - MapPair(MapPair&& r) noexcept(true) : key(std::move(r.key)), value(std::move(r.value)) {} - void operator=(MapPair&& r) noexcept(true) { key = std::move(r.key); value = std::move(r.value); } + MapPair(MapPair&& r) BOOST_NOEXCEPT : key(std::move(r.key)), value(std::move(r.value)) {} + void operator=(MapPair&& r) BOOST_NOEXCEPT { key = std::move(r.key); value = std::move(r.value); } bool operator<(MapPair const& r) const { return key < r.key; } bool operator<=(MapPair const& r) const { return key <= r.key; } @@ -317,8 +317,8 @@ public: static int 
getElementBytes() { return IndexedSet< Pair, Metric >::getElementBytes(); } - Map(Map&& r) noexcept(true) : set(std::move(r.set)) {} - void operator=(Map&& r) noexcept(true) { set = std::move(r.set); } + Map(Map&& r) BOOST_NOEXCEPT : set(std::move(r.set)) {} + void operator=(Map&& r) BOOST_NOEXCEPT { set = std::move(r.set); } private: Map( Map const& ); // unimplemented diff --git a/flow/Net2.actor.cpp b/flow/Net2.actor.cpp index 9afea3140c..e488f22c12 100644 --- a/flow/Net2.actor.cpp +++ b/flow/Net2.actor.cpp @@ -245,7 +245,7 @@ class BindPromise { public: BindPromise( const char* errContext, UID errID ) : errContext(errContext), errID(errID) {} BindPromise( BindPromise const& r ) : p(r.p), errContext(r.errContext), errID(r.errID) {} - BindPromise(BindPromise&& r) noexcept(true) : p(std::move(r.p)), errContext(r.errContext), errID(r.errID) {} + BindPromise(BindPromise&& r) BOOST_NOEXCEPT : p(std::move(r.p)), errContext(r.errContext), errID(r.errID) {} Future getFuture() { return p.getFuture(); } @@ -481,7 +481,7 @@ private: struct PromiseTask : public Task, public FastAllocated { Promise promise; PromiseTask() {} - explicit PromiseTask( Promise&& promise ) noexcept(true) : promise(std::move(promise)) {} + explicit PromiseTask( Promise&& promise ) BOOST_NOEXCEPT : promise(std::move(promise)) {} virtual void operator()() { promise.send(Void()); diff --git a/flow/Platform.h b/flow/Platform.h index 7cfbeab75f..a76ca334d7 100644 --- a/flow/Platform.h +++ b/flow/Platform.h @@ -544,12 +544,8 @@ inline static int ctzll( uint64_t value ) { #define ctzll __builtin_ctzll #endif -// MSVC not support noexcept yet -#ifndef __GNUG__ -#ifndef VS14 -#define noexcept(enabled) -#endif -#endif +#include +// The formerly existing BOOST_NOEXCEPT is now BOOST_NOEXCEPT #else #define EXTERNC diff --git a/flow/TDMetric.actor.h b/flow/TDMetric.actor.h index 84862aefcd..5d8c09957f 100755 --- a/flow/TDMetric.actor.h +++ b/flow/TDMetric.actor.h @@ -216,14 +216,14 @@ struct MetricData { 
appendStart(appendStart) { } - MetricData( MetricData&& r ) noexcept(true) : + MetricData( MetricData&& r ) BOOST_NOEXCEPT : start(r.start), rollTime(r.rollTime), appendStart(r.appendStart), writer(std::move(r.writer)) { } - void operator=( MetricData&& r ) noexcept(true) { + void operator=( MetricData&& r ) BOOST_NOEXCEPT { start = r.start; rollTime = r.rollTime; appendStart = r.appendStart; writer = std::move(r.writer); } @@ -626,9 +626,9 @@ template levels; - EventField( EventField&& r ) noexcept(true) : Descriptor(r), levels(std::move(r.levels)) {} + EventField( EventField&& r ) BOOST_NOEXCEPT : Descriptor(r), levels(std::move(r.levels)) {} - void operator=( EventField&& r ) noexcept(true) { + void operator=( EventField&& r ) BOOST_NOEXCEPT { levels = std::move(r.levels); } diff --git a/flow/ThreadHelper.actor.h b/flow/ThreadHelper.actor.h index 3e4b98fa9e..4fdd3c26ff 100644 --- a/flow/ThreadHelper.actor.h +++ b/flow/ThreadHelper.actor.h @@ -462,7 +462,7 @@ public: ThreadFuture( const ThreadFuture& rhs ) : sav(rhs.sav) { if (sav) sav->addref(); } - ThreadFuture(ThreadFuture&& rhs) noexcept(true) : sav(rhs.sav) { + ThreadFuture(ThreadFuture&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; } ThreadFuture( const T& presentValue ) @@ -487,7 +487,7 @@ public: if (sav) sav->delref(); sav = rhs.sav; } - void operator=(ThreadFuture&& rhs) noexcept(true) { + void operator=(ThreadFuture&& rhs) BOOST_NOEXCEPT { if (sav != rhs.sav) { if (sav) sav->delref(); sav = rhs.sav; diff --git a/flow/flow.h b/flow/flow.h index 5def73ca65..94a67b0dcc 100644 --- a/flow/flow.h +++ b/flow/flow.h @@ -624,7 +624,7 @@ public: if (sav) sav->addFutureRef(); //if (sav->endpoint.isValid()) cout << "Future copied for " << sav->endpoint.key << endl; } - Future(Future&& rhs) noexcept(true) : sav(rhs.sav) { + Future(Future&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; //if (sav->endpoint.isValid()) cout << "Future moved for " << sav->endpoint.key << endl; } @@ -658,7 +658,7 @@ public: if 
(sav) sav->delFutureRef(); sav = rhs.sav; } - void operator=(Future&& rhs) noexcept(true) { + void operator=(Future&& rhs) BOOST_NOEXCEPT { if (sav != rhs.sav) { if (sav) sav->delFutureRef(); sav = rhs.sav; @@ -734,7 +734,7 @@ public: bool isValid() const { return sav != NULL; } Promise() : sav(new SAV(0, 1)) {} Promise(const Promise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); } - Promise(Promise&& rhs) noexcept(true) : sav(rhs.sav) { rhs.sav = 0; } + Promise(Promise&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; } ~Promise() { if (sav) sav->delPromiseRef(); } void operator=(const Promise& rhs) { @@ -742,7 +742,7 @@ public: if (sav) sav->delPromiseRef(); sav = rhs.sav; } - void operator=(Promise && rhs) noexcept(true) { + void operator=(Promise && rhs) BOOST_NOEXCEPT { if (sav != rhs.sav) { if (sav) sav->delPromiseRef(); sav = rhs.sav; @@ -787,14 +787,14 @@ public: } FutureStream() : queue(NULL) {} FutureStream(const FutureStream& rhs) : queue(rhs.queue) { queue->addFutureRef(); } - FutureStream(FutureStream&& rhs) noexcept(true) : queue(rhs.queue) { rhs.queue = 0; } + FutureStream(FutureStream&& rhs) BOOST_NOEXCEPT : queue(rhs.queue) { rhs.queue = 0; } ~FutureStream() { if (queue) queue->delFutureRef(); } void operator=(const FutureStream& rhs) { rhs.queue->addFutureRef(); if (queue) queue->delFutureRef(); queue = rhs.queue; } - void operator=(FutureStream&& rhs) noexcept(true) { + void operator=(FutureStream&& rhs) BOOST_NOEXCEPT { if (rhs.queue != queue) { if (queue) queue->delFutureRef(); queue = rhs.queue; @@ -888,13 +888,13 @@ public: FutureStream getFuture() const { queue->addFutureRef(); return FutureStream(queue); } PromiseStream() : queue(new NotifiedQueue(0, 1)) {} PromiseStream(const PromiseStream& rhs) : queue(rhs.queue) { queue->addPromiseRef(); } - PromiseStream(PromiseStream&& rhs) noexcept(true) : queue(rhs.queue) { rhs.queue = 0; } + PromiseStream(PromiseStream&& rhs) BOOST_NOEXCEPT : queue(rhs.queue) { rhs.queue = 0; } void operator=(const 
PromiseStream& rhs) { rhs.queue->addPromiseRef(); if (queue) queue->delPromiseRef(); queue = rhs.queue; } - void operator=(PromiseStream&& rhs) noexcept(true) { + void operator=(PromiseStream&& rhs) BOOST_NOEXCEPT { if (queue != rhs.queue) { if (queue) queue->delPromiseRef(); queue = rhs.queue; diff --git a/flow/genericactors.actor.h b/flow/genericactors.actor.h index 4b1d6abbc6..d32c4da1f5 100644 --- a/flow/genericactors.actor.h +++ b/flow/genericactors.actor.h @@ -1199,7 +1199,7 @@ struct FlowLock : NonCopyable, public ReferenceCounted { int remaining; Releaser() : lock(0), remaining(0) {} Releaser( FlowLock& lock, int64_t amount = 1 ) : lock(&lock), remaining(amount) {} - Releaser(Releaser&& r) noexcept(true) : lock(r.lock), remaining(r.remaining) { r.remaining = 0; } + Releaser(Releaser&& r) BOOST_NOEXCEPT : lock(r.lock), remaining(r.remaining) { r.remaining = 0; } void operator=(Releaser&& r) { if (remaining) lock->release(remaining); lock = r.lock; remaining = r.remaining; r.remaining = 0; } void release( int64_t amount = -1 ) { @@ -1437,7 +1437,7 @@ public: futures = f.futures; } - AndFuture(AndFuture&& f) noexcept(true) { + AndFuture(AndFuture&& f) BOOST_NOEXCEPT { futures = std::move(f.futures); } @@ -1457,7 +1457,7 @@ public: futures = f.futures; } - void operator=(AndFuture&& f) noexcept(true) { + void operator=(AndFuture&& f) BOOST_NOEXCEPT { futures = std::move(f.futures); } From f699f85e30c3811c74a4bfe69e8c722e0b12c813 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Tue, 5 Mar 2019 14:25:44 -0800 Subject: [PATCH 30/71] boost: Update README and Dockerfile to use 1.67 --- README.md | 2 +- build/Dockerfile | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 851f374646..fd73723540 100755 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ become the only build system available. 1. Check out this repo on your Mac. 1. Install the Xcode command-line tools. -1. 
Download version 1.52 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.52.0/). +1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/). 1. Set the `BOOSTDIR` environment variable to the location containing this boost installation. 1. Install [Mono](http://www.mono-project.com/download/stable/). 1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8. diff --git a/build/Dockerfile b/build/Dockerfile index f62766237f..eb276548c5 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -10,12 +10,11 @@ RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R USER fdb # wget of bintray without forcing UTF-8 encoding results in 403 Forbidden -RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 &&\ +RUN cd /opt/ &&\ wget --local-encoding=UTF-8 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 &&\ echo '2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2' | sha256sum -c - &&\ - tar -xjf boost_1_52_0.tar.bz2 &&\ tar -xjf boost_1_67_0.tar.bz2 &&\ - rm boost_1_52_0.tar.bz2 boost_1_67_0.tar.bz2 + rm boost_1_67_0.tar.bz2 USER root From fd34626009d88eda25dd2e252ae10e8a6d9abfa2 Mon Sep 17 00:00:00 2001 From: Vishesh Yadav Date: Tue, 5 Mar 2019 17:08:39 -0800 Subject: [PATCH 31/71] boost: Remove log from Makefile and version check in flow.h --- Makefile | 1 - flow/flow.h | 6 ------ 2 files changed, 7 deletions(-) diff --git a/Makefile b/Makefile index 2cecf8104e..bf3cd68235 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,6 @@ else $(error Not prepared to compile on platform $(PLATFORM)) endif BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME} -$(info BOOSTDIR is ${BOOSTDIR}) CCACHE := $(shell which ccache) ifneq ($(CCACHE),) diff --git a/flow/flow.h b/flow/flow.h index 94a67b0dcc..3108ac9d3b 100644 --- a/flow/flow.h +++ 
b/flow/flow.h @@ -47,12 +47,6 @@ #include -#if BOOST_VERSION == 105200 -#error Boost is still 1.52.0 -#elif BOOST_VERSION != 106700 -#error Boost is not 1.67.0 -#endif - using namespace std::rel_ops; #define TEST( condition ) if (!(condition)); else { static TraceEvent* __test = &(TraceEvent("CodeCoverage").detail("File", __FILE__).detail("Line",__LINE__).detail("Condition", #condition)); } From 30b914c2a501529c35216c0381351659f0fb0eec Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 6 Mar 2019 15:38:13 +0100 Subject: [PATCH 32/71] Add instruction to install Python for CMake/Windows I followed the instructions to build with CMake on Windows, but cmake complains about missing Python interpreter. Installation Python 2.7.x solved the issue. ``` C:\Data\Git\GitHub\foundationdb\build>cmake -G "Visual Studio 15 2017 Win64" -DBOOST_ROOT=C:\Users\chevalier\Downloads\boost_1_67_0\boost_1_67_0 c:\data\git\github\foundationdb -- C:/Data/Git/GitHub/foundationdb C:/Data/Git/GitHub/foundationdb/build -- Could NOT find LibreSSL, try to set the path to LibreSSL root folder in the system variable LibreSSL_ROOT (missing: LIBRESSL_CRYPTO_LIBRARY LIBRESSL_SSL_LIBRARY LIBRESSL_TLS_LIBRARY LIBRESSL_INCLUDE_DIR) -- LibreSSL NOT Found - Will compile without TLS Support -- You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it -- Found JNI: C:/Program Files/Java/jdk-11.0.2/lib/jawt.lib (Required is at least version "1.8") -- Found Java: C:/Program Files/Java/jdk-11.0.2/bin/java.exe (found suitable version "11.0.2", minimum required is "1.8") found components: Development -- Could NOT find Python (missing: Python_EXECUTABLE Interpreter) CMake Error at cmake/FDBComponents.cmake:46 (message): Could not found a suitable python interpreter Call Stack (most recent call first): CMakeLists.txt:60 (include) -- Configuring incomplete, errors occurred! See also "C:/Data/Git/GitHub/foundationdb/build/CMakeFiles/CMakeOutput.log". 
See also "C:/Data/Git/GitHub/foundationdb/build/CMakeFiles/CMakeError.log". ``` After installing python ``` ... -- Found Python: C:/Python27/python.exe (found version "2.7.16") found components: Interpreter ... -- ========================================= -- Components Build Overview -- ========================================= -- Build Java Bindings: ON -- Build with TLS support: OFF -- Build Go bindings: OFF -- Build Ruby bindings: OFF -- Build Python sdist (make package): ON -- Build Documentation (make html): OFF -- ========================================= -- CPACK_COMPONENTS_ALL -- Configuring done -- Generating done -- Build files have been written to: C:/Data/Git/GitHub/foundationdb/build ``` --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index fd73723540..ce5a4f70cc 100755 --- a/README.md +++ b/README.md @@ -192,6 +192,7 @@ that Visual Studio is used to compile. 1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8. 1. Set `JAVA_HOME` to the unpacked location and JAVA_COMPILE to `$JAVA_HOME/bin/javac`. +1. Install [Python](https://www.python.org/downloads/). Required to build the Python binding. 1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio won't build the Windows installer. 1. Create a build directory (you can have the build directory anywhere you From eba77bc10c0d2f34e32daa068d520615ec83b328 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 6 Mar 2019 20:50:46 +0100 Subject: [PATCH 33/71] Update README.md Update wording of python requirements --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ce5a4f70cc..aca443f780 100755 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ that Visual Studio is used to compile. 1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8. 
1. Set `JAVA_HOME` to the unpacked location and JAVA_COMPILE to `$JAVA_HOME/bin/javac`. -1. Install [Python](https://www.python.org/downloads/). Required to build the Python binding. +1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio. 1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio won't build the Windows installer. 1. Create a build directory (you can have the build directory anywhere you From 4a4af6fb2c0d6adbf886858509da1fec362b35f5 Mon Sep 17 00:00:00 2001 From: mpilman Date: Tue, 5 Mar 2019 16:06:46 -0800 Subject: [PATCH 34/71] Make cmake build fail if old build system was used This changes makes a cmake build check for an existing versions.h file in the source directory before it builds anything else. If it finds it it will fail the build. This is to prevent confusion when someone tries to use cmake on a source directory where the old build system was used before (as this is not supported). --- cmake/AssertFileDoesntExist.cmake | 11 +++++++++++ cmake/FlowCommands.cmake | 19 +++++++++++++++++++ fdbmonitor/CMakeLists.txt | 1 + 3 files changed, 31 insertions(+) create mode 100644 cmake/AssertFileDoesntExist.cmake diff --git a/cmake/AssertFileDoesntExist.cmake b/cmake/AssertFileDoesntExist.cmake new file mode 100644 index 0000000000..ac2ac2bc62 --- /dev/null +++ b/cmake/AssertFileDoesntExist.cmake @@ -0,0 +1,11 @@ +set(error_msg + ${CMAKE_SOURCE_DIR}/versions.h exists. This usually means that + you did run `make` "(the old build system)" in this directory before. + This can result in unexpected behavior. 
run `make clean` in the + source directory to continue) +if(EXISTS "${FILE}") + list(JOIN error_msg " " err) + message(FATAL_ERROR "${err}") +else() + message(STATUS "${FILE} does not exist") +endif() diff --git a/cmake/FlowCommands.cmake b/cmake/FlowCommands.cmake index f32b2bc5d8..7e9e444e1a 100644 --- a/cmake/FlowCommands.cmake +++ b/cmake/FlowCommands.cmake @@ -68,6 +68,24 @@ function(generate_coverage_xml) add_dependencies(${target_name} coverage_${target_name}) endfunction() +# This function asserts that `versions.h` does not exist in the source +# directory. It does this in the prebuild phase of the target. +# This is an ugly hack that should make sure that cmake isn't used with +# a source directory in which FDB was previously built with `make`. +function(assert_no_version_h target) + + message(STATUS "Check versions.h on ${target}") + set(target_name "${target}_versions_h_check") + add_custom_target("${target_name}" + COMMAND "${CMAKE_COMMAND}" -DFILE="${CMAKE_SOURCE_DIR}/versions.h" + -P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake" + COMMAND echo + "${CMAKE_COMMAND}" -P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake" + -DFILE="${CMAKE_SOURCE_DIR}/versions.h" + COMMENT "Check old build system wasn't used in source dir") + add_dependencies(${target} ${target_name}) +endfunction() + function(add_flow_target) set(options EXECUTABLE STATIC_LIBRARY DYNAMIC_LIBRARY) @@ -138,6 +156,7 @@ function(add_flow_target) add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files}) add_dependencies(${AFT_NAME} ${AFT_NAME}_actors) + assert_no_version_h(${AFT_NAME}_actors) generate_coverage_xml(${AFT_NAME}) endif() target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/fdbmonitor/CMakeLists.txt b/fdbmonitor/CMakeLists.txt index 98a96b1cf6..8d126ba7f0 100644 --- a/fdbmonitor/CMakeLists.txt +++ b/fdbmonitor/CMakeLists.txt @@ -1,6 +1,7 @@ set(FDBMONITOR_SRCS ConvertUTF.h SimpleIni.h 
fdbmonitor.cpp) add_executable(fdbmonitor ${FDBMONITOR_SRCS}) +assert_no_version_h(fdbmonitor) if(UNIX AND NOT APPLE) target_link_libraries(fdbmonitor rt) endif() From 845f8fdcbc0f5fc77a5fc3b28fd774d871159105 Mon Sep 17 00:00:00 2001 From: Meng Xu Date: Wed, 6 Mar 2019 15:05:21 -0800 Subject: [PATCH 35/71] Status:healthy: Add optimizing_team_collections Change removing_redundant_teams status name to optimizing_team_collections. The new name is more general and can be applied in the future when we switch storage engines. --- documentation/sphinx/source/mr-status.rst | 2 +- fdbclient/Schemas.cpp | 4 ++-- fdbserver/Status.actor.cpp | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/documentation/sphinx/source/mr-status.rst b/documentation/sphinx/source/mr-status.rst index 772db55f2e..61d2103b16 100644 --- a/documentation/sphinx/source/mr-status.rst +++ b/documentation/sphinx/source/mr-status.rst @@ -127,7 +127,7 @@ The following format informally describes the JSON containing the status data. 
T "name": < "initializing" | "missing_data" | "healing" - | "removing_redundant_teams" + | "optimizing_team_collections" | "healthy_repartitioning" | "healthy_removing_server" | "healthy_rebalancing" diff --git a/fdbclient/Schemas.cpp b/fdbclient/Schemas.cpp index 08efea0317..769d7e8d25 100644 --- a/fdbclient/Schemas.cpp +++ b/fdbclient/Schemas.cpp @@ -521,7 +521,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "initializing", "missing_data", "healing", - "removing_redundant_teams", + "optimizing_team_collections", "healthy_repartitioning", "healthy_removing_server", "healthy_rebalancing", @@ -554,7 +554,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema( "initializing", "missing_data", "healing", - "removing_redundant_teams", + "optimizing_team_collections", "healthy_repartitioning", "healthy_removing_server", "healthy_rebalancing", diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index 09cb2cfdf3..0b48d7fcf2 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -1204,8 +1204,8 @@ ACTOR static Future dataStatusFetcher(std::pair= PRIORITY_TEAM_REDUNDANT) { stateSectionObj["healthy"] = true; - stateSectionObj["name"] = "removing_redundant_teams"; - stateSectionObj["description"] = "Removing redundant machine teams"; + stateSectionObj["name"] = "optimizing_team_collections"; + stateSectionObj["description"] = "Optimizing team collections"; } else if (highestPriority >= PRIORITY_MERGE_SHARD) { stateSectionObj["healthy"] = true; From 2fbc7522e4c75168b156eccdc4f5e75bc99ed237 Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Wed, 6 Mar 2019 18:25:02 -0500 Subject: [PATCH 36/71] review feedback: fix bindingtester test, add comments to versionstamp and other structures, handle nested tuples, handle prefix []byte in PackWithVersionstamp --- bindings/go/src/_stacktester/stacktester.go | 17 +++-- bindings/go/src/fdb/tuple/tuple.go | 84 ++++++++++++++++----- 2 files changed, 74 
insertions(+), 27 deletions(-) diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index b2012546e7..0c925af503 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -105,7 +105,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) { switch el := ret.item.(type) { case []byte: ret.item = el - case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple: + case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple, tuple.Versionstamp: ret.item = el case fdb.Key: ret.item = []byte(el) @@ -662,20 +662,21 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) { t = append(t, sm.waitAndPop().item) } sm.store(idx, []byte(t.Pack())) - case op == "TUPLE_PACK_VERSIONSTAMP": + case op == "TUPLE_PACK_WITH_VERSIONSTAMP": var t tuple.Tuple - count := sm.waitAndPop().item.(int64) - for i := 0; i < int(count); i++ { + + prefix := sm.waitAndPop().item.([]byte) + c := sm.waitAndPop().item.(int64) + for i := 0; i < int(c); i++ { t = append(t, sm.waitAndPop().item) } - incomplete, err := t.HasIncompleteVersionstamp() - if incomplete == false { + packed, err := t.PackWithVersionstamp(prefix) + if err != nil && strings.Contains(err.Error(), "No incomplete") { sm.store(idx, []byte("ERROR: NONE")) } else if err != nil { sm.store(idx, []byte("ERROR: MULTIPLE")) } else { - packed := t.Pack() sm.store(idx, []byte("OK")) sm.store(idx, packed) } @@ -911,7 +912,7 @@ func main() { log.Fatal("API version not equal to value selected") } - db, e = fdb.OpenDatabase(clusterFile) + db, e = fdb.Open(clusterFile, []byte("DB")) if e != nil { log.Fatal(e) } diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index 2c30705ba0..d3f289e2eb 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -73,7 +73,10 @@ type Tuple []TupleElement // an instance of this type. 
type UUID [16]byte -// Versionstamp . +// Versionstamp is struct for a FoundationDB verionstamp. Versionstamps are +// 12 bytes long composed of a 10 byte transaction version and a 2 byte user +// version. The transaction version is filled in at commit time and the user +// version is provided by your layer during a transaction. type Versionstamp struct { TransactionVersion [10]byte UserVersion uint16 @@ -83,7 +86,8 @@ var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, const versionstampLength = 12 -// IncompleteVersionstamp . +// IncompleteVersionstamp is the constructor you should use to make +// an incomplete versionstamp to use in a tuple. func IncompleteVersionstamp(userVersion uint16) Versionstamp { return Versionstamp{ TransactionVersion: incompleteTransactionVersion, @@ -91,16 +95,14 @@ func IncompleteVersionstamp(userVersion uint16) Versionstamp { } } -// Bytes . +// Bytes converts a Versionstamp struct to a byte slice for encoding in a tuple. func (v Versionstamp) Bytes() []byte { - var scratch [12]byte + var scratch [versionstampLength]byte copy(scratch[:], v.TransactionVersion[:]) binary.BigEndian.PutUint16(scratch[10:], v.UserVersion) - fmt.Println(scratch) - return scratch[:] } @@ -293,8 +295,8 @@ func (p *packer) encodeUUID(u UUID) { func (p *packer) encodeVersionstamp(v Versionstamp) { p.putByte(versionstampCode) - if p.versionstampPos != 0 && v.TransactionVersion == incompleteTransactionVersion { - panic(fmt.Sprintf("Tuple can only contain one unbound versionstamp")) + if p.versionstampPos != -1 && v.TransactionVersion == incompleteTransactionVersion { + panic(fmt.Sprintf("Tuple can only contain one incomplete versionstamp")) } else { p.versionstampPos = int32(len(p.buf)) } @@ -368,49 +370,93 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { // Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to // call Pack when using a Tuple with a FoundationDB API function that requires a // key. 
+// +// This method will panic if it contains an incomplete Versionstamp. Use +// PackWithVersionstamp instead. +// func (t Tuple) Pack() []byte { p := newPacker() p.encodeTuple(t, false) return p.buf } -// PackWithVersionstamp packs the specified tuple into a key for versionstamp operations -func (t Tuple) PackWithVersionstamp() ([]byte, error) { +// PackWithVersionstamp packs the specified tuple into a key for versionstamp +// operations. See Pack for more information. This function will return an error +// if you attempt to pack a tuple with more than one versionstamp. This function will +// return an error if you attempt to pack a tuple with a versionstamp position larger +// than an uint16 on apiVersion < 520. +func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) { hasVersionstamp, err := t.HasIncompleteVersionstamp() if err != nil { return nil, err } + if hasVersionstamp == false { + return nil, errors.New("No incomplete versionstamp included in tuple pack with versionstamp") + } + p := newPacker() + + prefixLength := int32(0) + if prefix != nil { + prefixLength = int32(len(prefix)) + p.putBytes(prefix) + } + p.encodeTuple(t, false) if hasVersionstamp { var scratch [4]byte - binary.LittleEndian.PutUint32(scratch[:], uint32(p.versionstampPos)) - p.putBytes(scratch[:]) + var offsetIndex int + + apiVersion := fdb.MustGetAPIVersion() + if apiVersion < 520 { + if p.versionstampPos > math.MaxUint16 { + return nil, errors.New("Versionstamp position too large") + } + + offsetIndex = 1 + binary.LittleEndian.PutUint16(scratch[:], uint16(prefixLength+p.versionstampPos)) + } else { + offsetIndex = 3 + binary.LittleEndian.PutUint32(scratch[:], uint32(prefixLength+p.versionstampPos)) + } + + p.putBytes(scratch[0:offsetIndex]) } return p.buf, nil } -// HasIncompleteVersionstamp determines if there is at least one incomplete versionstamp in a tuple +// HasIncompleteVersionstamp determines if there is at least one incomplete +// versionstamp in a tuple. 
This function will return an error this tuple has +// more than one versionstamp. func (t Tuple) HasIncompleteVersionstamp() (bool, error) { + incompleteCount := t.countIncompleteVersionstamps() + + var err error + if incompleteCount > 1 { + err = errors.New("Tuple can only contain one incomplete versionstamp") + } + + return incompleteCount == 1, err +} + +func (t Tuple) countIncompleteVersionstamps() int { incompleteCount := 0 + for _, el := range t { switch e := el.(type) { case Versionstamp: if e.TransactionVersion == incompleteTransactionVersion { incompleteCount++ } + case Tuple: + incompleteCount += e.countIncompleteVersionstamps() } } - var err error - if incompleteCount > 1 { - err = errors.New("Tuple can only contain one unbound versionstamp") - } - - return incompleteCount == 1, err + return incompleteCount } func findTerminator(b []byte) int { From 77f7c0721f2ad93f5d4f03a7da49220987fc3c84 Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Wed, 6 Mar 2019 18:27:43 -0500 Subject: [PATCH 37/71] revent this again because my environment is dumb --- bindings/go/src/_stacktester/stacktester.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/go/src/_stacktester/stacktester.go b/bindings/go/src/_stacktester/stacktester.go index 0c925af503..d2dc1bcbe3 100644 --- a/bindings/go/src/_stacktester/stacktester.go +++ b/bindings/go/src/_stacktester/stacktester.go @@ -912,7 +912,7 @@ func main() { log.Fatal("API version not equal to value selected") } - db, e = fdb.Open(clusterFile, []byte("DB")) + db, e = fdb.OpenDatabase(clusterFile) if e != nil { log.Fatal(e) } From 1464bceecee8cb4185563fbde4c87cba5bd8cb7a Mon Sep 17 00:00:00 2001 From: Meng Xu Date: Wed, 6 Mar 2019 17:02:24 -0800 Subject: [PATCH 38/71] ReleaseNote: Correct the format Make the improved replication mechanism feature as bullet instead of a paragraph. 
--- documentation/sphinx/source/release-notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/sphinx/source/release-notes.rst b/documentation/sphinx/source/release-notes.rst index fa83fc2875..0bdcc4360f 100644 --- a/documentation/sphinx/source/release-notes.rst +++ b/documentation/sphinx/source/release-notes.rst @@ -7,7 +7,7 @@ Release Notes Features -------- -Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) `. +* Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) `. * Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) `_ From 43c290b5461de30af1d8f33edaeac8c6816df723 Mon Sep 17 00:00:00 2001 From: Balachandar Namasivayam Date: Wed, 6 Mar 2019 17:39:29 -0800 Subject: [PATCH 39/71] Address review comments. 
--- fdbclient/FileBackupAgent.actor.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fdbclient/FileBackupAgent.actor.cpp b/fdbclient/FileBackupAgent.actor.cpp index 05b96017ae..6ef06cb4ef 100644 --- a/fdbclient/FileBackupAgent.actor.cpp +++ b/fdbclient/FileBackupAgent.actor.cpp @@ -172,7 +172,11 @@ public: return configSpace.pack(LiteralStringRef(__FUNCTION__)); } - ACTOR static Future> getRestoreRangesOrDefault(RestoreConfig *self, Reference tr) { + Future> getRestoreRangesOrDefault(Reference tr) { + return getRestoreRangesOrDefault_impl(this, tr); + } + + ACTOR static Future> getRestoreRangesOrDefault_impl(RestoreConfig *self, Reference tr) { state std::vector ranges = wait(self->restoreRanges().getD(tr)); if (ranges.empty()) { state KeyRange range = wait(self->restoreRange().getD(tr)); @@ -378,7 +382,7 @@ ACTOR Future RestoreConfig::getFullStatus_impl(RestoreConfig restor tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); - state Future> ranges = RestoreConfig::getRestoreRangesOrDefault(&restore, tr); + state Future> ranges = restore.getRestoreRangesOrDefault(tr); state Future addPrefix = restore.addPrefix().getD(tr); state Future removePrefix = restore.removePrefix().getD(tr); state Future url = restore.sourceContainerURL().getD(tr); @@ -2538,7 +2542,7 @@ namespace fileBackup { tr->setOption(FDBTransactionOptions::LOCK_AWARE); bc = restore.sourceContainer().getOrThrow(tr); - restoreRanges = RestoreConfig::getRestoreRangesOrDefault(&restore, tr); + restoreRanges = restore.getRestoreRangesOrDefault(tr); addPrefix = restore.addPrefix().getD(tr); removePrefix = restore.removePrefix().getD(tr); From 83e4b966d5185fa4f3e35efc5936e0ce9c1a9457 Mon Sep 17 00:00:00 2001 From: Alec Grieser Date: Wed, 6 Mar 2019 18:34:36 -0800 Subject: [PATCH 40/71] Resolves #1235: Java: FDBExceptions are created on successful operation completion (#1236) The native function `NativeFuture::Future_getError`
now returns `null` when the error code is 0 instead of an `FDBException` with a "Success" message and an error code of 0. This was only used in two places within the codebase; those two places now check for `null` errors and treats them like successes. --- bindings/java/fdbJNI.cpp | 6 +++++- .../java/src/main/com/apple/foundationdb/FutureResults.java | 2 +- .../java/src/main/com/apple/foundationdb/FutureVoid.java | 2 +- documentation/sphinx/source/release-notes.rst | 2 ++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/bindings/java/fdbJNI.cpp b/bindings/java/fdbJNI.cpp index d93c34b0f0..3fac730bdb 100644 --- a/bindings/java/fdbJNI.cpp +++ b/bindings/java/fdbJNI.cpp @@ -206,7 +206,11 @@ JNIEXPORT jthrowable JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1ge return JNI_NULL; } FDBFuture *sav = (FDBFuture *)future; - return getThrowable( jenv, fdb_future_get_error( sav ) ); + fdb_error_t err = fdb_future_get_error( sav ); + if( err ) + return getThrowable( jenv, err ); + else + return JNI_NULL; } JNIEXPORT jboolean JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1isReady(JNIEnv *jenv, jobject, jlong future) { diff --git a/bindings/java/src/main/com/apple/foundationdb/FutureResults.java b/bindings/java/src/main/com/apple/foundationdb/FutureResults.java index a710fca8a4..4900d45dc2 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FutureResults.java +++ b/bindings/java/src/main/com/apple/foundationdb/FutureResults.java @@ -37,7 +37,7 @@ class FutureResults extends NativeFuture { protected RangeResultInfo getIfDone_internal(long cPtr) throws FDBException { FDBException err = Future_getError(cPtr); - if(!err.isSuccess()) { + if(err != null && !err.isSuccess()) { throw err; } diff --git a/bindings/java/src/main/com/apple/foundationdb/FutureVoid.java b/bindings/java/src/main/com/apple/foundationdb/FutureVoid.java index c5f7b6eb36..86ba565ea9 100644 --- a/bindings/java/src/main/com/apple/foundationdb/FutureVoid.java +++ 
b/bindings/java/src/main/com/apple/foundationdb/FutureVoid.java @@ -34,7 +34,7 @@ class FutureVoid extends NativeFuture { // with a get on the error and throw if the error is not success. FDBException err = Future_getError(cPtr); - if(!err.isSuccess()) { + if(err != null && !err.isSuccess()) { throw err; } return null; diff --git a/documentation/sphinx/source/release-notes.rst b/documentation/sphinx/source/release-notes.rst index e6cda1a0b3..112615bd0e 100644 --- a/documentation/sphinx/source/release-notes.rst +++ b/documentation/sphinx/source/release-notes.rst @@ -20,6 +20,8 @@ Improved replication mechanism, a new hierarchical replication technique that fu Performance ----------- +* Java: Succesful commits and range reads no longer create ``FDBException`` objects to reduce memory pressure. `(Issue #1235) `_ + Fixes ----- From 27d199409ee09fa096205faec9f209b75098d337 Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Wed, 6 Mar 2019 17:31:13 -0800 Subject: [PATCH 41/71] Add KillRegion.actor.cpp workload to cmake --- fdbserver/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index 512cfed532..9e630d4c7c 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -130,6 +130,7 @@ set(FDBSERVER_SRCS workloads/IndexScan.actor.cpp workloads/Inventory.actor.cpp workloads/KVStoreTest.actor.cpp + workloads/KillRegion.actor.cpp workloads/LockDatabase.actor.cpp workloads/LogMetrics.actor.cpp workloads/LowLatency.actor.cpp From 2537f26de6b519a29e35408d4697d025334d64f6 Mon Sep 17 00:00:00 2001 From: mpilman Date: Wed, 27 Feb 2019 20:17:11 -0800 Subject: [PATCH 42/71] First implementaion of more user-friendly cpack Up unto here this code is only very rudiemantery tested. This is a firest attempt of making cpack more user-friendly. The basic idea is to generate a component for package type so that we can have different paths depending on whether we build an RPM, a DEB, a TGZ, or a MacOS installer. 
The cpack package config file will then chose the correct components to use. In a later point this should make it possible to build these with `make packages` and the ugly iteration with calling cmake between each package would be obsolete. While this solution is a bit more bloated, it is also much more flexible and it will be much easier to use. Another benefit is, that this will get rid of all warnings during a cpack run --- bindings/c/CMakeLists.txt | 28 ++- cmake/CPackConfig.cmake | 17 ++ cmake/InstallLayout.cmake | 414 ++++++++++++++++++++------------------ fdbbackup/CMakeLists.txt | 34 ++-- fdbcli/CMakeLists.txt | 4 +- fdbmonitor/CMakeLists.txt | 4 +- fdbserver/CMakeLists.txt | 4 +- fdbservice/CMakeLists.txt | 4 - 8 files changed, 274 insertions(+), 235 deletions(-) create mode 100644 cmake/CPackConfig.cmake diff --git a/bindings/c/CMakeLists.txt b/bindings/c/CMakeLists.txt index a2420ceeec..8a36c8bb83 100644 --- a/bindings/c/CMakeLists.txt +++ b/bindings/c/CMakeLists.txt @@ -59,18 +59,16 @@ if(NOT WIN32) target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c) endif() -if(NOT OPEN_FOR_IDE) - # TODO: re-enable once the old vcxproj-based build system is removed. - #generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT" - # EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h) - install(TARGETS fdb_c - EXPORT fdbc - DESTINATION ${FDB_LIB_DIR} - COMPONENT clients) - install( - FILES foundationdb/fdb_c.h - ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h - ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options - DESTINATION ${FDB_INCLUDE_INSTALL_DIR}/foundationdb COMPONENT clients) - #install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients) -endif() +# TODO: re-enable once the old vcxproj-based build system is removed. 
+#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT" +# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h) +fdb_install(TARGETS fdb_c + EXPORT fdbc + DESTINATION lib + COMPONENT clients) +fdb_install( + FILES foundationdb/fdb_c.h + ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h + ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options + DESTINATION include COMPONENT clients) +#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients) diff --git a/cmake/CPackConfig.cmake b/cmake/CPackConfig.cmake new file mode 100644 index 0000000000..212177e2c8 --- /dev/null +++ b/cmake/CPackConfig.cmake @@ -0,0 +1,17 @@ +# RPM specifics +if(CPACK_GENERATOR MATCHES "RPM") + set(CPACK_COMPONENTS_ALL clients-el6 clients-el7 server-el6 server-el7) +elseif(CPACK_GENERATOR MATCHES "DEB") + set(CPACK_COMPONENTS_ALL clients-deb server-deb) +elseif(CPACK_GENERATOR MATCHES "PackageMaker") + set(CPACK_COMPONENTS_ALL clients-pm server-pm) + set(CPACK_STRIP_FILES TRUE) + set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) + set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall) + set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) +elseif(CPACK_GENERATOR MATCHES "TGZ") + set(CPACK_STRIP_FILES TRUE) + set(CPACK_COMPONENTS_ALL clients-tgz server-tgz) +else() + message(FATAL_ERROR "Unsupported package format ${CPACK_GENERATOR}") +endif() diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index e186ebc2bf..53e2b85ace 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -2,11 +2,11 @@ # Helper Functions ################################################################################ -function(install_symlink) +function(install_symlink_impl) if (NOT WIN32) set(options "") - set(one_value_options COMPONENT TO DESTINATION) - set(multi_value_options) + set(one_value_options 
TO DESTINATION) + set(multi_value_options COMPONENTS) cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}") file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks) @@ -14,95 +14,132 @@ function(install_symlink) get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY) set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname}) execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl}) - install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${SYM_COMPONENT}) + foreach(component IN LISTS SYM_COMPONENTS) + install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${component}) + endforeach() endif() endfunction() -if(NOT INSTALL_LAYOUT) - if(WIN32) - set(DEFAULT_INSTALL_LAYOUT "WIN") - else() - set(DEFAULT_INSTALL_LAYOUT "STANDALONE") +function(install_symlink) + if(NOT WIN32 AND NOT OPEN_FOR_IDE) + set(options "") + set(one_value_options COMPONENT LINK_DIR FILE_DIR LINK_NAME FILE_NAME) + set(multi_value_options "") + cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}") + + string(REGEX MATCHALL "\\/" slashes "${IN_LINK_NAME}") + list(LENGTH slashes num_link_subdirs) + foreach(i RANGE 1 ${num_link_subdirs}) + set(rel_path "../${rel_path}") + endforeach() + if("${IN_FILE_DIR}" MATCHES "bin") + if("${IN_LINK_DIR}" MATCHES "lib") + install_symlink_impl( + TO "../${rel_path}/bin/${IN_FILE_NAME}" + DESTINATION "lib/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-tgz") + install_symlink_impl( + TO "../${rel_path}/bin/${IN_FILE_NAME}" + DESTINATION "usr/lib64/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-el6" + "${IN_COMPONENT}-el7" + "${IN_COMPONENT}-deb") + install_symlink_impl( + TO "../${rel_path}/bin/${IN_FILE_NAME}" + DESTINATION "usr/lib64/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-deb") + elseif("${IN_LINK_DIR}" MATCHES "bin") + install_symlink_impl( + TO "../${rel_path}/bin/${IN_FILE_NAME}" + DESTINATION "bin/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-tgz") + 
install_symlink_impl( + TO "../${rel_path}/bin/${IN_FILE_NAME}" + DESTINATION "usr/bin/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-el6" + "${IN_COMPONENT}-el7" + "${IN_COMPONENT}-deb") + else() + message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}") + endif() + else() + message(FATAL_ERROR "Unknown FILE_DIR ${IN_FILE_DIR}") + endif() endif() -endif() -set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}" - CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX") +endfunction() -set(DIR_LAYOUT ${INSTALL_LAYOUT}) -if(DIR_LAYOUT MATCHES "TARGZ") - set(DIR_LAYOUT "STANDALONE") +function(fdb_install) + if(NOT WIN32 AND NOT OPEN_FOR_IDE) + set(options EXPORT) + set(one_value_options COMPONENT DESTINATION) + set(multi_value_options TARGETS FILES DIRECTORY) + cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}") + + if(IN_TARGETS) + set(args TARGETS ${IN_TARGETS}) + elseif(IN_FILES) + set(args FILES ${IN_FILES}) + elseif(IN_DIRECTORY) + set(args DIRECTORY ${IN_DIRECTORY}) + else() + message(FATAL_ERROR "Expected FILES or TARGETS") + endif() + if(IN_EXPORT) + set(args EXPORT) + endif() + if("${IN_DESTINATION}" MATCHES "bin") + install(${args} DESTINATION "bin" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/bin" COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" MATCHES "sbin") + install(${args} DESTINATION "sbin" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/libexec" 
COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" MATCHES "libexec") + install(${args} DESTINATION "libexec" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/lib/foundationdb" COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" MATCHES "include") + install(${args} DESTINATION "include" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/include" COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" MATCHES "etc") + install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/etc/foundationdb" COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" MATCHES "log") + install(${args} DESTINATION "log/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el7") + elseif("${IN_DESTINATION}" MATCHES "data") + install(${args} DESTINATION "lib/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") + install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION 
"var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el7") + endif() + endif() +endfunction() + +if(APPLE) + set(CPACK_GENERATOR TGZ PackageMaker) +else() + set(CPACK_GENERATOR RPM DEB TGZ) endif() -get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS) set(CPACK_PACKAGE_CHECKSUM SHA256) - -set(FDB_CONFIG_DIR "etc/foundationdb") -if("${LIB64}" STREQUAL "TRUE") - set(LIBSUFFIX 64) -else() - set(LIBSUFFIX "") -endif() -set(FDB_LIB_NOSUFFIX "lib") -if(DIR_LAYOUT MATCHES "STANDALONE") - set(FDB_LIB_DIR "lib${LIBSUFFIX}") - set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}") - set(FDB_BIN_DIR "bin") - set(FDB_SBIN_DIR "sbin") - set(FDB_INCLUDE_INSTALL_DIR "include") - set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb") - set(FDB_SHARE_DIR "share") -elseif(DIR_LAYOUT MATCHES "WIN") - set(CPACK_GENERATOR "ZIP") - set(FDB_CONFIG_DIR "etc") - set(FDB_LIB_DIR "lib") - set(FDB_LIB_NOSUFFIX "lib") - set(FDB_LIBEXEC_DIR "bin") - set(FDB_SHARE_DIR "share") - set(FDB_BIN_DIR "bin") - set(FDB_SBIN_DIR "bin") - set(FDB_INCLUDE_INSTALL_DIR "include") - set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb") - set(FDB_SHARE_DIR "share") -elseif(DIR_LAYOUT MATCHES "OSX") - set(CPACK_GENERATOR productbuild) - set(CPACK_PACKAGING_INSTALL_PREFIX "/") - set(FDB_CONFIG_DIR "usr/local/etc/foundationdb") - set(FDB_LIB_DIR "usr/local/lib") - set(FDB_LIB_NOSUFFIX "usr/local/lib") - set(FDB_LIBEXEC_DIR "usr/local/libexec") - set(FDB_BIN_DIR "usr/local/bin") - set(FDB_SBIN_DIR "usr/local/libexec") - set(FDB_INCLUDE_INSTALL_DIR "usr/local/include") - set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb") - set(FDB_SHARE_DIR "usr/local/share") -else() - if(DIR_LAYOUT MATCHES "RPM") - set(CPACK_GENERATOR RPM) - else() - # DEB - set(CPACK_GENERATOR "DEB") - set(LIBSUFFIX "") - endif() - set(CMAKE_INSTALL_PREFIX "/") - set(CPACK_PACKAGING_INSTALL_PREFIX "/") - 
set(FDB_CONFIG_DIR "etc/foundationdb") - set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}") - set(FDB_LIB_NOSUFFIX "usr/lib") - set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR}) - set(FDB_BIN_DIR "usr/bin") - set(FDB_SBIN_DIR "usr/sbin") - set(FDB_INCLUDE_INSTALL_DIR "usr/include") - set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb") - set(FDB_SHARE_DIR "usr/share") -endif() - -if(INSTALL_LAYOUT MATCHES "OSX") - set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIBEXEC_DIR}") -else() - set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIB_NOSUFFIX}/foundationdb") -endif() - +set(CPACK_PROJECT_CONFIG_FILE "${CMAKE_SOURCE_DIR}/cmake/CPackConfig.cmake") ################################################################################ # Version information @@ -146,87 +183,97 @@ endif() # Configuration for RPM ################################################################################ -if(UNIX AND NOT APPLE) - install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server) - install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server) -endif() +file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir") +fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server) +fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION lib COMPONENT server) -if(INSTALL_LAYOUT MATCHES "RPM") - set(CPACK_RPM_server_USER_FILELIST - "%config(noreplace) /etc/foundationdb/foundationdb.conf" - "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb" - "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb") - set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION - "/usr/sbin" - "/usr/share/java" - "/usr/lib64/python2.7" - "/usr/lib64/python2.7/site-packages" - "/var" - "/var/log" - "/var/lib" - "/lib" - "/lib/systemd" - "/lib/systemd/system" - "/etc/rc.d/init.d") - set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON) - set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON) - set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src) - 
set(CPACK_RPM_COMPONENT_INSTALL ON) - set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh) - set(CPACK_RPM_clients_POST_INSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh) - set(CPACK_RPM_server_PRE_INSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) - set(CPACK_RPM_server_POST_INSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) - set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) - set(CPACK_RPM_server_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") - set(CPACK_RPM_server_PACKAGE_RE) - #set(CPACK_RPM_java_PACKAGE_REQUIRES - # "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") - set(CPACK_RPM_python_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") -endif() +set(CPACK_RPM_server_USER_FILELIST + "%config(noreplace) /etc/foundationdb/foundationdb.conf" + "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb" + "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb") +set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION + "/usr/sbin" + "/usr/share/java" + "/usr/lib64/python2.7" + "/usr/lib64/python2.7/site-packages" + "/var" + "/var/log" + "/var/lib" + "/lib" + "/lib/systemd" + "/lib/systemd/system" + "/etc/rc.d/init.d") +set(CPACK_RPM_DEBUGINFO_PACKAGE ON) +set(CPACK_RPM_DEBUGINFO_PACKAGE ON) +set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src) +set(CPACK_RPM_COMPONENT_INSTALL ON) + +set(CPACK_RPM_clients-el6_PRE_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh) +set(CPACK_RPM_clients-el7_PRE_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh) + +set(CPACK_RPM_clients-el6_POST_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh) +set(CPACK_RPM_clients-el7_POST_INSTALL_SCRIPT_FILE + 
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh) + +set(CPACK_RPM_server-el6_PRE_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) +set(CPACK_RPM_server-el7_PRE_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) + +set(CPACK_RPM_server-el6_POST_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) +set(CPACK_RPM_server-el7_POST_INSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) + +set(CPACK_RPM_server-el6_PRE_UNINSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) +set(CPACK_RPM_server-el7_PRE_UNINSTALL_SCRIPT_FILE + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) + +set(CPACK_RPM_server-el6_PACKAGE_REQUIRES + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") +set(CPACK_RPM_server-el7_PACKAGE_REQUIRES + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") +#set(CPACK_RPM_java_PACKAGE_REQUIRES +# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") +set(CPACK_RPM_python_PACKAGE_REQUIRES + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") ################################################################################ # Configuration for DEB ################################################################################ -if(INSTALL_LAYOUT MATCHES "DEB") - set(CPACK_DEB_COMPONENT_INSTALL ON) - set(CPACK_DEBIAN_PACKAGE_SECTION "database") - set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON) +set(CPACK_DEB_COMPONENT_INSTALL ON) +set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON) +set(CPACK_DEBIAN_PACKAGE_SECTION "database") +set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON) - set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})") - set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)") - set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org") - 
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst) - set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm - ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm) -endif() +set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})") +set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)") +set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org") +set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_CONTROL_EXTRA + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst) +set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_CONTROL_EXTRA + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm + ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm) ################################################################################ # MacOS configuration ################################################################################ -if(INSTALL_LAYOUT MATCHES "OSX") - set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) - set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall) - set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) +if(NOT WIN32) install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh DESTINATION "usr/local/foundationdb" - COMPONENT 
clients) + COMPONENT clients-pm) install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist DESTINATION "Library/LaunchDaemons" - COMPONENT server) + COMPONENT server-pm) endif() ################################################################################ @@ -239,54 +286,33 @@ set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description") set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description") if(NOT WIN32) - if(INSTALL_LAYOUT MATCHES "OSX") - install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new - DESTINATION ${FDB_CONFIG_DIR} - COMPONENT server) - else() - install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf - DESTINATION ${FDB_CONFIG_DIR} - COMPONENT server) - endif() + install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new + DESTINATION "usr/local/etc" + COMPONENT server-pm) + fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf + DESTINATION etc + COMPONENT server) install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py - DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb" - COMPONENT server) + DESTINATION "usr/lib/foundationdb" + COMPONENT server-el6) install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py - DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb" - COMPONENT server) -else() - install(FILES ${CMAKE_BINARY_DIR}/fdb.cluster - DESTINATION "etc" - COMPONENT server) -endif() -if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB")) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb - ${CMAKE_BINARY_DIR}/packaging/rpm) - install( - DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb - DESTINATION "var/log" - COMPONENT server) - install( - DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb - DESTINATION "var/lib" - COMPONENT server) - execute_process( - COMMAND pidof systemd - RESULT_VARIABLE IS_SYSTEMD - OUTPUT_QUIET - ERROR_QUIET) + DESTINATION "usr/lib/foundationdb" + COMPONENT server-el6) + install(FILES 
${CMAKE_SOURCE_DIR}/packaging/argparse.py + DESTINATION "usr/lib/foundationdb" + COMPONENT server-deb) + install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py + DESTINATION "usr/lib/foundationdb" + COMPONENT server-deb) install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service DESTINATION "lib/systemd/system" - COMPONENT server) - if(INSTALL_LAYOUT MATCHES "RPM") - install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init - DESTINATION "etc/rc.d/init.d" - RENAME "foundationdb" - COMPONENT server) - else() - install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init - DESTINATION "etc/init.d" - RENAME "foundationdb" - COMPONENT server) - endif() + COMPONENT server-el7) + install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init + DESTINATION "etc/rc.d/init.d" + RENAME "foundationdb" + COMPONENT server-el6) + install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init + DESTINATION "etc/init.d" + RENAME "foundationdb" + COMPONENT server-deb) endif() diff --git a/fdbbackup/CMakeLists.txt b/fdbbackup/CMakeLists.txt index fc4b6f3597..b892a83565 100644 --- a/fdbbackup/CMakeLists.txt +++ b/fdbbackup/CMakeLists.txt @@ -5,21 +5,29 @@ add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS}) target_link_libraries(fdbbackup PRIVATE fdbclient) if(NOT OPEN_FOR_IDE) - install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients) + fdb_install(TARGETS fdbbackup DESTINATION bin COMPONENT clients) install_symlink( - TO /${FDB_BIN_DIR}/fdbbackup - DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent - COMPONENT clients) + COMPONENT clients + FILE_DIR bin + LINK_DIR lib + FILE_NAME fdbbackup + LINK_NAME foundationdb/backup_agent/backup_agent) install_symlink( - TO /${FDB_BIN_DIR}/fdbbackup - DESTINATION ${FDB_BIN_DIR}/fdbrestore - COMPONENT clients) + COMPONENT clients + FILE_DIR bin + LINK_DIR bin + FILE_NAME fdbbackup + LINK_NAME fdbrestore) install_symlink( - TO /${FDB_BIN_DIR}/fdbbackup - 
DESTINATION ${FDB_BIN_DIR}/dr_agent - COMPONENT clients) + COMPONENT clients + FILE_DIR bin + LINK_DIR bin + FILE_NAME fdbbackup + LINK_NAME dr_agent) install_symlink( - TO /${FDB_BIN_DIR}/fdbbackup - DESTINATION ${FDB_BIN_DIR}/fdbdr - COMPONENT clients) + COMPONENT clients + FILE_DIR bin + LINK_DIR bin + FILE_NAME fdbbackup + LINK_NAME fdbdr) endif() diff --git a/fdbcli/CMakeLists.txt b/fdbcli/CMakeLists.txt index 34d5fe821c..9624bc1a37 100644 --- a/fdbcli/CMakeLists.txt +++ b/fdbcli/CMakeLists.txt @@ -11,6 +11,4 @@ endif() add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS}) target_link_libraries(fdbcli PRIVATE fdbclient) -if(NOT OPEN_FOR_IDE) - install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients) -endif() +fdb_install(TARGETS fdbcli DESTINATION bin COMPONENT clients) diff --git a/fdbmonitor/CMakeLists.txt b/fdbmonitor/CMakeLists.txt index 8d126ba7f0..445638a9b3 100644 --- a/fdbmonitor/CMakeLists.txt +++ b/fdbmonitor/CMakeLists.txt @@ -9,6 +9,4 @@ endif() # as soon as we get rid of the old build system target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient) -if(NOT OPEN_FOR_IDE) - install(TARGETS fdbmonitor DESTINATION "${FDBMONITOR_INSTALL_LOCATION}" COMPONENT server) -endif() +fdb_install(TARGETS fdbmonitor DESTINATION libexec COMPONENT server) diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index 9e630d4c7c..6749db266d 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -185,6 +185,4 @@ target_include_directories(fdbserver PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/workloads) target_link_libraries(fdbserver PRIVATE fdbclient) -if(NOT OPEN_FOR_IDE) - install(TARGETS fdbserver DESTINATION ${FDB_SBIN_DIR} COMPONENT server) -endif() +fdb_install(TARGETS fdbserver DESTINATION sbin COMPONENT server) diff --git a/fdbservice/CMakeLists.txt b/fdbservice/CMakeLists.txt index e7a9877577..6db8c54d86 100644 --- a/fdbservice/CMakeLists.txt +++ b/fdbservice/CMakeLists.txt @@ -5,7 +5,3 @@ 
add_executable(fdbmonitor ${FDBSERVICE_SRCS}) # FIXME: This include directory is an ugly hack. We probably want to fix this # as soon as we get rid of the old build system target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient) - -if(NOT OPEN_FOR_IDE) - install(TARGETS fdbmonitor DESTINATION "${FDB_BIN_DIR}" COMPONENT server) -endif() From 42e0a89a669efa56a4b6ab77d584c35b2f603aad Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 28 Feb 2019 14:05:24 -0800 Subject: [PATCH 43/71] This makes package generation work Resulting packages are not tested yet --- build/cmake/build.sh | 1 + cmake/CPackConfig.cmake | 9 +++++++++ cmake/InstallLayout.cmake | 35 ++++++++++++++++++++++++++--------- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/build/cmake/build.sh b/build/cmake/build.sh index e0ad315a1b..75d33646b1 100644 --- a/build/cmake/build.sh +++ b/build/cmake/build.sh @@ -87,6 +87,7 @@ package_fast() { for _ in 1 do make -j`nproc` packages + make -j`nproc` package __res=$? 
if [ ${__res} -ne 0 ] then diff --git a/cmake/CPackConfig.cmake b/cmake/CPackConfig.cmake index 212177e2c8..15580aa84b 100644 --- a/cmake/CPackConfig.cmake +++ b/cmake/CPackConfig.cmake @@ -1,17 +1,26 @@ # RPM specifics if(CPACK_GENERATOR MATCHES "RPM") set(CPACK_COMPONENTS_ALL clients-el6 clients-el7 server-el6 server-el7) + set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) + set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) elseif(CPACK_GENERATOR MATCHES "DEB") set(CPACK_COMPONENTS_ALL clients-deb server-deb) + set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) + set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) elseif(CPACK_GENERATOR MATCHES "PackageMaker") set(CPACK_COMPONENTS_ALL clients-pm server-pm) set(CPACK_STRIP_FILES TRUE) set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall) set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) + set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf) + set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources) + set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt) elseif(CPACK_GENERATOR MATCHES "TGZ") set(CPACK_STRIP_FILES TRUE) set(CPACK_COMPONENTS_ALL clients-tgz server-tgz) + set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) + set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) else() message(FATAL_ERROR "Unsupported package format ${CPACK_GENERATOR}") endif() diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 53e2b85ace..a5354568bb 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -168,21 +168,28 @@ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico) set(CPACK_PACKAGE_CONTACT "The FoundationDB 
Community") set(CPACK_COMPONENT_server_DEPENDS clients) -if (INSTALL_LAYOUT MATCHES "OSX") - # MacOS needs a file exiension for the LICENSE file - set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf) - set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources) - configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY) - set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt) -else() - set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) - set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) + +# MacOS needs a file exiension for the LICENSE file +configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY) + +################################################################################ +# Filename of packages +################################################################################ + +if(NOT FDB_RELEASE) + set(prerelease_string ".PRERELEASE") endif() +set(clients-filename "foundationdb-clients-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}") +set(server-filename "foundationdb-server-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}") ################################################################################ # Configuration for RPM ################################################################################ +set(CPACK_RPM_clients-el6_FILE_NAME "${clients-filename}.el6.x86_64.rpm") +set(CPACK_RPM_clients-el7_FILE_NAME "${clients-filename}.el7.x86_64.rpm") +set(CPACK_RPM_server-el6_FILE_NAME "${server-filename}.el6.x86_64.rpm") +set(CPACK_RPM_server-el7_FILE_NAME "${server-filename}.el7.x86_64.rpm") file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir") fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server) fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION lib COMPONENT server) @@ -246,6 +253,8 @@ 
set(CPACK_RPM_python_PACKAGE_REQUIRES # Configuration for DEB ################################################################################ +set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${clients-filename}_amd64.deb") +set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${server-filename}_amd64.deb") set(CPACK_DEB_COMPONENT_INSTALL ON) set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON) set(CPACK_DEBIAN_PACKAGE_SECTION "database") @@ -276,6 +285,14 @@ if(NOT WIN32) COMPONENT server-pm) endif() +################################################################################ +# Configuration for DEB +################################################################################ + +set(CPACK_ARCHIVE_COMPONENT_INSTALL ON) +set(CPACK_ARCHIVE_CLIENTS-TGZ_FILE_NAME "${clients-filename}.x86_64") +set(CPACK_ARCHIVE_SERVER-TGZ_FILE_NAME "${server-filename}.x86_64") + ################################################################################ # Server configuration ################################################################################ From 2b11a66ff95c64110b323e71d8c0ed858ba75488 Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 28 Feb 2019 18:01:31 -0800 Subject: [PATCH 44/71] Improved package testing The requirements changed here - so in order to test these cmake changes properly, we need a framework that does that --- .../{centos-test => centos6-test}/Dockerfile | 0 build/cmake/centos7-test/Dockerfile | 3 + build/cmake/docker.ini | 18 +- build/cmake/package_tester/deb_tests.sh | 1 + build/cmake/package_tester/modules/config.sh | 15 +- build/cmake/package_tester/modules/deb.sh | 2 +- build/cmake/package_tester/modules/docker.sh | 192 +++++------------- build/cmake/package_tester/modules/globals.sh | 23 +++ build/cmake/package_tester/modules/rpm.sh | 4 +- .../cmake/package_tester/modules/test_args.sh | 3 +- build/cmake/package_tester/modules/tests.sh | 4 - build/cmake/package_tester/modules/util.sh | 2 +- build/cmake/package_tester/rpm_tests.sh | 1 + 
build/cmake/package_tester/test_packages.sh | 1 + 14 files changed, 101 insertions(+), 168 deletions(-) rename build/cmake/{centos-test => centos6-test}/Dockerfile (100%) create mode 100644 build/cmake/centos7-test/Dockerfile create mode 100644 build/cmake/package_tester/modules/globals.sh diff --git a/build/cmake/centos-test/Dockerfile b/build/cmake/centos6-test/Dockerfile similarity index 100% rename from build/cmake/centos-test/Dockerfile rename to build/cmake/centos6-test/Dockerfile diff --git a/build/cmake/centos7-test/Dockerfile b/build/cmake/centos7-test/Dockerfile new file mode 100644 index 0000000000..6412aa71b1 --- /dev/null +++ b/build/cmake/centos7-test/Dockerfile @@ -0,0 +1,3 @@ +FROM centos:7 + +RUN yum install -y yum-utils diff --git a/build/cmake/docker.ini b/build/cmake/docker.ini index a6d638c255..367644b503 100644 --- a/build/cmake/docker.ini +++ b/build/cmake/docker.ini @@ -1,7 +1,17 @@ -[RPM_1] -name = fdb-centos -location = centos-test +[centos6] +name = fdb-centos6 +location = centos6-test +packages = ^.*el6((?!debuginfo).)*\.rpm$ +format = rpm -[DEB_1] +[centos7] +name = fdb-centos7 +location = centos7-test +packages = ^.*el7((?!debuginfo).)*\.rpm$ +format = rpm + +[ubuntu_16_04] name = fdb-debian location = debian-test +packages = ^.*\.deb$ +format = deb diff --git a/build/cmake/package_tester/deb_tests.sh b/build/cmake/package_tester/deb_tests.sh index 0735d238cc..0dc85d005b 100644 --- a/build/cmake/package_tester/deb_tests.sh +++ b/build/cmake/package_tester/deb_tests.sh @@ -2,6 +2,7 @@ source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" +source ${source_dir}/modules/globals.sh source ${source_dir}/modules/util.sh source ${source_dir}/modules/deb.sh source ${source_dir}/modules/tests.sh diff --git a/build/cmake/package_tester/modules/config.sh b/build/cmake/package_tester/modules/config.sh index efe412f8da..70fc35692c 100644 --- a/build/cmake/package_tester/modules/config.sh +++ 
b/build/cmake/package_tester/modules/config.sh @@ -17,10 +17,8 @@ then fi # parse the ini file and read it into an # associative array - declare -gA ini_name - declare -gA ini_location - - eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) print "ini_" $1 section "=" "\"" $2 "\"" }' ${docker_file})" + eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) printf "ini_%s%s=\47%s\47\n", $1, section, $2 }' ${docker_file})" + vms=( "${!ini_name[@]}" ) if [ $? -ne 0 ] then echo "ERROR: Could not parse config-file ${docker_file}" @@ -112,15 +110,6 @@ then then break fi - if [ -z ${fdb_packages+x} ] - then - config_find_packages - if [ $? -ne 0 ] - then - __res=1 - break - fi - fi config_load_vms __res=$? if [ ${__res} -ne 0 ] diff --git a/build/cmake/package_tester/modules/deb.sh b/build/cmake/package_tester/modules/deb.sh index 0124e8fb1c..575a2388de 100644 --- a/build/cmake/package_tester/modules/deb.sh +++ b/build/cmake/package_tester/modules/deb.sh @@ -10,7 +10,7 @@ then local __res=0 enterfun echo "Install FoundationDB" - cd /build + cd /build/packages package_names=() for f in "${package_files[@]}" do diff --git a/build/cmake/package_tester/modules/docker.sh b/build/cmake/package_tester/modules/docker.sh index 965be78162..db010d7ebb 100644 --- a/build/cmake/package_tester/modules/docker.sh +++ b/build/cmake/package_tester/modules/docker.sh @@ -38,7 +38,6 @@ then docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" ) docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" ) # prune - set -x if [ "${pruning_strategy}" = "ALL" ] then docker container rm "${docker_id}" > /dev/null @@ -49,7 +48,6 @@ then then docker container rm "${docker_id}" > /dev/null fi - set +x fi done sleep 1 @@ -69,157 +67,67 @@ then return ${__res} } - docker_build_and_run() { - local __res=0 - enterfun - for _ in 1 - do - if [[ "$location" = /* ]] - then - cd "${location}" - else - cd ${source_dir}/../${location} - fi - 
docker_logs="${log_dir}/docker_build_${name}" - docker build . -t ${name} 1> "${docker_logs}.log" 2> "${docker_logs}.err" - successOr "Building Docker image ${name} failed - see ${docker_logs}.log and ${docker_logs}.err" - # we start docker in interactive mode, otherwise CTRL-C won't work - if [ ! -z "${tests_to_run+x}"] - then - tests=() - IFS=';' read -ra tests <<< "${tests_to_run}" - fi - for t in "${tests[@]}" - do - if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ] - then - docker_wait_any - fi - echo "Starting Test ${PKG,,}_${t}" - docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\ - -v "${fdb_build}:/build"\ - ${name}\ - bash /foundationdb/build/cmake/package_tester/${PKG,,}_tests.sh -n ${t} ${packages_to_test[@]} ) - docker_ids+=( "${docker_id}" ) - docker_threads+=( "${PKG} - ${t} (ID: ${docker_id})" ) - done - done - exitfun - return ${__res} - } - - docker_run_tests() { - local __res=0 - enterfun - counter=1 - while true - do - if [ -z "${ini_name[${PKG}_${counter}]+x}" ] - then - # we are done - break - fi - name="${ini_name[${PKG}_${counter}]}" - location="${ini_location[${PKG}_${counter}]}" - docker_build_and_run - __res=$? - counter=$((counter+1)) - if [ ${__res} -ne 0 ] - then - break - fi - done - if [ ${counter} -eq 1 ] - then - echo -e "${YELLOW}WARNING: No docker config found!${NC}" - fi - exitfun - return ${__res} - } - - docker_debian_tests() { - local __res=0 - enterfun - PKG=DEB - packages_to_test=("${deb_packages[@]}") - docker_run_tests - __res=$? - exitfun - return ${__res} - } - - docker_rpm_tests() { - local __res=0 - enterfun - PKG=RPM - packages_to_test=("${rpm_packages[@]}") - docker_run_tests - __res=$? 
- exitfun - return ${__res} - } - docker_run() { local __res=0 enterfun for _ in 1 do - log_dir="${fdb_build}/pkg_tester" - mkdir -p "${log_dir}" - # create list of package files to test - IFS=':' read -ra packages <<< "${fdb_packages}" - deb_packages=() - rpm_packages=() - for i in "${packages[@]}" + echo "Testing the following:" + echo "======================" + for K in "${vms[@]}" do - if [[ "${i}" =~ .*".deb" ]] - then - if [ ${run_deb_tests} -ne 0 ] - then - deb_packages+=("${i}") - fi - else - if [ ${run_rpm_tests} -ne 0 ] - then - rpm_packages+=("${i}") - fi - fi - done - do_deb_tests=0 - do_rpm_tests=0 - if [ "${#deb_packages[@]}" -gt 0 ] - then - do_deb_tests=1 - echo "Will test the following debian packages:" - echo "========================================" - for i in "${deb_packages[@]}" + curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${K}]} ) ) + echo "Will test the following ${#curr_packages[@]} packages in docker-image ${K}:" + for p in "${curr_packages[@]}" do - echo " - ${i}" + echo " ${p}" done echo - fi - if [ "${#rpm_packages[@]}" -gt 0 ] + done + log_dir="${fdb_build}/pkg_tester" + mkdir -p "${log_dir}" + # setup the containers + # TODO: shall we make this parallel as well? + for vm in "${vms[@]}" + do + curr_name="${ini_name[$vm]}" + curr_location="${ini_location[$vm]}" + if [[ "$curr_location" = /* ]] + then + cd "${curr_location}" + else + cd ${source_dir}/../${curr_location} + fi + pwd + docker_logs="${log_dir}/docker_build_${curr_name}" + docker build . -t ${curr_name} 1> "${docker_logs}.log" 2> "${docker_logs}.err" + successOr "Building Docker image ${name} failed - see ${docker_logs}.log and ${docker_logs}.err" + done + if [ ! 
-z "${tests_to_run+x}"] then - do_rpm_tests=1 - echo "Will test the following rpm packages" - echo "====================================" - for i in "${rpm_packages[@]}" + tests=() + IFS=';' read -ra tests <<< "${tests_to_run}" + fi + for vm in "${vms[@]}" + do + curr_name="${ini_name[$vm]}" + curr_format="${ini_format[$vm]}" + curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${vm}]} ) ) + for curr_test in "${tests[@]}" do - echo " - ${i}" + if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ] + then + docker_wait_any + fi + echo "Starting Test ${curr_test}" + docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\ + -v "${fdb_build}:/build"\ + ${curr_name}\ + bash /foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]} ) + docker_ids+=( "${docker_id}" ) + docker_threads+=( "${PKG} - ${t} (ID: ${docker_id})" ) done - fi - if [ "${do_deb_tests}" -eq 0 ] && [ "${do_rpm_tests}" -eq 0 ] - then - echo "nothing to do" - fi - if [ "${do_deb_tests}" -ne 0 ] - then - docker_debian_tests - fi - if [ "${do_rpm_tests}" -ne 0 ] - then - docker_rpm_tests - fi + done docker_wait_all if [ "${#failed_tests[@]}" -eq 0 ] then @@ -235,6 +143,6 @@ then fi done exitfun - return ${__res} + return "${__res}" } fi diff --git a/build/cmake/package_tester/modules/globals.sh b/build/cmake/package_tester/modules/globals.sh new file mode 100644 index 0000000000..795a4adc66 --- /dev/null +++ b/build/cmake/package_tester/modules/globals.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# This module has to be included first and only once. +# This is because of a limitation of older bash versions +# that doesn't allow us to declare associative arrays +# globally. 
+ +if [ -z "${global_sh_included+x}"] +then + global_sh_included=1 +else + echo "global.sh can only be included once" + exit 1 +fi + +declare -A ini_name +declare -A ini_location +declare -A ini_packages +declare -A ini_format +declare -A test_start_state +declare -A test_exit_state +declare -a tests +declare -a vms diff --git a/build/cmake/package_tester/modules/rpm.sh b/build/cmake/package_tester/modules/rpm.sh index 7fbf7e631c..f76de6b102 100644 --- a/build/cmake/package_tester/modules/rpm.sh +++ b/build/cmake/package_tester/modules/rpm.sh @@ -9,8 +9,8 @@ then install() { local __res=0 enterfun - cd /build - declare -ga package_names + cd /build/packages + package_names=() for f in "${package_files[@]}" do package_names+=( "$(rpm -qp ${f})" ) diff --git a/build/cmake/package_tester/modules/test_args.sh b/build/cmake/package_tester/modules/test_args.sh index 49d6221137..bb88da945f 100644 --- a/build/cmake/package_tester/modules/test_args.sh +++ b/build/cmake/package_tester/modules/test_args.sh @@ -25,11 +25,12 @@ EOF do case ${opt} in h ) - arguments_usage + test_args_usage __res=2 break ;; n ) + echo "test_name=${OPTARG}" test_name="${OPTARG}" ;; \? ) diff --git a/build/cmake/package_tester/modules/tests.sh b/build/cmake/package_tester/modules/tests.sh index 063de291a0..f995b6cc07 100644 --- a/build/cmake/package_tester/modules/tests.sh +++ b/build/cmake/package_tester/modules/tests.sh @@ -28,10 +28,6 @@ # build directory can be found in `/build`, the # source code will be located in `/foundationdb` -declare -A test_start_state -declare -A test_exit_state -declare -a tests - if [ -z "${tests_sh_included}" ] then tests_sh_included=1 diff --git a/build/cmake/package_tester/modules/util.sh b/build/cmake/package_tester/modules/util.sh index 12e422a973..c3d643bdfc 100644 --- a/build/cmake/package_tester/modules/util.sh +++ b/build/cmake/package_tester/modules/util.sh @@ -24,7 +24,7 @@ then successOr ${@:1} } - successOrOr() { + successOr() { local __res=$? 
if [ ${__res} -ne 0 ] then diff --git a/build/cmake/package_tester/rpm_tests.sh b/build/cmake/package_tester/rpm_tests.sh index bdc87b4cc9..a88bfb4f15 100755 --- a/build/cmake/package_tester/rpm_tests.sh +++ b/build/cmake/package_tester/rpm_tests.sh @@ -2,6 +2,7 @@ source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" +source ${source_dir}/modules/globals.sh source ${source_dir}/modules/util.sh source ${source_dir}/modules/rpm.sh source ${source_dir}/modules/tests.sh diff --git a/build/cmake/package_tester/test_packages.sh b/build/cmake/package_tester/test_packages.sh index 6db2955466..05642073d8 100755 --- a/build/cmake/package_tester/test_packages.sh +++ b/build/cmake/package_tester/test_packages.sh @@ -2,6 +2,7 @@ source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" +source ${source_dir}/modules/globals.sh source ${source_dir}/modules/config.sh source ${source_dir}/modules/util.sh source ${source_dir}/modules/arguments.sh From e8624efb3bb6112f73b010541bd8baf8764f9663 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 1 Mar 2019 14:13:16 -0800 Subject: [PATCH 45/71] several minor improvements --- build/cmake/build.sh | 8 +-- cmake/CPackConfig.cmake | 3 ++ cmake/InstallLayout.cmake | 111 +++++++++++++++++++++++++------------- fdbmonitor/CMakeLists.txt | 2 +- 4 files changed, 83 insertions(+), 41 deletions(-) diff --git a/build/cmake/build.sh b/build/cmake/build.sh index 75d33646b1..809b1c6eb2 100644 --- a/build/cmake/build.sh +++ b/build/cmake/build.sh @@ -121,7 +121,7 @@ rpm() { local __res=0 for _ in 1 do - cmake -DINSTALL_LAYOUT=RPM ../foundationdb + cmake ../foundationdb __res=$? if [ ${__res} -ne 0 ] then @@ -133,7 +133,7 @@ rpm() { then break fi - fakeroot cpack + fakeroot cpack -G RPM __res=$? if [ ${__res} -ne 0 ] then @@ -147,7 +147,7 @@ deb() { local __res=0 for _ in 1 do - cmake -DINSTALL_LAYOUT=DEB ../foundationdb + cmake ../foundationdb __res=$? 
if [ ${__res} -ne 0 ] then @@ -159,7 +159,7 @@ deb() { then break fi - fakeroot cpack + fakeroot cpack -G DEB __res=$? if [ ${__res} -ne 0 ] then diff --git a/cmake/CPackConfig.cmake b/cmake/CPackConfig.cmake index 15580aa84b..25aab266dd 100644 --- a/cmake/CPackConfig.cmake +++ b/cmake/CPackConfig.cmake @@ -1,13 +1,16 @@ # RPM specifics if(CPACK_GENERATOR MATCHES "RPM") + set(CPACK_PACKAGING_INSTALL_PREFIX "/") set(CPACK_COMPONENTS_ALL clients-el6 clients-el7 server-el6 server-el7) set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) elseif(CPACK_GENERATOR MATCHES "DEB") + set(CPACK_PACKAGING_INSTALL_PREFIX "/") set(CPACK_COMPONENTS_ALL clients-deb server-deb) set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) elseif(CPACK_GENERATOR MATCHES "PackageMaker") + set(CPACK_PACKAGING_INSTALL_PREFIX "/") set(CPACK_COMPONENTS_ALL clients-pm server-pm) set(CPACK_STRIP_FILES TRUE) set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall) diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index a5354568bb..76bff5597a 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -87,42 +87,42 @@ function(fdb_install) if(IN_EXPORT) set(args EXPORT) endif() - if("${IN_DESTINATION}" MATCHES "bin") + if("${IN_DESTINATION}" STREQUAL "bin") install(${args} DESTINATION "bin" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el6") install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el7") install(${args} DESTINATION "usr/local/bin" COMPONENT "${IN_COMPONENT}-pm") - elseif("${IN_DESTINATION}" MATCHES "sbin") + elseif("${IN_DESTINATION}" STREQUAL "sbin") install(${args} DESTINATION "sbin" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION 
"usr/sbin" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el6") install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el7") install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm") - elseif("${IN_DESTINATION}" MATCHES "libexec") + elseif("${IN_DESTINATION}" STREQUAL "fdbmonitor") install(${args} DESTINATION "libexec" COMPONENT "${IN_COMPONENT}-tgz") - install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-deb") - install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-el6") - install(${args} DESTINATION "usr/libexec" COMPONENT "${IN_COMPONENT}-el7") - install(${args} DESTINATION "usr/local/lib/foundationdb" COMPONENT "${IN_COMPONENT}-pm") - elseif("${IN_DESTINATION}" MATCHES "include") + install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm") + elseif("${IN_DESTINATION}" STREQUAL "include") install(${args} DESTINATION "include" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el6") install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el7") install(${args} DESTINATION "usr/local/include" COMPONENT "${IN_COMPONENT}-pm") - elseif("${IN_DESTINATION}" MATCHES "etc") + elseif("${IN_DESTINATION}" STREQUAL "etc") install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el6") install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el7") install(${args} 
DESTINATION "usr/local/etc/foundationdb" COMPONENT "${IN_COMPONENT}-pm") - elseif("${IN_DESTINATION}" MATCHES "log") + elseif("${IN_DESTINATION}" STREQUAL "log") install(${args} DESTINATION "log/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el6") install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el7") - elseif("${IN_DESTINATION}" MATCHES "data") + elseif("${IN_DESTINATION}" STREQUAL "data") install(${args} DESTINATION "lib/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb") install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6") @@ -167,7 +167,25 @@ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions.") set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico) set(CPACK_PACKAGE_CONTACT "The FoundationDB Community") -set(CPACK_COMPONENT_server_DEPENDS clients) + +set(CPACK_COMPONENT_SERVER-EL6_DEPENDS clients-el6) +set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7) +set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb) +set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz) +set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm) + +set(CPACK_COMPONENT_SERVER-EL6_DISPLAY_NAME "foundationdb-server") +set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server") +set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server") +set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server") +set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server") + +set(CPACK_COMPONENT_CLIENTS-EL6_DISPLAY_NAME "foundationdb-clients") +set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients") +set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients") 
+set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients") +set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients") + # MacOS needs a file exiension for the LICENSE file configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY) @@ -186,15 +204,33 @@ set(server-filename "foundationdb-server-${PROJECT_VERSION}.${CURRENT_GIT_VERSIO # Configuration for RPM ################################################################################ -set(CPACK_RPM_clients-el6_FILE_NAME "${clients-filename}.el6.x86_64.rpm") -set(CPACK_RPM_clients-el7_FILE_NAME "${clients-filename}.el7.x86_64.rpm") -set(CPACK_RPM_server-el6_FILE_NAME "${server-filename}.el6.x86_64.rpm") -set(CPACK_RPM_server-el7_FILE_NAME "${server-filename}.el7.x86_64.rpm") -file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir") -fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server) -fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION lib COMPONENT server) +set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0") -set(CPACK_RPM_server_USER_FILELIST +set(CPACK_RPM_PACKAGE_NAME "foundationdb") +set(CPACK_RPM_CLIENTS-EL6-PACKAGE_NAME "foundationdb-clients") +set(CPACK_RPM_CLIENTS-EL7-PACKAGE_NAME "foundationdb-clients") +set(CPACK_RPM_SERVER-EL6-PACKAGE_NAME "foundationdb-server") +set(CPACK_RPM_SERVER-EL7-PACKAGE_NAME "foundationdb-server") + +set(CPACK_RPM_CLIENTS-EL6_FILE_NAME "${clients-filename}.el6.x86_64.rpm") +set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${clients-filename}.el7.x86_64.rpm") +set(CPACK_RPM_SERVER-EL6_FILE_NAME "${server-filename}.el6.x86_64.rpm") +set(CPACK_RPM_SERVER-EL7_FILE_NAME "${server-filename}.el7.x86_64.rpm") + +set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO-FILE_NAME "${clients-filename}.el6-debuginfo.x86_64.rpm") +set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO-FILE_NAME "${clients-filename}.el7-debuginfo.x86_64.rpm") +set(CPACK_RPM_SERVER-EL6_DEBUGINFO-FILE_NAME 
"${server-filename}.el6-debuginfo.x86_64.rpm") +set(CPACK_RPM_SERVER-EL7_DEBUGINFO-FILE_NAME "${server-filename}.el7-debuginfo.x86_64.rpm") + +file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir") +fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server) +fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server) + +set(CPACK_RPM_SERVER-EL6_USER_FILELIST + "%config(noreplace) /etc/foundationdb/foundationdb.conf" + "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb" + "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb") +set(CPACK_RPM_SERVER-EL7_USER_FILELIST "%config(noreplace) /etc/foundationdb/foundationdb.conf" "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb" "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb") @@ -212,42 +248,42 @@ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/etc/rc.d/init.d") set(CPACK_RPM_DEBUGINFO_PACKAGE ON) set(CPACK_RPM_DEBUGINFO_PACKAGE ON) -set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src) +#set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src) set(CPACK_RPM_COMPONENT_INSTALL ON) -set(CPACK_RPM_clients-el6_PRE_INSTALL_SCRIPT_FILE +set(CPACK_RPM_CLIENTS-EL6_PRE_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh) set(CPACK_RPM_clients-el7_PRE_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh) -set(CPACK_RPM_clients-el6_POST_INSTALL_SCRIPT_FILE +set(CPACK_RPM_CLIENTS-EL6_POST_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh) -set(CPACK_RPM_clients-el7_POST_INSTALL_SCRIPT_FILE +set(CPACK_RPM_CLIENTS-EL7_POST_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh) -set(CPACK_RPM_server-el6_PRE_INSTALL_SCRIPT_FILE +set(CPACK_RPM_SERVER-EL6_PRE_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) -set(CPACK_RPM_server-el7_PRE_INSTALL_SCRIPT_FILE 
+set(CPACK_RPM_SERVER-EL7_PRE_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) -set(CPACK_RPM_server-el6_POST_INSTALL_SCRIPT_FILE +set(CPACK_RPM_SERVER-EL6_POST_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) -set(CPACK_RPM_server-el7_POST_INSTALL_SCRIPT_FILE +set(CPACK_RPM_SERVER-EL7_POST_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) -set(CPACK_RPM_server-el6_PRE_UNINSTALL_SCRIPT_FILE +set(CPACK_RPM_SERVER-EL6_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) -set(CPACK_RPM_server-el7_PRE_UNINSTALL_SCRIPT_FILE +set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) -set(CPACK_RPM_server-el6_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") -set(CPACK_RPM_server-el7_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") +set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES + "foundationdb-clients-el6 = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") +set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES + "foundationdb-clients-el7 = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") #set(CPACK_RPM_java_PACKAGE_REQUIRES # "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") -set(CPACK_RPM_python_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") +#set(CPACK_RPM_python_PACKAGE_REQUIRES +# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") ################################################################################ # Configuration for DEB @@ -260,6 +296,9 @@ set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON) set(CPACK_DEBIAN_PACKAGE_SECTION "database") set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON) +set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server") +set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients") + set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, 
libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})") set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)") set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org") diff --git a/fdbmonitor/CMakeLists.txt b/fdbmonitor/CMakeLists.txt index 445638a9b3..bed70f15c0 100644 --- a/fdbmonitor/CMakeLists.txt +++ b/fdbmonitor/CMakeLists.txt @@ -9,4 +9,4 @@ endif() # as soon as we get rid of the old build system target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient) -fdb_install(TARGETS fdbmonitor DESTINATION libexec COMPONENT server) +fdb_install(TARGETS fdbmonitor DESTINATION fdbmonitor COMPONENT server) From 294baa4091ffc3cd78fc59adc11a4ddc41908925 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 1 Mar 2019 15:21:21 -0800 Subject: [PATCH 46/71] Fix package naming --- cmake/InstallLayout.cmake | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 76bff5597a..9437398839 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -207,20 +207,20 @@ set(server-filename "foundationdb-server-${PROJECT_VERSION}.${CURRENT_GIT_VERSIO set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0") set(CPACK_RPM_PACKAGE_NAME "foundationdb") -set(CPACK_RPM_CLIENTS-EL6-PACKAGE_NAME "foundationdb-clients") -set(CPACK_RPM_CLIENTS-EL7-PACKAGE_NAME "foundationdb-clients") -set(CPACK_RPM_SERVER-EL6-PACKAGE_NAME "foundationdb-server") -set(CPACK_RPM_SERVER-EL7-PACKAGE_NAME "foundationdb-server") +set(CPACK_RPM_CLIENTS-EL6_PACKAGE_NAME "foundationdb-clients") +set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients") +set(CPACK_RPM_SERVER-EL6_PACKAGE_NAME "foundationdb-server") +set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server") set(CPACK_RPM_CLIENTS-EL6_FILE_NAME "${clients-filename}.el6.x86_64.rpm") set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${clients-filename}.el7.x86_64.rpm") set(CPACK_RPM_SERVER-EL6_FILE_NAME 
"${server-filename}.el6.x86_64.rpm") set(CPACK_RPM_SERVER-EL7_FILE_NAME "${server-filename}.el7.x86_64.rpm") -set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO-FILE_NAME "${clients-filename}.el6-debuginfo.x86_64.rpm") -set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO-FILE_NAME "${clients-filename}.el7-debuginfo.x86_64.rpm") -set(CPACK_RPM_SERVER-EL6_DEBUGINFO-FILE_NAME "${server-filename}.el6-debuginfo.x86_64.rpm") -set(CPACK_RPM_SERVER-EL7_DEBUGINFO-FILE_NAME "${server-filename}.el7-debuginfo.x86_64.rpm") +set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO_FILE_NAME "${clients-filename}.el6-debuginfo.x86_64.rpm") +set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${clients-filename}.el7-debuginfo.x86_64.rpm") +set(CPACK_RPM_SERVER-EL6_DEBUGINFO_FILE_NAME "${server-filename}.el6-debuginfo.x86_64.rpm") +set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${server-filename}.el7-debuginfo.x86_64.rpm") file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir") fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server) From 55f4d78fcfede1b7f4ccdd85ef647cb73bad0c1d Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 1 Mar 2019 16:02:51 -0800 Subject: [PATCH 47/71] Support generating el6 and el7 rpms --- build/cmake/build.sh | 4 +++- cmake/CPackConfig.cmake | 6 +++++- cmake/InstallLayout.cmake | 5 ++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/build/cmake/build.sh b/build/cmake/build.sh index 809b1c6eb2..72a76c8110 100644 --- a/build/cmake/build.sh +++ b/build/cmake/build.sh @@ -87,7 +87,8 @@ package_fast() { for _ in 1 do make -j`nproc` packages - make -j`nproc` package + cpack + cpack -G RPM -D GENERATE_EL6=ON __res=$? if [ ${__res} -ne 0 ] then @@ -133,6 +134,7 @@ rpm() { then break fi + fakeroot cpack -G RPM -D GENERATE_EL6=ON fakeroot cpack -G RPM __res=$? 
if [ ${__res} -ne 0 ] diff --git a/cmake/CPackConfig.cmake b/cmake/CPackConfig.cmake index 25aab266dd..831103a989 100644 --- a/cmake/CPackConfig.cmake +++ b/cmake/CPackConfig.cmake @@ -1,7 +1,11 @@ # RPM specifics if(CPACK_GENERATOR MATCHES "RPM") set(CPACK_PACKAGING_INSTALL_PREFIX "/") - set(CPACK_COMPONENTS_ALL clients-el6 clients-el7 server-el6 server-el7) + if(GENERATE_EL6) + set(CPACK_COMPONENTS_ALL clients-el6 server-el6) + else() + set(CPACK_COMPONENTS_ALL clients-el7 server-el7) + endif() set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md) set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) elseif(CPACK_GENERATOR MATCHES "DEB") diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 9437398839..f32ff4f469 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -247,7 +247,6 @@ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/lib/systemd/system" "/etc/rc.d/init.d") set(CPACK_RPM_DEBUGINFO_PACKAGE ON) -set(CPACK_RPM_DEBUGINFO_PACKAGE ON) #set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src) set(CPACK_RPM_COMPONENT_INSTALL ON) @@ -277,9 +276,9 @@ set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES - "foundationdb-clients-el6 = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES - "foundationdb-clients-el7 = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") #set(CPACK_RPM_java_PACKAGE_REQUIRES # "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") #set(CPACK_RPM_python_PACKAGE_REQUIRES From 645eba7b58fbd13c5a8d5e895d19131e6af65f59 Mon Sep 17 00:00:00 2001 From: mpilman Date: Sat, 2 Mar 2019 10:58:54 -0800 Subject: [PATCH 48/71] fixed cmake generated symlinks --- cmake/InstallLayout.cmake | 14 
+++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index f32ff4f469..4151248637 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -27,34 +27,34 @@ function(install_symlink) set(multi_value_options "") cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}") + set(rel_path "") string(REGEX MATCHALL "\\/" slashes "${IN_LINK_NAME}") - list(LENGTH slashes num_link_subdirs) - foreach(i RANGE 1 ${num_link_subdirs}) + foreach(ignored IN LISTS slashes) set(rel_path "../${rel_path}") endforeach() if("${IN_FILE_DIR}" MATCHES "bin") if("${IN_LINK_DIR}" MATCHES "lib") install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" + TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "lib/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-tgz") install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" + TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "usr/lib64/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-el6" "${IN_COMPONENT}-el7" "${IN_COMPONENT}-deb") install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" + TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "usr/lib64/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-deb") elseif("${IN_LINK_DIR}" MATCHES "bin") install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" + TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "bin/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-tgz") install_symlink_impl( - TO "../${rel_path}/bin/${IN_FILE_NAME}" + TO "../${rel_path}bin/${IN_FILE_NAME}" DESTINATION "usr/bin/${IN_LINK_NAME}" COMPONENTS "${IN_COMPONENT}-el6" "${IN_COMPONENT}-el7" From 66cf5438bd02edb6fac1dec9d0254d6ec3550083 Mon Sep 17 00:00:00 2001 From: mpilman Date: Sat, 2 Mar 2019 10:59:24 -0800 Subject: [PATCH 49/71] Account for rpmsave in keep_config test --- build/cmake/package_tester/modules/docker.sh | 2 +- build/cmake/package_tester/modules/rpm.sh | 2 ++ 
build/cmake/package_tester/modules/tests.sh | 16 +++++++++++++--- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/build/cmake/package_tester/modules/docker.sh b/build/cmake/package_tester/modules/docker.sh index db010d7ebb..92e3508f5b 100644 --- a/build/cmake/package_tester/modules/docker.sh +++ b/build/cmake/package_tester/modules/docker.sh @@ -119,7 +119,7 @@ then then docker_wait_any fi - echo "Starting Test ${curr_test}" + echo "Starting Test ${curr_name}/${curr_test}" docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\ -v "${fdb_build}:/build"\ ${curr_name}\ diff --git a/build/cmake/package_tester/modules/rpm.sh b/build/cmake/package_tester/modules/rpm.sh index f76de6b102..bd3348d102 100644 --- a/build/cmake/package_tester/modules/rpm.sh +++ b/build/cmake/package_tester/modules/rpm.sh @@ -6,6 +6,8 @@ then source ${source_dir}/modules/util.sh + conf_save_extension=".rpmsave" + install() { local __res=0 enterfun diff --git a/build/cmake/package_tester/modules/tests.sh b/build/cmake/package_tester/modules/tests.sh index f995b6cc07..9b4e0e1ed5 100644 --- a/build/cmake/package_tester/modules/tests.sh +++ b/build/cmake/package_tester/modules/tests.sh @@ -102,13 +102,23 @@ then uninstall # make sure config didn't get deleted - if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f /etc/foundationdb/foundationdb.conf ] + # RPM, however, renames the file on remove, so we need to check for this + conffile="/etc/foundationdb/foundationdb.conf${conf_save_extension}" + if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! 
-f "${conffile}" ] then fail "Uninstall removed configuration" fi + differences="$(diff /tmp/foundationdb.conf ${conffile})" + if [ -n "${differences}" ] + then + fail "${conffile} changed during remove" + fi + differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)" + if [ -n "${differences}" ] + then + fail "/etc/foundationdb/fdb.cluster changed during remove" + fi - rm /tmp/fdb.cluster - rm /tmp/foundationdb.conf return 0 } fi From da723066422e4c5b777b85be3b29e4ebc64358e0 Mon Sep 17 00:00:00 2001 From: mpilman Date: Sat, 2 Mar 2019 14:05:05 -0800 Subject: [PATCH 50/71] several minor bug fixes --- build/cmake/centos7-test/Dockerfile | 2 +- cmake/InstallLayout.cmake | 13 ++++++++++++- fdbbackup/CMakeLists.txt | 4 ++-- packaging/rpm/scripts/postserver.sh | 4 ++-- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/build/cmake/centos7-test/Dockerfile b/build/cmake/centos7-test/Dockerfile index 6412aa71b1..f31bc703ec 100644 --- a/build/cmake/centos7-test/Dockerfile +++ b/build/cmake/centos7-test/Dockerfile @@ -1,3 +1,3 @@ FROM centos:7 -RUN yum install -y yum-utils +RUN yum install -y yum-utils systemd diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 4151248637..980622cc04 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -59,6 +59,17 @@ function(install_symlink) COMPONENTS "${IN_COMPONENT}-el6" "${IN_COMPONENT}-el7" "${IN_COMPONENT}-deb") + elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor") + install_symlink_impl( + TO "../../${rel_path}bin/${IN_FILE_NAME}" + DESTINATION "lib/foundationdb/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-tgz") + install_symlink_impl( + TO "../../${rel_path}bin/${IN_FILE_NAME}" + DESTINATION "usr/lib/foundationdb/${IN_LINK_NAME}" + COMPONENTS "${IN_COMPONENT}-el6" + "${IN_COMPONENT}-el7" + "${IN_COMPONENT}-deb") else() message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}") endif() @@ -276,7 +287,7 @@ set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE 
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh) set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES - "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03") + "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}") #set(CPACK_RPM_java_PACKAGE_REQUIRES diff --git a/fdbbackup/CMakeLists.txt b/fdbbackup/CMakeLists.txt index b892a83565..5839f2c874 100644 --- a/fdbbackup/CMakeLists.txt +++ b/fdbbackup/CMakeLists.txt @@ -9,9 +9,9 @@ if(NOT OPEN_FOR_IDE) install_symlink( COMPONENT clients FILE_DIR bin - LINK_DIR lib + LINK_DIR fdbmonitor FILE_NAME fdbbackup - LINK_NAME foundationdb/backup_agent/backup_agent) + LINK_NAME backup_agent/backup_agent) install_symlink( COMPONENT clients FILE_DIR bin diff --git a/packaging/rpm/scripts/postserver.sh b/packaging/rpm/scripts/postserver.sh index cdffdbdb34..9fe2ee1e12 100644 --- a/packaging/rpm/scripts/postserver.sh +++ b/packaging/rpm/scripts/postserver.sh @@ -8,7 +8,7 @@ if [ $1 -eq 1 ]; then NEWDB=1 fi - if pidof systemd + if pidof systemd > /dev/null then /usr/bin/systemctl enable foundationdb >/dev/null 2>&1 /usr/bin/systemctl start foundationdb >/dev/null 2>&1 @@ -21,7 +21,7 @@ if [ $1 -eq 1 ]; then /usr/bin/fdbcli -C /etc/foundationdb/fdb.cluster --exec "configure new single memory" --timeout 20 >/dev/null 2>&1 fi else - if pidof systemd + if pidof systemd > /dev/null then /usr/bin/systemctl condrestart foundationdb >/dev/null 2>&1 else From 51ccdb1c9b862bf92bd8903a67ce26fe423cf4b0 Mon Sep 17 00:00:00 2001 From: mpilman Date: Sat, 2 Mar 2019 14:36:55 -0800 Subject: [PATCH 51/71] Remove pidof hack as it was not stable --- build/cmake/centos7-test/Dockerfile | 2 +- cmake/InstallLayout.cmake | 2 +- packaging/rpm/scripts/postserver-el6.sh | 20 ++++++++++++++++++++ packaging/rpm/scripts/postserver.sh | 19 ++++--------------- 4 files changed, 26 insertions(+), 17 deletions(-) create mode 
100644 packaging/rpm/scripts/postserver-el6.sh diff --git a/build/cmake/centos7-test/Dockerfile b/build/cmake/centos7-test/Dockerfile index f31bc703ec..2be4b61f06 100644 --- a/build/cmake/centos7-test/Dockerfile +++ b/build/cmake/centos7-test/Dockerfile @@ -1,3 +1,3 @@ FROM centos:7 -RUN yum install -y yum-utils systemd +RUN yum install -y yum-utils systemd sysvinit-tools diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 980622cc04..d54fc9cec9 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -277,7 +277,7 @@ set(CPACK_RPM_SERVER-EL7_PRE_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh) set(CPACK_RPM_SERVER-EL6_POST_INSTALL_SCRIPT_FILE - ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) + ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver-el6.sh) set(CPACK_RPM_SERVER-EL7_POST_INSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh) diff --git a/packaging/rpm/scripts/postserver-el6.sh b/packaging/rpm/scripts/postserver-el6.sh new file mode 100644 index 0000000000..e5114049b3 --- /dev/null +++ b/packaging/rpm/scripts/postserver-el6.sh @@ -0,0 +1,20 @@ +if [ $1 -eq 1 ]; then + if [ ! 
-f /etc/foundationdb/fdb.cluster ]; then + description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8) + random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8) + echo $description:$random_str@127.0.0.1:4500 > /etc/foundationdb/fdb.cluster + chown foundationdb:foundationdb /etc/foundationdb/fdb.cluster + chmod 0664 /etc/foundationdb/fdb.cluster + NEWDB=1 + fi + + /sbin/chkconfig --add foundationdb >/dev/null 2>&1 + /sbin/service foundationdb start >/dev/null 2>&1 + + if [ "$NEWDB" != "" ]; then + /usr/bin/fdbcli -C /etc/foundationdb/fdb.cluster --exec "configure new single memory" --timeout 20 >/dev/null 2>&1 + fi +else + /sbin/service foundationdb condrestart >/dev/null 2>&1 +fi +exit 0 diff --git a/packaging/rpm/scripts/postserver.sh b/packaging/rpm/scripts/postserver.sh index 9fe2ee1e12..cad72803e3 100644 --- a/packaging/rpm/scripts/postserver.sh +++ b/packaging/rpm/scripts/postserver.sh @@ -3,30 +3,19 @@ if [ $1 -eq 1 ]; then description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8) random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8) echo $description:$random_str@127.0.0.1:4500 > /etc/foundationdb/fdb.cluster - chown foundationdb:foundationdb /etc/foundationdb/fdb.cluster + chown foundationdb:foundationdb /etc/foundationdb/fdb.cluster chmod 0664 /etc/foundationdb/fdb.cluster NEWDB=1 fi - if pidof systemd > /dev/null - then - /usr/bin/systemctl enable foundationdb >/dev/null 2>&1 - /usr/bin/systemctl start foundationdb >/dev/null 2>&1 - else - /sbin/chkconfig --add foundationdb >/dev/null 2>&1 - /sbin/service foundationdb start >/dev/null 2>&1 - fi + /usr/bin/systemctl enable foundationdb >/dev/null 2>&1 + /usr/bin/systemctl start foundationdb >/dev/null 2>&1 if [ "$NEWDB" != "" ]; then /usr/bin/fdbcli -C /etc/foundationdb/fdb.cluster --exec "configure new single memory" --timeout 20 >/dev/null 2>&1 fi else - if pidof systemd > /dev/null - then - /usr/bin/systemctl condrestart foundationdb >/dev/null 2>&1 - 
else - /sbin/service foundationdb condrestart >/dev/null 2>&1 - fi + /usr/bin/systemctl condrestart foundationdb >/dev/null 2>&1 fi exit 0 From 668eaeb8aecf01045346bf8362bcaffddd4bc968 Mon Sep 17 00:00:00 2001 From: mpilman Date: Sun, 3 Mar 2019 13:40:15 -0800 Subject: [PATCH 52/71] Packages tested and all seems working --- build/cmake/build.sh | 2 +- build/cmake/centos6-test/Dockerfile | 2 +- build/cmake/debian-test/Dockerfile | 1 + build/cmake/package_tester/modules/docker.sh | 121 ++++++++++++------- cmake/InstallLayout.cmake | 6 +- 5 files changed, 85 insertions(+), 47 deletions(-) diff --git a/build/cmake/build.sh b/build/cmake/build.sh index 72a76c8110..7bf437221d 100644 --- a/build/cmake/build.sh +++ b/build/cmake/build.sh @@ -102,7 +102,7 @@ package() { local __res=0 for _ in 1 do - configure + build __res=$? if [ ${__res} -ne 0 ] then diff --git a/build/cmake/centos6-test/Dockerfile b/build/cmake/centos6-test/Dockerfile index 3a2f07a4f8..a12d3cd2a1 100644 --- a/build/cmake/centos6-test/Dockerfile +++ b/build/cmake/centos6-test/Dockerfile @@ -1,3 +1,3 @@ FROM centos:6 -RUN yum install -y yum-utils +RUN yum install -y yum-utils upstart initscripts diff --git a/build/cmake/debian-test/Dockerfile b/build/cmake/debian-test/Dockerfile index 94ecd6dbef..8e9dd8c246 100644 --- a/build/cmake/debian-test/Dockerfile +++ b/build/cmake/debian-test/Dockerfile @@ -1,3 +1,4 @@ FROM ubuntu:16.04 RUN apt-get update +RUN apt-get install -y systemd diff --git a/build/cmake/package_tester/modules/docker.sh b/build/cmake/package_tester/modules/docker.sh index 92e3508f5b..a4341d47a8 100644 --- a/build/cmake/package_tester/modules/docker.sh +++ b/build/cmake/package_tester/modules/docker.sh @@ -9,54 +9,55 @@ then failed_tests=() - docker_threads=() docker_ids=() + docker_threads=() + docker_logs=() + docker_error_logs=() docker_wait_any() { - # docker wait waits on all containers (unlike what is documented) - # so we need to do polling - success=0 - while [ "${success}" -eq 0 ] + 
local __res=0 + enterfun + while [ "${#docker_threads[@]}" -gt 0 ] do - for ((i=0;i<"${#docker_ids[@]}";++i)) + IFS=";" read -ra res <${pipe_file} + docker_id=${res[0]} + result=${res[1]} + i=0 + for (( idx=0; idx<${#docker_ids[@]}; idx++ )) do - docker_id="${docker_ids[$i]}" - status="$(docker ps -a -f id=${docker_id} --format '{{.Status}}' | awk '{print $1;}')" - if [ "${status}" = "Exited" ] + if [ "${docker_id}" = "${docker_ids[idx]}" ] then - success=1 - ret_code="$(docker wait ${docker_id})" - if [ "${ret_code}" -ne 0 ] - then - failed_tests+=( "${docker_threads[$i]}" ) - echo -e "${RED}Test failed: ${docker_threads[$i]} ${NC}" - else - echo -e "${GREEN}Test succeeded: ${docker_threads[$i]} ${NC}" - fi - # remove it - n=$((i+1)) - docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" ) - docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" ) - # prune - if [ "${pruning_strategy}" = "ALL" ] - then - docker container rm "${docker_id}" > /dev/null - elif [ "${ret_code}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ] - then - docker container rm "${docker_id}" > /dev/null - elif [ "${ret_code}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ] - then - docker container rm "${docker_id}" > /dev/null - fi + i=idx + break fi done - sleep 1 + if [ "${result}" -eq 0 ] + then + echo -e "${GREEN}Test succeeded: ${docker_threads[$i]}" + echo -e "\tDocker-ID: ${docker_ids[$i]} " + echo -e "\tLog-File: ${docker_logs[$i]}" + echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}" + else + echo -e "${RED}Test FAILED: ${docker_threads[$i]}" + echo -e "\tDocker-ID: ${docker_ids[$i]} " + echo -e "\tLog-File: ${docker_logs[$i]}" + echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}" + failed_tests+=( "${docker_threads[$i]}" ) + fi + n=$((i+1)) + docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" ) + docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" ) + docker_logs=( "${docker_logs[@]:0:$i}" "${docker_logs[@]:$n}" ) + 
docker_error_logs=( "${docker_error_logs[@]:0:$i}" "${docker_error_logs[@]:$n}" ) + break done + exitfun + return "${__res}" } docker_wait_all() { local __res=0 - while [ "${#docker_ids[@]}" -gt 0 ] + while [ "${#docker_threads[@]}" -gt 0 ] do docker_wait_any if [ "$?" -ne 0 ] @@ -85,6 +86,22 @@ then echo done log_dir="${fdb_build}/pkg_tester" + pipe_file="${fdb_build}/pkg_tester.pipe" + lock_file="${fdb_build}/pkg_tester.lock" + if [ -p "${pipe_file}" ] + then + rm "${pipe_file}" + successOr "Could not delete old pipe file" + fi + if [ -f "${lock_file}" ] + then + rm "${lock_file}" + successOr "Could not delete old pipe file" + fi + touch "${lock_file}" + successOr "Could not create lock file" + mkfifo "${pipe_file}" + successOr "Could not create pipe file" mkdir -p "${log_dir}" # setup the containers # TODO: shall we make this parallel as well? @@ -98,10 +115,9 @@ then else cd ${source_dir}/../${curr_location} fi - pwd - docker_logs="${log_dir}/docker_build_${curr_name}" - docker build . -t ${curr_name} 1> "${docker_logs}.log" 2> "${docker_logs}.err" - successOr "Building Docker image ${name} failed - see ${docker_logs}.log and ${docker_logs}.err" + docker_buid_logs="${log_dir}/docker_build_${curr_name}" + docker build . -t ${curr_name} 1> "${docker_buid_logs}.log" 2> "${docker_buid_logs}.err" + successOr "Building Docker image ${name} failed - see ${docker_buid_logs}.log and ${docker_buid_logs}.err" done if [ ! 
-z "${tests_to_run+x}"] then @@ -120,15 +136,36 @@ then docker_wait_any fi echo "Starting Test ${curr_name}/${curr_test}" + log_file="${log_dir}/${curr_name}_${curr_test}.log" + err_file="${log_dir}/${curr_name}_${curr_test}.err" docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\ -v "${fdb_build}:/build"\ - ${curr_name}\ - bash /foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]} ) + ${curr_name} /sbin/init ) + { + docker exec "${docker_id}" bash \ + /foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]}\ + 2> ${err_file} 1> ${log_file} + res=$? + if [ "${pruning_strategy}" = "ALL" ] + then + docker kill "${docker_id}" > /dev/null + elif [ "${res}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ] + then + docker kill "${docker_id}" > /dev/null + elif [ "${res}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ] + then + docker kill "${docker_id}" > /dev/null + fi + flock "${lock_file}" echo "${docker_id};${res}" >> "${pipe_file}" + } & docker_ids+=( "${docker_id}" ) - docker_threads+=( "${PKG} - ${t} (ID: ${docker_id})" ) + docker_threads+=( "${curr_name}/${curr_test}" ) + docker_logs+=( "${log_file}" ) + docker_error_logs+=( "${err_file}" ) done done docker_wait_all + rm ${pipe_file} if [ "${#failed_tests[@]}" -eq 0 ] then echo -e "${GREEN}SUCCESS${NC}" diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index d54fc9cec9..e7a46799b7 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -135,9 +135,9 @@ function(fdb_install) install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el7") elseif("${IN_DESTINATION}" STREQUAL "data") install(${args} DESTINATION "lib/foundationdb" COMPONENT "${IN_COMPONENT}-tgz") - install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb") - install(${args} DESTINATION "var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6") - install(${args} DESTINATION 
"var/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el7") + install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-deb") + install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el6") + install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el7") endif() endif() endfunction() From 4891d31cc0afc87df44ffe984ca3286421d64f0c Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Thu, 7 Mar 2019 18:31:18 -0800 Subject: [PATCH 53/71] Remove a broken ASSERT. It's now totally valid to have: permits=3 take(1) take(2) take(72) and the take(72) will only be granted once the first two finish. --- flow/genericactors.actor.h | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/genericactors.actor.h b/flow/genericactors.actor.h index d32c4da1f5..b591137c4a 100644 --- a/flow/genericactors.actor.h +++ b/flow/genericactors.actor.h @@ -1218,7 +1218,6 @@ struct FlowLock : NonCopyable, public ReferenceCounted { explicit FlowLock(int64_t permits) : permits(permits), active(0) {} Future take(int taskID = TaskDefaultYield, int64_t amount = 1) { - ASSERT(amount <= permits || active == 0); if (active + amount <= permits || active == 0) { active += amount; return safeYieldActor(this, taskID, amount); From 426da90493a0a9ff2e8fef47ca105e4e42679058 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 8 Mar 2019 11:20:54 -0800 Subject: [PATCH 54/71] Add 8192 bit fast allocator StorageServer is getting close to 4KB on older gccs. However, on some systems (for example Fedora) the storageServer actor is already larger than 4KB. This results in a linker error. 
--- flow/FastAlloc.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/flow/FastAlloc.cpp b/flow/FastAlloc.cpp index 4f9adf70de..12ada3e743 100644 --- a/flow/FastAlloc.cpp +++ b/flow/FastAlloc.cpp @@ -224,7 +224,8 @@ static int64_t getSizeCode(int i) { case 1024: return 7; case 2048: return 8; case 4096: return 9; - default: return 10; + case 8192: return 10; + default: return 11; } } @@ -476,6 +477,7 @@ void releaseAllThreadMagazines() { FastAllocator<1024>::releaseThreadMagazines(); FastAllocator<2048>::releaseThreadMagazines(); FastAllocator<4096>::releaseThreadMagazines(); + FastAllocator<8192>::releaseThreadMagazines(); } int64_t getTotalUnusedAllocatedMemory() { @@ -490,6 +492,7 @@ int64_t getTotalUnusedAllocatedMemory() { unusedMemory += FastAllocator<1024>::getApproximateMemoryUnused(); unusedMemory += FastAllocator<2048>::getApproximateMemoryUnused(); unusedMemory += FastAllocator<4096>::getApproximateMemoryUnused(); + unusedMemory += FastAllocator<8192>::getApproximateMemoryUnused(); return unusedMemory; } @@ -503,4 +506,5 @@ template class FastAllocator<512>; template class FastAllocator<1024>; template class FastAllocator<2048>; template class FastAllocator<4096>; +template class FastAllocator<8192>; From f59356274321e2a64bf95e0455a8590936d871c6 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 8 Mar 2019 12:34:20 -0800 Subject: [PATCH 55/71] use 8KB pages in Arena --- flow/Arena.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flow/Arena.h b/flow/Arena.h index 5433c28490..ef5f77bda9 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -227,7 +227,8 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted else if (reqSize <= 512) { b = (ArenaBlock*)FastAllocator<512>::allocate(); b->bigSize = 512; INSTRUMENT_ALLOCATE("Arena512"); } else if (reqSize <= 1024) { b = (ArenaBlock*)FastAllocator<1024>::allocate(); b->bigSize = 1024; INSTRUMENT_ALLOCATE("Arena1024"); } else if (reqSize <= 2048) { b = 
(ArenaBlock*)FastAllocator<2048>::allocate(); b->bigSize = 2048; INSTRUMENT_ALLOCATE("Arena2048"); } - else { b = (ArenaBlock*)FastAllocator<4096>::allocate(); b->bigSize = 4096; INSTRUMENT_ALLOCATE("Arena4096"); } + else if (reqSize <= 4096) { b = (ArenaBlock*)FastAllocator<4096>::allocate(); b->bigSize = 4096; INSTRUMENT_ALLOCATE("Arena4096"); } + else { b = (ArenaBlock*)FastAllocator<8192>::allocate(); b->bigSize = 8192; INSTRUMENT_ALLOCATE("Arena8192"); } b->tinySize = b->tinyUsed = NOT_TINY; b->bigUsed = sizeof(ArenaBlock); } else { @@ -269,6 +270,7 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted else if (bigSize <= 1024) { FastAllocator<1024>::release(this); INSTRUMENT_RELEASE("Arena1024"); } else if (bigSize <= 2048) { FastAllocator<2048>::release(this); INSTRUMENT_RELEASE("Arena2048"); } else if (bigSize <= 4096) { FastAllocator<4096>::release(this); INSTRUMENT_RELEASE("Arena4096"); } + else if (bigSize <= 8192) { FastAllocator<8192>::release(this); INSTRUMENT_RELEASE("Arena8192"); } else { #ifdef ALLOC_INSTRUMENTATION allocInstr[ "ArenaHugeKB" ].dealloc( (bigSize+1023)>>10 ); From ebffe8c633c93cf6ee6664ecc454be4cc26f692d Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 8 Mar 2019 12:37:04 -0800 Subject: [PATCH 56/71] print correct pahes in alloc instrumentation --- fdbbackup/backup.actor.cpp | 3 ++- fdbserver/fdbserver.actor.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index d4fc79cc09..af1d8fffaa 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -3318,7 +3318,8 @@ int main(int argc, char* argv[]) { << FastAllocator<512>::pageCount << " " << FastAllocator<1024>::pageCount << " " << FastAllocator<2048>::pageCount << " " - << FastAllocator<4096>::pageCount << endl; + << FastAllocator<4096>::pageCount << " " + << FastAllocator<8192>::pageCount << endl; vector< std::pair > typeNames; for( auto i = allocInstr.begin(); i != 
allocInstr.end(); ++i ) { diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index f208eb92d8..6a8d6b0320 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -1749,7 +1749,8 @@ int main(int argc, char* argv[]) { << FastAllocator<512>::pageCount << " " << FastAllocator<1024>::pageCount << " " << FastAllocator<2048>::pageCount << " " - << FastAllocator<4096>::pageCount << std::endl; + << FastAllocator<4096>::pageCount << " " + << FastAllocator<8192>::pageCount << std::endl; vector< std::pair > typeNames; for( auto i = allocInstr.begin(); i != allocInstr.end(); ++i ) { From 657c11b00b63876121d48849087110bc5dbaeac9 Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Thu, 7 Mar 2019 14:51:59 -0800 Subject: [PATCH 57/71] Add snowflake-ci to docker-compose This is intended to be used for easy reproduction of failures found by snowflake's planned CI. --- build/cmake/build.sh | 8 ++++---- build/cmake/docker-compose.yml | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/build/cmake/build.sh b/build/cmake/build.sh index 7bf437221d..99973b8d0e 100644 --- a/build/cmake/build.sh +++ b/build/cmake/build.sh @@ -38,7 +38,7 @@ configure() { local __res=0 for _ in 1 do - cmake ../foundationdb + cmake ../foundationdb ${CMAKE_EXTRA_ARGS} __res=$? if [ ${__res} -ne 0 ] then @@ -122,7 +122,7 @@ rpm() { local __res=0 for _ in 1 do - cmake ../foundationdb + configure __res=$? if [ ${__res} -ne 0 ] then @@ -149,7 +149,7 @@ deb() { local __res=0 for _ in 1 do - cmake ../foundationdb + configure __res=$? if [ ${__res} -ne 0 ] then @@ -175,7 +175,7 @@ test-fast() { local __res=0 for _ in 1 do - ctest -j`nproc` + ctest -j`nproc` ${CTEST_EXTRA_ARGS} __res=$? 
if [ ${__res} -ne 0 ] then diff --git a/build/cmake/docker-compose.yml b/build/cmake/docker-compose.yml index b85962e15b..e41d05abc2 100644 --- a/build/cmake/docker-compose.yml +++ b/build/cmake/docker-compose.yml @@ -59,6 +59,10 @@ services: <<: *build-setup command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test + snowflake-ci: &snowflake-ci + <<: *build-setup + command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package test-fast + shell: <<: *build-setup command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash From 121ed4acf90b3306ffb4beb1a5c1421c42c2dce2 Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Thu, 7 Mar 2019 15:04:21 -0800 Subject: [PATCH 58/71] Trace the cmake SEED This will be included in the repro instructions, so we need to trace it to keep track of it. --- tests/TestRunner/TestRunner.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/TestRunner/TestRunner.py b/tests/TestRunner/TestRunner.py index c8e54b7bc4..880a3cffa8 100755 --- a/tests/TestRunner/TestRunner.py +++ b/tests/TestRunner/TestRunner.py @@ -242,7 +242,7 @@ def get_traces(d, log_format): return traces -def process_traces(basedir, testname, path, out, aggregationPolicy, log_format, return_codes): +def process_traces(basedir, testname, path, out, aggregationPolicy, log_format, return_codes, cmake_seed): res = True backtraces = [] parser = None @@ -261,6 +261,7 @@ def process_traces(basedir, testname, path, out, aggregationPolicy, log_format, parser.fail() parser.processTraces() res = res and parser.result + parser.writeObject({'CMakeSEED': str(cmake_seed)}) return res def run_simulation_test(basedir, options): @@ -314,14 +315,14 @@ def run_simulation_test(basedir, options): if options.aggregate_traces == 'NONE': res = process_traces(basedir, options.name, wd, None, 'NONE', - options.log_format, return_codes) + 
options.log_format, return_codes, options.seed) else: with open(outfile, 'a') as f: os.lockf(f.fileno(), os.F_LOCK, 0) pos = f.tell() res = process_traces(basedir, options.name, wd, f, options.aggregate_traces, - options.log_format, return_codes) + options.log_format, return_codes, options.seed) f.seek(pos) os.lockf(f.fileno(), os.F_ULOCK, 0) if options.keep_logs == 'NONE' or options.keep_logs == 'FAILED' and res: From b9acc9a0e89e261626369ce9edd67c717dc761b7 Mon Sep 17 00:00:00 2001 From: Balachandar Namasivayam Date: Fri, 8 Mar 2019 15:13:11 -0800 Subject: [PATCH 59/71] Code refactor to fix windows msvc14 compiler errors. --- fdbclient/FileBackupAgent.actor.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fdbclient/FileBackupAgent.actor.cpp b/fdbclient/FileBackupAgent.actor.cpp index 6ef06cb4ef..8ea7dc5a60 100644 --- a/fdbclient/FileBackupAgent.actor.cpp +++ b/fdbclient/FileBackupAgent.actor.cpp @@ -3651,8 +3651,9 @@ public: oldRestore.clear(tr); } - for (auto &restoreRange : restoreRanges) { - KeyRange restoreIntoRange = KeyRangeRef(restoreRange.begin, restoreRange.end).removePrefix(removePrefix).withPrefix(addPrefix); + state int index; + for (index = 0; index < restoreRanges.size(); index++) { + KeyRange restoreIntoRange = KeyRangeRef(restoreRanges[index].begin, restoreRanges[index].end).removePrefix(removePrefix).withPrefix(addPrefix); Standalone existingRows = wait(tr->getRange(restoreIntoRange, 1)); if (existingRows.size() > 0) { throw restore_destination_not_empty(); From 59f598ab513653280c922c3b1adfe71d73e006a8 Mon Sep 17 00:00:00 2001 From: Alex Miller Date: Fri, 8 Mar 2019 21:46:32 -0800 Subject: [PATCH 60/71] Try adding a header? 
--- flow/Platform.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/Platform.cpp b/flow/Platform.cpp index c0c9aecb44..8d358229b8 100644 --- a/flow/Platform.cpp +++ b/flow/Platform.cpp @@ -49,6 +49,7 @@ #ifdef _WIN32 #define NOMINMAX #include +#include #include #include #include From 806655675381eccb67ae5a6c2acb69aabd84083a Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Sat, 9 Mar 2019 10:48:22 -0500 Subject: [PATCH 61/71] address review comments and bugs after running binding tester compared to python bindings --- bindings/go/src/fdb/tuple/tuple.go | 38 +++++++++++++++++------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index d3f289e2eb..5cc1990cc5 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -76,7 +76,7 @@ type UUID [16]byte // Versionstamp is struct for a FoundationDB verionstamp. Versionstamps are // 12 bytes long composed of a 10 byte transaction version and a 2 byte user // version. The transaction version is filled in at commit time and the user -// version is provided by your layer during a transaction. +// version is provided by the application to order results within a transaction. 
type Versionstamp struct { TransactionVersion [10]byte UserVersion uint16 @@ -295,9 +295,12 @@ func (p *packer) encodeUUID(u UUID) { func (p *packer) encodeVersionstamp(v Versionstamp) { p.putByte(versionstampCode) - if p.versionstampPos != -1 && v.TransactionVersion == incompleteTransactionVersion { - panic(fmt.Sprintf("Tuple can only contain one incomplete versionstamp")) - } else { + isIncomplete := v.TransactionVersion == incompleteTransactionVersion + if isIncomplete { + if p.versionstampPos != -1 { + panic(fmt.Sprintf("Tuple can only contain one incomplete versionstamp")) + } + p.versionstampPos = int32(len(p.buf)) } @@ -363,9 +366,9 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { // Pack returns a new byte slice encoding the provided tuple. Pack will panic if // the tuple contains an element of any type other than []byte, // fdb.KeyConvertible, string, int64, int, uint64, uint, *big.Int, big.Int, float32, -// float64, bool, tuple.UUID, nil, or a Tuple with elements of valid types. It will -// also panic if an integer is specified with a value outside the range -// [-2**2040+1, 2**2040-1] +// float64, bool, tuple.UUID, tuple.Versionstamp, nil, or a Tuple with elements of +// valid types. It will also panic if an integer is specified with a value outside +// the range [-2**2040+1, 2**2040-1] // // Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to // call Pack when using a Tuple with a FoundationDB API function that requires a @@ -384,22 +387,25 @@ func (t Tuple) Pack() []byte { // operations. See Pack for more information. This function will return an error // if you attempt to pack a tuple with more than one versionstamp. This function will // return an error if you attempt to pack a tuple with a versionstamp position larger -// than an uint16 on apiVersion < 520. +// than an uint16 if the API version is less than 520. 
func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) { hasVersionstamp, err := t.HasIncompleteVersionstamp() if err != nil { return nil, err } + apiVersion, err := fdb.GetAPIVersion() + if err != nil { + return nil, err + } + if hasVersionstamp == false { return nil, errors.New("No incomplete versionstamp included in tuple pack with versionstamp") } p := newPacker() - prefixLength := int32(0) if prefix != nil { - prefixLength = int32(len(prefix)) p.putBytes(prefix) } @@ -408,18 +414,16 @@ func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) { if hasVersionstamp { var scratch [4]byte var offsetIndex int - - apiVersion := fdb.MustGetAPIVersion() if apiVersion < 520 { if p.versionstampPos > math.MaxUint16 { return nil, errors.New("Versionstamp position too large") } - offsetIndex = 1 - binary.LittleEndian.PutUint16(scratch[:], uint16(prefixLength+p.versionstampPos)) + offsetIndex = 2 + binary.LittleEndian.PutUint16(scratch[:], uint16(p.versionstampPos)) } else { - offsetIndex = 3 - binary.LittleEndian.PutUint32(scratch[:], uint32(prefixLength+p.versionstampPos)) + offsetIndex = 4 + binary.LittleEndian.PutUint32(scratch[:], uint32(p.versionstampPos)) } p.putBytes(scratch[0:offsetIndex]) @@ -439,7 +443,7 @@ func (t Tuple) HasIncompleteVersionstamp() (bool, error) { err = errors.New("Tuple can only contain one incomplete versionstamp") } - return incompleteCount == 1, err + return incompleteCount >= 1, err } func (t Tuple) countIncompleteVersionstamps() int { From 92167fd03f4d78b064328939adc73b5f91697c86 Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Sat, 9 Mar 2019 11:11:22 -0500 Subject: [PATCH 62/71] handle incomplete versionstamp attempting to be packed into vanilla tuple --- bindings/go/src/fdb/tuple/tuple.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index 5cc1990cc5..a37ce5f3e8 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ 
b/bindings/go/src/fdb/tuple/tuple.go @@ -307,7 +307,7 @@ func (p *packer) encodeVersionstamp(v Versionstamp) { p.putBytes(v.Bytes()) } -func (p *packer) encodeTuple(t Tuple, nested bool) { +func (p *packer) encodeTuple(t Tuple, nested bool, versionstamps bool) { if nested { p.putByte(nestedCode) } @@ -315,7 +315,7 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { for i, e := range t { switch e := e.(type) { case Tuple: - p.encodeTuple(e, true) + p.encodeTuple(e, true, versionstamps) case nil: p.putByte(nilCode) if nested { @@ -352,6 +352,10 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { case UUID: p.encodeUUID(e) case Versionstamp: + if versionstamps == false && e.TransactionVersion == incompleteTransactionVersion { + panic(fmt.Sprintf("Incomplete Versionstamp included in vanilla tuple pack")) + } + p.encodeVersionstamp(e) default: panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i])) @@ -379,7 +383,7 @@ func (p *packer) encodeTuple(t Tuple, nested bool) { // func (t Tuple) Pack() []byte { p := newPacker() - p.encodeTuple(t, false) + p.encodeTuple(t, false, false) return p.buf } @@ -409,7 +413,7 @@ func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) { p.putBytes(prefix) } - p.encodeTuple(t, false) + p.encodeTuple(t, false, true) if hasVersionstamp { var scratch [4]byte From c3043971663b8b443e1213e8b2aadee39127a180 Mon Sep 17 00:00:00 2001 From: Evan Tschannen Date: Sun, 10 Mar 2019 21:09:47 -0700 Subject: [PATCH 63/71] fixed a memory leak when allocating memory > 4K and < 8K --- flow/Arena.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/Arena.h b/flow/Arena.h index ef5f77bda9..b29b244f0c 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -116,7 +116,7 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted { enum { SMALL = 64, - LARGE = 4097 // If size == used == LARGE, then use hugeSize, hugeUsed + LARGE = 8193 // If size == used == LARGE, then use hugeSize, hugeUsed }; enum { 
NOT_TINY = 255, TINY_HEADER = 6 }; From 314e87edfb74d1c0c9e6bb4e0604e62df7d8742e Mon Sep 17 00:00:00 2001 From: Jingyu Zhou Date: Mon, 11 Mar 2019 11:08:50 -0700 Subject: [PATCH 64/71] Create tar.gz file for python package On Windows, the default package format can be zip, while tar.gz is expected. --- bindings/python/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt index 57259e7337..796185729e 100644 --- a/bindings/python/CMakeLists.txt +++ b/bindings/python/CMakeLists.txt @@ -62,7 +62,7 @@ endif() set(package_file_name foundationdb-${FDB_VERSION}.tar.gz) set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name}) add_custom_command(OUTPUT ${package_file} - COMMAND $ setup.py sdist && + COMMAND $ setup.py sdist --formats=gztar && ${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "Create Python sdist package") From d90da27ee5f8d345ca4e47cd75eda97223f5218a Mon Sep 17 00:00:00 2001 From: Ryan Worl Date: Mon, 11 Mar 2019 14:33:32 -0400 Subject: [PATCH 65/71] Add Go to list of supported bindings for `set_versionstamped_key` in the Tuple layer. --- documentation/sphinx/source/api-common.rst.inc | 4 ++-- fdbclient/vexillographer/fdb.options | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/documentation/sphinx/source/api-common.rst.inc b/documentation/sphinx/source/api-common.rst.inc index 80f26fcd9b..6ce102359f 100644 --- a/documentation/sphinx/source/api-common.rst.inc +++ b/documentation/sphinx/source/api-common.rst.inc @@ -142,10 +142,10 @@ A transaction is not permitted to read any transformed key or value previously set within that transaction, and an attempt to do so will result in an error. .. |atomic-versionstamps-tuple-warning-key| replace:: - At this time, versionstamped keys are not compatible with the Tuple layer except in Java and Python. 
Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages. + At this time, versionstamped keys are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages. .. |atomic-versionstamps-tuple-warning-value| replace:: - At this time, versionstamped values are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages. + At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages. .. |api-version| replace:: 610 diff --git a/fdbclient/vexillographer/fdb.options b/fdbclient/vexillographer/fdb.options index d3161632b1..2194139224 100644 --- a/fdbclient/vexillographer/fdb.options +++ b/fdbclient/vexillographer/fdb.options @@ -247,10 +247,10 @@ description is not currently required but encouraged. description="Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database."/>