Initial repository commit
commit a674cb4ef4

@@ -0,0 +1,89 @@
# Build artifacts
/bin/
/lib/
/packages/
bindings/flow/bin/
bindings/java/foundationdb-client*.jar
bindings/java/foundationdb-tests*.jar
bindings/java/fdb-java-*-sources.jar
bindings/nodejs/build/
bindings/nodejs/modules/
packaging/msi/FDBInstaller.msi

# Generated source, build, and packaging files
*.g.cpp
*.g.h
*.g.S
*.g.asm
*.pom
bindings/go/src/fdb/generated.go
bindings/java/pom*.xml
bindings/java/src*/main/overview.html
bindings/java/src*/main/com/apple/foundationdb/*Options.java
bindings/java/src*/main/com/apple/foundationdb/StreamingMode.java
bindings/java/src*/main/com/apple/foundationdb/MutationType.java
bindings/java/src*/main/com/apple/foundationdb/ConflictRangeType.java
bindings/java/src*/main/com/apple/foundationdb/FDBException.java
bindings/nodejs/package.json
bindings/python/fdb/fdb*options.py
bindings/python/dist/
bindings/python/setup.py
bindings/python/MANIFEST
bindings/ruby/lib/fdboptions.rb
bindings/ruby/fdb.gemspec
fdbclient/vexillographer/obj/
fdbrpc/hgVersion*.h
fdbrpc/libeio/config.h
flow/hgVersion*.h
generated.mk
versions.h
packaging/msi/FDBInstaller.wix*

# Intermediate build artifacts and dependencies
*.class
*.py[cod]
*.sass-cache
.ccache
.deps/
.objs/
bindings/c/fdb_c.symbols
bindings/go/build
bindings/go/godoc
bindings/java/.classstamp*
bindings/java/classes*/
bindings/java/javadoc*/
bindings/nodejs/fdb_node.stamp
bindings/nodejs/node_modules/

# Testing and logging
bindings/nodejs/fdb_node*.log
bindings/nodejs/npm-debug.log
packaging/msi/*.log
packaging/msi/obj
simfdb
tests/oldBinaries
trace.*.xml

# Editor files
*.iml
*.opensdf
*.sdf
*.suo
*.user
.idea/
.project
.pydevproject
.vscode
FoundationDB.xcodeproj
foundationdb.VC.db
foundationdb.VC.VC.opendb
ipch/

# Temporary and user configuration files
*~
*.orig
*.rej
*.swp
.envrc
.DS_Store
temp/
@@ -0,0 +1,459 @@
Acknowledgements
Portions of this FoundationDB Software may utilize the following copyrighted
material, the use of which is hereby acknowledged.

_____________________

Mark Adler, Robert Važan (CRC-32C [Castagnoli] for C++ and .NET)
This license covers C++ code of the CRC-32C library as well as binaries generated
from it. It is standard zlib license. .NET code is distributed under BSD license.

Copyright (c) 2013 - 2014 Mark Adler, Robert Važan

This software is provided ‘as-is’, without any express or implied warranty. In no
event will the author be held liable for any damages arising from the use of this
software.

Permission is granted to anyone to use this software for any purpose, including
commercial applications, and to alter it and redistribute it freely, subject to
the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that
you wrote the original software. If you use this software in a product, an
acknowledgment in the product documentation would be appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.

3. This notice may not be removed or altered from any source distribution.

Steven J. Bethard (argparse.py from https://code.google.com/p/argparse/)
argparse is licensed under the Python license, see:
https://code.google.com/p/argparse/source/browse/LICENSE.txt and
https://code.google.com/p/argparse/source/browse/doc/source/Python-License.txt

Russ Cox (asm.S from libcoroutine)
This software was developed as part of a project at MIT.

Copyright (c) 2005-2007 Russ Cox,
Massachusetts Institute of Technology

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Beman Dawes, Christopher M. Kohlhoff (Boost)
The Boost License (http://www.boost.org/users/license.html) does not require
attribution or republication when Boost is redistributed in object code form.

Steve Dekorte (libcoroutine)
Copyright (c) 2002, 2003 Steve Dekorte
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

• Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

• Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

• Neither the name of the author nor the names of other contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Jean-loup Gailly, Mark Adler (zlib)
Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Jean-loup Gailly Mark Adler
jloup@gzip.org madler@alumni.caltech.edu

The Go Authors (Go Tools)
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Bob Jenkins (lookup3.c)
By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.

JS Foundation and other contributors (JQuery)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Marcin Kalicinski (RapidXML)
Copyright (c) 2006, 2007 Marcin Kalicinski

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

Marc Alexander Lehmann (Libeio)
All files in libeio are Copyright (C)2007,2008 Marc Alexander Lehmann.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Microsoft Corporation (CppWindowsService)
Microsoft Public License (Ms-PL)

This license governs use of the accompanying software. If you use the software,
you accept this license. If you do not accept the license, do not use the
software.

Definitions
The terms "reproduce," "reproduction," "derivative works," and "distribution"
have the same meaning here as under U.S. copyright law.

A "contribution" is the original software, or any additions or changes to the
software.
A "contributor" is any person that distributes its contribution under this
license.
"Licensed patents" are a contributor's patent claims that read directly on its
contribution.

Grant of Rights
(A) Copyright Grant- Subject to the terms of this license, including the license
conditions and limitations in section 3, each contributor grants you a
non-exclusive, worldwide, royalty-free copyright license to reproduce its
contribution, prepare derivative works of its contribution, and distribute its
contribution or any derivative works that you create.
(B) Patent Grant- Subject to the terms of this license, including the license
conditions and limitations in section 3, each contributor grants you a
non-exclusive, worldwide, royalty-free license under its licensed patents to
make, have made, use, sell, offer for sale, import, and/or otherwise dispose of
its contribution in the software or derivative works of the contribution in the
software.

Conditions and Limitations
(A) No Trademark License- This license does not grant you rights to use any
contributors' name, logo, or trademarks.
(B) If you bring a patent claim against any contributor over patents that you
claim are infringed by the software, your patent license from such contributor
to the software ends automatically.
(C) If you distribute any portion of the software, you must retain all
copyright, patent, trademark, and attribution notices that are present in the
software.
(D) If you distribute any portion of the software in source code form, you may
do so only under this license by including a complete copy of this license with
your distribution. If you distribute any portion of the software in compiled or
object code form, you may only do so under a license that complies with this
license.
(E) The software is licensed "as-is." You bear the risk of using it. The
contributors give no express warranties, guarantees, or conditions. You may have
additional consumer rights under your local laws which this license cannot
change. To the extent permitted under your local laws, the contributors exclude
the implied warranties of merchantability, fitness for a particular purpose and
non-infringement.

Marcel Moolenaar (amd64-ucontext.h)
Copyright (c) 1999 Marcel Moolenaar

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer
in this position and unchanged.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Alexander Peslyak (md5)
Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
general public under the following terms:

Redistribution and use in source and binary forms, with or without
modification, are permitted.

There's ABSOLUTELY NO WARRANTY, express or implied.

Steve Reid, Bruce Guenter, Volker Grabsch
100% Public Domain.

Salvatore Sanfilippo, Pieter Noordhuis (Linenoise)
Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

SQLite authors (SQLite)
All of the code and documentation in SQLite has been dedicated to the public
domain by the authors. All code authors, and representatives of the companies
they work for, have signed affidavits dedicating their contributions to the
public domain and originals of those signed affidavits are stored in a
firesafe at the main offices of Hwaci. Anyone is free to copy, modify, publish,
use, compile, sell, or distribute the original SQLite code, either in source
code form or as a compiled binary, for any purpose, commercial or
non-commercial, and by any means.

The Abseil Authors (Abseil)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Brodie Thiesfield (SimpleIni, SimpleOpt)
Copyright (c) 2006-2012, Brodie Thiesfield

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

THL A29 Limited, Milo Yip (RapidJSON)
Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.

Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at

http://opensource.org/licenses/MIT

Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

Unicode, Inc. (ConvertUTF from SimpleIni)
Copyright 2001-2004 Unicode, Inc.

Disclaimer

This source code is provided as is by Unicode, Inc. No claims are
made as to fitness for any particular purpose. No warranties of any
kind are expressed or implied. The recipient agrees to determine
applicability of information provided. If this file has been
purchased on magnetic or optical media from Unicode, Inc., the
sole remedy for any claim will be exchange of defective media
within 90 days of receipt.

Limitations on Rights to Redistribute This Code

Unicode, Inc. hereby grants the right to freely use the information
supplied in this file in the creation of products supporting the
Unicode Standard, and to make copies of this file in any form
for internal or external distribution as long as this notice
remains attached.

Chris Venter (libb64)
The libb64 project has been placed in the public domain.

Dmitry Vyukov (Intrusive MPSC node-based queue)
Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

John W. Wilkinson (JSON Spirit)
Copyright (c) 2007 - 2010 John W. Wilkinson

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,207 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

-------------------------------------------------------------------------------
SOFTWARE DISTRIBUTED WITH FOUNDATIONDB:

The FoundationDB software includes a number of subcomponents with separate
copyright notices and license terms - please see the file ACKNOWLEDGEMENTS.
-------------------------------------------------------------------------------
@@ -0,0 +1,192 @@
export
PLATFORM := $(shell uname)
ARCH := $(shell uname -m)

TOPDIR := $(shell pwd)

ifeq ($(ARCH),x86_64)
  ARCH := x64
else
  $(error Not prepared to compile on $(ARCH))
endif

MONO := $(shell which mono)
ifeq ($(MONO),)
  MONO := /usr/bin/mono
endif

MCS := $(shell which dmcs)
ifeq ($(MCS),)
  MCS := /usr/bin/dmcs
endif

CFLAGS := -Werror -Wno-error=format -fPIC -DNO_INTELLISENSE -fvisibility=hidden -DNDEBUG=1 -Wreturn-type
ifeq ($(RELEASE),true)
  CFLAGS += -DFDB_CLEAN_BUILD
endif
ifeq ($(NIGHTLY),true)
  CFLAGS += -DFDB_CLEAN_BUILD
endif

ifeq ($(PLATFORM),Linux)
  PLATFORM := linux

  CC ?= gcc
  CXX ?= g++

  CXXFLAGS += -std=c++0x

  BOOSTDIR ?= /opt/boost_1_52_0
  DLEXT := so
  java_DLEXT := so
  TARGET_LIBC_VERSION ?= 2.11
else ifeq ($(PLATFORM),Darwin)
  PLATFORM := osx

  CC := /usr/bin/clang
  CXX := /usr/bin/clang

  CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++
  CXXFLAGS += -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option

  .LIBPATTERNS := lib%.dylib lib%.a

  BOOSTDIR ?= $(HOME)/boost_1_52_0
  DLEXT := dylib
  java_DLEXT := jnilib
else
  $(error Not prepared to compile on platform $(PLATFORM))
endif

CCACHE := $(shell which ccache)
ifneq ($(CCACHE),)
  CCACHE_CC := $(CCACHE) $(CC)
  CCACHE_CXX := $(CCACHE) $(CXX)
else
  CCACHE_CC := $(CC)
  CCACHE_CXX := $(CXX)
endif

ACTORCOMPILER := bin/actorcompiler.exe

# UNSTRIPPED := 1

# Normal optimization level
CFLAGS += -O2

# Or turn off optimization entirely
# CFLAGS += -O0

# Debugging symbols are a good thing (and harmless, since we keep them
# in external debug files)
CFLAGS += -g

# valgrind-compatible builds are enabled by uncommenting lines in valgrind.mk

CXXFLAGS += -Wno-deprecated
LDFLAGS :=
LIBS :=
STATIC_LIBS :=

# Add library search paths (that aren't -Llib) to the VPATH
VPATH += $(addprefix :,$(filter-out lib,$(patsubst -L%,%,$(filter -L%,$(LDFLAGS)))))

CS_PROJECTS := flow/actorcompiler flow/coveragetool fdbclient/vexillographer
CPP_PROJECTS := flow fdbrpc fdbclient fdbbackup fdbserver fdbcli bindings/c bindings/java fdbmonitor bindings/flow/tester bindings/flow
OTHER_PROJECTS := bindings/python bindings/ruby bindings/nodejs bindings/go

CS_MK_GENERATED := $(CS_PROJECTS:=/generated.mk)
CPP_MK_GENERATED := $(CPP_PROJECTS:=/generated.mk)

MK_GENERATED := $(CS_MK_GENERATED) $(CPP_MK_GENERATED)

# build/valgrind.mk needs to be included before any _MK_GENERATED (which in turn includes local.mk)
MK_INCLUDE := build/scver.mk build/valgrind.mk $(CS_MK_GENERATED) $(CPP_MK_GENERATED) $(OTHER_PROJECTS:=/include.mk) build/packages.mk

ALL_MAKEFILES := Makefile $(MK_INCLUDE) $(patsubst %/generated.mk,%/local.mk,$(MK_GENERATED))

TARGETS =

.PHONY: clean all Makefiles

default: fdbserver fdbbackup fdbcli fdb_c fdb_python fdb_python_sdist

all: $(CS_PROJECTS) $(CPP_PROJECTS) $(OTHER_PROJECTS)

# These are always defined and ready to use. Any target that uses them and needs them up to date
# should depend on versions.target
VERSION := $(shell cat versions.target | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')
PACKAGE_NAME := $(shell cat versions.target | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')

versions.h: Makefile versions.target
	@rm -f $@
ifeq ($(RELEASE),true)
	@echo "#define FDB_VT_VERSION \"$(VERSION)\"" >> $@
else
	@echo "#define FDB_VT_VERSION \"$(VERSION)-PRERELEASE\"" >> $@
endif
	@echo "#define FDB_VT_PACKAGE_NAME \"$(PACKAGE_NAME)\"" >> $@

bindings: fdb_c fdb_python fdb_ruby fdb_java fdb_java-completable fdb_node fdb_flow fdb_flow_tester fdb_go fdb_go_tester

Makefiles: $(MK_GENERATED)

$(CS_MK_GENERATED): build/csprojtom4.py build/csproj.mk Makefile
	@echo "Creating $@"
	@python build/csprojtom4.py $(@D)/*.csproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.csproj .csproj` - build/csproj.mk > $(@D)/generated.mk

$(CPP_MK_GENERATED): build/vcxprojtom4.py build/vcxproj.mk Makefile
	@echo "Creating $@"
	@python build/vcxprojtom4.py $(@D)/*.vcxproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.vcxproj .vcxproj` - build/vcxproj.mk > $(@D)/generated.mk

DEPSDIR := .deps
OBJDIR := .objs

include $(MK_INCLUDE)

clean: $(CLEAN_TARGETS)
	@echo "Cleaning toplevel"
	@rm -rf $(OBJDIR)
	@rm -rf $(DEPSDIR)
	@rm -rf lib/libstdc++.a
	@rm -rf bin/coverage.*.xml

targets:
	@echo "Available targets:"
	@for i in $(sort $(TARGETS)); do echo " $$i" ; done
	@echo "Append _clean to clean specific target."

lib/libstdc++.a: $(shell $(CC) -print-file-name=libstdc++_pic.a)
	@echo "Frobnicating $@"
	@mkdir -p lib
	@rm -rf .libstdc++
	@mkdir .libstdc++
	@(cd .libstdc++ && ar x $<)
	@for i in .libstdc++/*.o ; do \
		nm $$i | grep -q \@ || continue ; \
		nm $$i | awk '$$3 ~ /@@/ { COPY = $$3; sub(/@@.*/, "", COPY); print $$3, COPY; }' > .libstdc++/replacements ; \
		objcopy --redefine-syms=.libstdc++/replacements $$i $$i.new && mv $$i.new $$i ; \
		rm .libstdc++/replacements ; \
		nm $$i | awk '$$3 ~ /@/ { print $$3; }' > .libstdc++/deletes ; \
		objcopy --strip-symbols=.libstdc++/deletes $$i $$i.new && mv $$i.new $$i ; \
		rm .libstdc++/deletes ; \
	done
	@ar rcs $@ .libstdc++/*.o
	@rm -r .libstdc++


ifeq ($(PLATFORM),osx)
  MD5SUM=md5
else
  MD5SUM=md5sum
endif

.SECONDEXPANSION:

bin/coverage.%.xml: bin/coveragetool.exe $$(%_ALL_SOURCES)
	@echo "Creating $@"
	@$(MONO) bin/coveragetool.exe $@ $(filter-out $<,$^) >/dev/null

$(CPP_MK_GENERATED): $$(@D)/*.vcxproj

$(CS_MK_GENERATED): $$(@D)/*.csproj
@@ -0,0 +1,3 @@
# FoundationDB

FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using API language bindings.
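
The ordered key-value model and transactions are easiest to see through the Python binding that ships in `bindings/python`. A minimal sketch, assuming a running cluster reachable through the default cluster file; the keys and the `set_many` helper are illustrative only, while `fdb.api_version`, `fdb.open`, and `@fdb.transactional` are the same binding entry points used by the bindingtester later in this commit:

```python
import fdb

# Select an API version before any other call; 500 matches the
# FDB_API_VERSION used by the bindingtester below.
fdb.api_version(500)

# Open the database described by the default cluster file.
db = fdb.open()

# Reads and writes on the Database object each run as their own
# ACID transaction.
db[b'hello'] = b'world'
print(db[b'hello'])  # prints the stored value, b'world'

# Grouping operations with @fdb.transactional makes them commit
# atomically, with automatic retry on conflicts.
@fdb.transactional
def set_many(tr, items):
    for k, v in items:
        tr[k] = v

set_many(db, [(b'a', b'1'), (b'b', b'2')])
```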
@@ -0,0 +1,20 @@
#
# __init__.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

@@ -0,0 +1,94 @@
#
# __init__.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import os

sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]

import util

FDB_API_VERSION = 500

LOGGING = {
    'version' : 1,
    'disable_existing_loggers' : False,
    'formatters' : {
        'simple' : {
            'format' : '%(message)s'
        }
    },
    'handlers' : {
        'console' : {
            'level' : 'NOTSET',
            'class' : 'logging.StreamHandler',
            'stream' : sys.stdout,
            'formatter' : 'simple'
        }
    },
    'loggers' : {
        'foundationdb.bindingtester' : {
            'level' : 'INFO',
            'handlers' : ['console']
        }
    }
}

class Result:
    def __init__(self, subspace, key, values):
        self.subspace_tuple = util.subspace_to_tuple(subspace)
        self.key_tuple = subspace.unpack(key)
        self.values = values

    def matches(self, rhs, specification):
        if not isinstance(rhs, Result):
            return False

        left_key = self.key_tuple[specification.key_start_index:]
        right_key = rhs.key_tuple[specification.key_start_index:]

        if len(left_key) != len(right_key) or left_key != right_key:
            return False

        for value in self.values:
            for rValue in rhs.values:
                if value == rValue:
                    return True

        return False

    def matches_global_error_filter(self, specification):
        return any([specification.matches_global_error_filter(v) for v in self.values])

    # A non-unique sequence of numbers used to align results from different testers
    def sequence_num(self, specification):
        if specification.ordering_index is not None:
            return self.key_tuple[specification.ordering_index]

        return None

    def __str__(self):
        if len(self.values) == 1:
            value_str = repr(self.values[0])
        else:
            value_str = repr(self.values)

        return '%s = %s' % (repr(self.subspace_tuple + self.key_tuple), value_str)
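
The `LOGGING` dictionary above follows the standard `logging.config.dictConfig` schema: one stdout `console` handler attached to the `foundationdb.bindingtester` logger at `INFO`. A short sketch of how a consumer could apply it; this wiring is an assumption for illustration, since in this commit the configured logger is actually reached through `util.get_logger()`:

```python
import logging
import logging.config

from bindingtester import LOGGING

# Install the formatter/handler/logger tree declared in LOGGING.
logging.config.dictConfig(LOGGING)

# INFO and above now flow to stdout through the 'console' handler.
logger = logging.getLogger('foundationdb.bindingtester')
logger.info('tester starting')
```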
@ -0,0 +1,450 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# bindingtester.py
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
import struct
|
||||
import random
|
||||
import argparse
|
||||
import math
|
||||
import os
|
||||
import copy
|
||||
import traceback
|
||||
from threading import Timer, Event
|
||||
|
||||
import logging.config
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..')]
|
||||
|
||||
import bindingtester
|
||||
|
||||
from bindingtester import FDB_API_VERSION
|
||||
from bindingtester import Result
|
||||
|
||||
from bindingtester import util
|
||||
from bindingtester.tests import Test, InstructionSet
|
||||
|
||||
from known_testers import Tester
|
||||
|
||||
import fdb
|
||||
import fdb.tuple
|
||||
|
||||
fdb.api_version(FDB_API_VERSION)
|
||||
|
||||
class ResultSet(object):
|
||||
def __init__(self, specification):
|
||||
self.specification = specification
|
||||
self.tester_results = OrderedDict()
|
||||
|
||||
def add(self, name, results):
|
||||
num = 1
|
||||
base_name = name
|
||||
while name in self.tester_results:
|
||||
num += 1
|
||||
name = '%s (%d)' % (base_name, num)
|
||||
|
||||
self.tester_results[name] = results
|
||||
|
||||
def check_for_errors(self):
|
||||
if len(self.tester_results) == 1:
|
||||
return (0, False)
|
||||
|
||||
util.get_logger().info('Comparing results from \'%s\'...' % repr(util.subspace_to_tuple(self.specification.subspace)))
|
||||
|
||||
num_errors = 0
|
||||
indices = [0 for i in range(len(self.tester_results))]
|
||||
|
||||
name_length = max([len(name) for name in self.tester_results.keys()])
|
||||
|
||||
has_filtered_error = False
|
||||
|
||||
while True:
|
||||
results = { i : r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i] }
|
||||
if len(results) == 0:
|
||||
break
|
||||
|
||||
sequence_nums = [ r.sequence_num(self.specification) for r in results.values() ]
|
||||
if any([s is not None for s in sequence_nums]):
|
||||
results = { i : r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums) }
|
||||
else:
|
||||
results = { i : r for i, r in results.items() if r.matches(min(results.values()), self.specification) }
|
||||
|
||||
for i in results.keys():
|
||||
indices[i] += 1
|
||||
|
||||
all_results = { i : results[i] if i in results else None for i in range(len(self.tester_results)) }
|
||||
result_str = '\n'.join([' %-*s - %s' % (name_length, self.tester_results.keys()[i], r) for i, r in all_results.items()])
|
||||
|
||||
result_list = results.values()
|
||||
if any(r.matches_global_error_filter(self.specification) for r in result_list):
|
||||
has_filtered_error = True
|
||||
|
||||
if len(results) < len(all_results) or not all(result_list[0].matches(r, self.specification) for r in result_list):
|
||||
util.get_logger().error('\nIncorrect result: \n%s' % result_str)
|
||||
num_errors += 1
|
||||
else:
|
||||
util.get_logger().debug('\nCorrect result: \n%s' % result_str)
|
||||
|
||||
if num_errors > 0:
|
||||
util.get_logger().error('')
|
||||
else:
|
||||
util.get_logger().debug('')
|
||||
|
||||
return (num_errors, has_filtered_error)
|
||||
|
||||
def choose_api_version(selected_api_version, tester_min_version, tester_max_version, test_min_version, test_max_version):
    if selected_api_version is not None:
        if selected_api_version < tester_min_version or selected_api_version > tester_max_version:
            raise Exception('Not all testers support the API version %d (min=%d, max=%d)' % (selected_api_version, tester_min_version, tester_max_version))
        elif selected_api_version < test_min_version or selected_api_version > test_max_version:
            raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' % (selected_api_version, test_min_version, test_max_version))

        api_version = selected_api_version
    else:
        min_version = max(tester_min_version, test_min_version)
        max_version = min(tester_max_version, test_max_version)

        if min_version > max_version:
            raise Exception('Not all testers support the API versions required by the specified test (tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))

        # Usually test the extremes of the supported range; occasionally pick a
        # known historical version or a fully random one in between
        if random.random() < 0.7:
            api_version = max_version
        elif random.random() < 0.7:
            api_version = min_version
        elif random.random() < 0.9:
            api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430, 440, 450, 460, 500] if v >= min_version and v <= max_version])
        else:
            api_version = random.randint(min_version, max_version)

    return api_version

class TestRunner(object):
    def __init__(self, args):
        self.args = copy.copy(args)

        self.db = fdb.open(self.args.cluster_file)
        self.test_seed = random.randint(0, 0xffffffff)

        self.testers = [Tester.get_test(self.args.test1)]
        if self.args.test2 is not None:
            self.testers.append(Tester.get_test(self.args.test2))

        self.test = Test.create_test(self.args.test_name, fdb.Subspace((self.args.output_subspace,)))

        if self.test is None:
            raise Exception('the test \'%s\' could not be found' % self.args.test_name)

        min_api_version = max([tester.min_api_version for tester in self.testers])
        max_api_version = min([tester.max_api_version for tester in self.testers])
        self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version, self.test.min_api_version, self.test.max_api_version)

        util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)

        max_int_bits = min([tester.max_int_bits for tester in self.testers])
        if self.args.max_int_bits is None:
            self.args.max_int_bits = max_int_bits
        elif self.args.max_int_bits > max_int_bits:
            raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' % (max_int_bits, self.args.max_int_bits))

        self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
        if self.args.no_threads and self.args.concurrency > 1:
            raise Exception('Not all testers support concurrency')

    def print_test(self):
        test_instructions = self._generate_test()

        for top_level_subspace, top_level_thread in test_instructions.items():
            for subspace, thread in top_level_thread.get_threads(top_level_subspace).items():
                util.get_logger().error('\nThread at prefix %r:' % util.subspace_to_tuple(subspace))
                if self.args.print_all:
                    instructions = thread
                    offset = 0
                else:
                    instructions = thread.core_instructions()
                    offset = thread.core_test_begin

                for i, instruction in enumerate(instructions):
                    if self.args.print_all or (instruction.operation != 'SWAP' and instruction.operation != 'PUSH'):
                        util.get_logger().error('  %d. %r' % (i + offset, instruction))

        util.get_logger().error('')

    def run_test(self):
        test_instructions = self._generate_test()
        expected_results = self.test.get_expected_results()

        tester_results = {s.subspace: ResultSet(s) for s in self.test.get_result_specifications()}
        for subspace, results in expected_results.items():
            tester_results[subspace].add('expected', results)

        tester_errors = {}

        for tester in self.testers:
            self._insert_instructions(test_instructions)
            self.test.pre_run(self.db, self.args)
            return_code = self._run_tester(tester)
            if return_code != 0:
                util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops))
                return 2

            tester_errors[tester] = self.test.validate(self.db, self.args)

            for spec in self.test.get_result_specifications():
                tester_results[spec.subspace].add(tester.name, self._get_results(spec.subspace))

        return_code = self._validate_results(tester_errors, tester_results)
        util.get_logger().info('Completed %s test with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops))

        return return_code

    def insert_test(self):
        test_instructions = self._generate_test()
        self._insert_instructions(test_instructions)

    def _generate_test(self):
        util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' % (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))

        random.seed(self.test_seed)

        if self.args.concurrency == 1:
            self.test.setup(self.args)
            test_instructions = {fdb.Subspace((self.args.instruction_prefix,)): self.test.generate(self.args, 0)}
        else:
            test_instructions = {}
            main_thread = InstructionSet()
            for i in range(self.args.concurrency):
                # thread_spec = fdb.Subspace(('thread_spec', i))
                thread_spec = 'thread_spec%d' % i
                main_thread.push_args(thread_spec)
                main_thread.append('START_THREAD')
                self.test.setup(self.args)
                test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i)

            test_instructions[fdb.Subspace((self.args.instruction_prefix,))] = main_thread

        return test_instructions

    def _insert_instructions(self, test_instructions):
        util.get_logger().info('\nInserting test into database...')
        del self.db[:]
        for subspace, thread in test_instructions.items():
            thread.insert_operations(self.db, subspace)

    def _run_tester(self, tester):
        params = tester.cmd.split(' ') + [self.args.instruction_prefix, str(self.args.api_version)]
        if self.args.cluster_file is not None:
            params += [self.args.cluster_file]

        util.get_logger().info('\nRunning tester \'%s\'...' % ' '.join(params))
        sys.stdout.flush()
        proc = subprocess.Popen(params)
        timed_out = Event()

        def killProc():
            proc.kill()
            timed_out.set()

        timer = Timer(self.args.timeout, killProc)
        try:
            timer.start()
            ret_code = proc.wait()
        except Exception as e:
            raise Exception('Unable to run tester (%s)' % e)
        finally:
            timer.cancel()

        if ret_code != 0:
            signal_name = str(ret_code)
            if ret_code < 0:
                signal_name = util.signal_number_to_name(-ret_code)

            reason = 'exit code: %s' % (signal_name,)
            if timed_out.is_set():
                reason = 'timed out after %d seconds' % (self.args.timeout,)
            util.get_logger().error('\n\'%s\' did not complete successfully (%s)' % (params[0], reason))

        util.get_logger().info('')
        return ret_code

    def _get_results(self, subspace, instruction_index=None):
        util.get_logger().info('Reading results from \'%s\'...' % repr(util.subspace_to_tuple(subspace)))

        results = []
        next_key = subspace.range().start
        while True:
            next_results = self.db.get_range(next_key, subspace.range().stop, 1000)
            if len(next_results) == 0:
                break

            results += [Result(subspace, kv.key, (kv.value,)) for kv in next_results]
            next_key = fdb.KeySelector.first_greater_than(next_results[-1].key)

        return results

    def _validate_results(self, tester_errors, tester_results):
        util.get_logger().info('')

        num_incorrect = 0
        has_filtered_error = False
        for r in tester_results.values():
            (count, filtered_error) = r.check_for_errors()
            num_incorrect += count
            has_filtered_error = has_filtered_error or filtered_error

        num_errors = sum([len(e) for e in tester_errors.values()])

        for tester, errors in tester_errors.items():
            if len(errors) > 0:
                util.get_logger().error('The %s tester reported errors:\n' % tester.name)
                for i, error in enumerate(errors):
                    util.get_logger().error('  %d. %s' % (i + 1, error))

        log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' % (self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
        if num_errors == 0 and (num_incorrect == 0 or has_filtered_error):
            util.get_logger().info(log_message)
            if has_filtered_error:
                util.get_logger().info("Test had permissible non-deterministic errors; disregarding results...")
            return 0
        else:
            util.get_logger().error(log_message)
            return 1

def bisect(test_runner, args):
    util.get_logger().info('')

    lower_bound = 0
    upper_bound = args.num_ops

    while True:
        test_runner.args.num_ops = int((lower_bound + upper_bound) / 2)
        result = test_runner.run_test()

        if lower_bound == upper_bound:
            if result != 0:
                util.get_logger().error('Found minimal failing test with %d operations' % lower_bound)
                if args.print_test:
                    test_runner.print_test()

                return 0
            elif upper_bound < args.num_ops:
                util.get_logger().error('Error finding minimal failing test for seed %d. The failure may not be deterministic' % args.seed)
                return 1
            else:
                util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.' % (args.seed, args.num_ops))
                return 0

        elif result == 0:
            util.get_logger().info('Test with %d operations succeeded\n' % test_runner.args.num_ops)
            lower_bound = test_runner.args.num_ops + 1

        else:
            util.get_logger().info('Test with %d operations failed with error code %d\n' % (test_runner.args.num_ops, result))
            upper_bound = test_runner.args.num_ops

def parse_args(argv):
    parser = argparse.ArgumentParser(description='FoundationDB Binding API Tester')
    parser.add_argument('--test-name', default='scripted', help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')

    parser.add_argument(metavar='tester1', dest='test1', help='Name of the first tester to invoke')
    parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2', help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument for the second tester to invoke (default = \'python\').')

    parser.add_argument('--print-test', action='store_true', help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all setup, finalization, PUSH, and SWAP instructions will be excluded.')
    parser.add_argument('--all', dest='print_all', action='store_true', help='Causes --print-test to print all instructions.')
    parser.add_argument('--bisect', action='store_true', help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for concurrent tests.')
    parser.add_argument('--insert-only', action='store_true', help='Insert the test instructions into the database, but do not run it.')

    parser.add_argument('--concurrency', type=int, default=1, help='Number of concurrent test threads to run. (default = 1).')
    parser.add_argument('--num-ops', type=int, default=100, help='The number of operations to generate per thread (default = 100)')
    parser.add_argument('--seed', type=int, help='The random seed to use for generating the test')
    parser.add_argument('--max-int-bits', type=int, default=None, help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being run will be chosen.')
    parser.add_argument('--api-version', default=None, type=int, help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by all testers)')
    parser.add_argument('--cluster-file', type=str, default=None, help='The cluster file for the cluster being connected to. (default None)')
    parser.add_argument('--timeout', type=int, default=600, help='The timeout in seconds for running each individual tester. (default 600)')
    parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.', help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
    parser.add_argument('--instruction-prefix', type=str, default='test_spec', help='The prefix under which the main thread of test instructions is inserted (default=\'test_spec\').')
    parser.add_argument('--output-subspace', type=str, default='tester_output', help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). (default=\'tester_output\')')

    parser.add_argument('--logging-level', type=str, default='INFO', choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')

    # SOMEDAY: this applies only to the scripted test. Should we invoke test files specifically (as in circus),
    # or invoke them here and allow tests to add arguments?
    parser.add_argument('--no-threads', action='store_true', help='Disables the START_THREAD instruction in the scripted test.')

    return parser.parse_args(argv)

def validate_args(args):
    if args.insert_only and args.bisect:
        raise Exception('--bisect cannot be used with --insert-only')
    if args.print_all and not args.print_test:
        raise Exception('cannot specify --all without --print-test')
    if args.bisect and not args.seed:
        raise Exception('--seed must be specified if using --bisect')
    if args.concurrency < 1:
        raise Exception('--concurrency must be a positive integer')
    if args.concurrency > 1 and args.test2:
        raise Exception('--compare cannot be used with concurrent tests')

def main(argv):
    args = parse_args(argv)
    try:
        from bindingtester import LOGGING

        logging.config.dictConfig(LOGGING)
        util.initialize_logger_level(args.logging_level)

        validate_args(args)

        if args.seed is None:
            args.seed = random.randint(0, 0xffffffff)

        random.seed(args.seed)

        if args.enable_client_trace_logging is not None:
            fdb.options.set_trace_enable(args.enable_client_trace_logging)

        test_runner = TestRunner(args)

        if args.bisect:
            return bisect(test_runner, args)

        if args.print_test:
            return test_runner.print_test()

        if args.insert_only:
            return test_runner.insert_test()

        return test_runner.run_test()

    except Exception as e:
        util.get_logger().error('\nERROR: %s' % e)
        util.get_logger().debug(traceback.format_exc())
        exit(3)

    except:
        util.get_logger().error('\nERROR: %s' % sys.exc_info()[0])
        util.get_logger().info(traceback.format_exc())
        exit(3)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
@ -0,0 +1,68 @@
#
# known_testers.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

MAX_API_VERSION = 500

class Tester:
    def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True):
        self.name = name
        self.cmd = cmd
        self.max_int_bits = max_int_bits
        self.min_api_version = min_api_version
        self.max_api_version = max_api_version
        self.threads_enabled = threads_enabled

    def supports_api_version(self, api_version):
        return api_version >= self.min_api_version and api_version <= self.max_api_version

    @classmethod
    def get_test(cls, test_name_or_args):
        if test_name_or_args in testers:
            return testers[test_name_or_args]
        else:
            return Tester(test_name_or_args.split(' ')[0], test_name_or_args)


def _absolute_path(path):
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', path)

_java_cmd = 'java -ea -cp %s:%s com.apple.cie.foundationdb.test.' % (
    _absolute_path('java/foundationdb-client.jar'),
    _absolute_path('java/foundationdb-tests.jar'))

_java_completable_cmd = 'java -ea -cp %s:%s com.apple.cie.foundationdb.test.' % (
    _absolute_path('java/foundationdb-client-completable.jar'),
    _absolute_path('java/foundationdb-tests-completable.jar'))

# We could set min_api_version lower on some of these if the testers were updated to support them
testers = {
    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION),
    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION),
    'node': Tester('node', _absolute_path('nodejs/tests/tester.js'), 53, 23, MAX_API_VERSION),
    'streamline': Tester('streamline', _absolute_path('nodejs/tests/streamline_tester._js'), 53, 23, MAX_API_VERSION),
    'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
    'java': Tester('java', _java_cmd + 'StackTester', 63, 500, MAX_API_VERSION),
    'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 63, 500, MAX_API_VERSION),
    'java_completable': Tester('java', _java_completable_cmd + 'StackTester', 63, 500, MAX_API_VERSION),
    'java_completable_async': Tester('java', _java_completable_cmd + 'AsyncStackTester', 63, 500, MAX_API_VERSION),
    'go': Tester('go', _absolute_path('go/bin/_stacktester'), 63, 200, MAX_API_VERSION),
    'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 200, MAX_API_VERSION),
}
@ -0,0 +1,382 @@
#!/bin/bash
######################################################
#
# FoundationDB Binding Test Script
#
# Test script for running FoundationDB binding tests
#
# Defines:
#
# Author: Alvin Moore
# Date: 16-04-28
# Version: 1.6
######################################################

# Defines
SCRIPTDIR=$( cd "${BASH_SOURCE[0]%\/*}" && pwd )
CWD=$(pwd)
OSNAME="$(uname -s)"
DEBUGLEVEL="${DEBUGLEVEL:-1}"
DISPLAYERROR="${DISPLAYERROR:-0}"
OPERATIONS="${OPERATIONS:-1000}"
HCAOPERATIONS="${HCAOPERATIONS:-100}"
CONCURRENCY="${CONCURRENCY:-5}"
BREAKONERROR="${BREAKONERROR:-0}"
RUNSCRIPTS="${RUNSCRIPTS:-1}"
RUNTESTS="${RUNTESTS:-1}"
RANDOMTEST="${RANDOMTEST:-0}"
BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async java_completable java_completable_async node go flow}"
LOGLEVEL="${LOGLEVEL:-INFO}"
_BINDINGTESTS=(${BINDINGTESTS})
DISABLEDTESTS=( 'ruby' )
TESTFILE="${SCRIPTDIR}/bindingtester.py"
TESTTYPES=('API' 'Concurrent API' 'Directory' 'Directory HCA')
TESTTOTAL="${#TESTTYPES[@]}"
TESTINDEX="${TESTINDEX:-$TESTTOTAL}"
LOGSTDOUT="${LOGSTDOUT:-0}"
CONSOLELOG="${CONSOLELOG:-${CWD}/console.log}"
VERSION="1.6"

# Display syntax
if [ "${#}" -lt 2 ]
then
    echo 'run_binding_tester.sh <number of cycles> <error file>'
    echo '   cycles: number of cycles to run test (0 => unlimited)'
    echo ''
    echo '   Modifiable Environment Variables:'
    echo '       CONCURRENCY:   number of concurrent requests'
    echo '       OPERATIONS:    number of operations per test'
    echo '       HCAOPERATIONS: number of HCA operations per test'
    echo '       BINDINGTESTS:  list of binding tests to run'
    echo '       BREAKONERROR:  stop on first error, if positive number'
    echo "       TESTINDEX:     (0-${TESTTOTAL}) ${TESTTYPES[*]}"
    echo '       RANDOMTEST:    select a single random test, if positive number'
    echo '       LOGLEVEL:      ERROR, WARNING, INFO, DEBUG'
    echo ''
    echo "   version: ${VERSION}"
    exit 1
fi

# Read arguments
MAXCYCLES="${1}"
ERRORFILE="${2}"

function logError()
{
    local status=0

    if [ "$#" -lt 3 ]
    then
        echo "logError <message> <output> <command executable> [args ...]"
        let status="${status} + 1"
    else
        local message="${1}"
        local output="${2}"
        local command="${3}"
        shift
        shift
        shift

        let errorTotal="${errorTotal} + 1"

        # Display the error, if enabled
        if [ "${DISPLAYERROR}" -gt 0 ]
        then
            printf '%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}"
            echo "Message: '${message}'"
            echo "Command: '${command} ${@}'"
            echo "Error: ${output}"
        fi

        # Create the file, if not present
        if [[ ! -f "${ERRORFILE}" ]]
        then
            dir=$(dirname "${ERRORFILE}")

            if [ ! -d "${dir}" ] && ! mkdir -p "${dir}"
            then
                echo "Failed to create directory: ${dir} for error file: ${ERRORFILE}"
                let status="${status} + 1"
                printf '\n%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}"
                echo "Message: '${message}'"
                echo "Command: '${command} ${@}'"
                echo "Error: ${output}"
            fi
        fi

        # Initialize the error log, if first error
        if [[ "${errorTotal}" -eq 1 ]]
        then
            :
        fi

        # Write the error to the log
        if [[ "${status}" -eq 0 ]]
        then
            printf '\n%-16s Error #%3d:\n' "$(date '+%F %H-%M-%S')" "${errorTotal}" >> "${ERRORFILE}"
            echo "Message: '${message}'" >> "${ERRORFILE}"
            echo "Command: '${command} ${@}'" >> "${ERRORFILE}"
            echo -n "Error:" >> "${ERRORFILE}"
            echo "${output}" >> "${ERRORFILE}"
            echo '----------------------------------------------------------------------------------------------------' >> "${ERRORFILE}"
        fi
    fi

    return "${status}"
}

function runCommand()
{
    local status=0

    if [ "$#" -lt 2 ]
    then
        echo "runCommand <message> <executable> [args ...]"
        let status="${status} + 1"
    else
        local message="${1}"
        local command="${2}"
        local time="${SECONDS}"
        shift
        shift

        if [ "${DEBUGLEVEL}" -gt 2 ]; then
            printf "%-16s %-70s \n" "" "${command} ${*}"
        fi

        if [ "${DEBUGLEVEL}" -gt 1 ]; then
            printf "%-16s %-40s " "" "${message}"
        fi

        if [ "${LOGSTDOUT}" -gt 0 ] ; then
            printf "Running command: ${command} ${*}\n\n" >> "${CONSOLELOG}"
            # Capture both stdout and stderr in the console log
            "${command}" "${@}" >> "${CONSOLELOG}" 2>&1
            result=$?
            output=$(cat "${CONSOLELOG}")
        else
            output=$("${command}" "${@}" 2>&1)
            result=$?
        fi
        let time="${SECONDS} - ${time}"

        # Check return code
        if [ "${result}" -ne 0 ]
        then
            if [ "${DEBUGLEVEL}" -gt 0 ]; then
                echo "failed after ${time} seconds."
            fi
            let status="${status} + 1"
            logError "${message}" "${output}" "${command}" "${@}"
        elif [ "${DEBUGLEVEL}" -gt 0 ]; then
            echo "passed in ${time} seconds."
        fi
    fi

    return "${status}"
}

function runScriptedTest()
{
    local status=0

    if [ "$#" -lt 1 ]
    then
        echo "runScriptedTest <test>"
        let status="${status} + 1"
    else
        local test="${1}"

        if ! runCommand "Scripting ${test} ..." 'python' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}"
        then
            let status="${status} + 1"
        fi
    fi

    return "${status}"
}

function runTest()
{
    local status=0

    if [ "$#" -lt 1 ]
    then
        echo "runTest <test>"
        let status="${status} + 1"
    else
        local test="${1}"

        if [ "${DEBUGLEVEL}" -gt 0 ]; then
            printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Testing ${test}"
        fi

        # API
        if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand "    ${TESTTYPES[0]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
        then
            let status="${status} + 1"
        fi

        # Concurrent API
        if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand "    ${TESTTYPES[1]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
        then
            let status="${status} + 1"
        fi

        # Directory
        if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand "    ${TESTTYPES[2]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
        then
            let status="${status} + 1"
        fi

        # Directory HCA
        if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand "    ${TESTTYPES[3]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}"
        then
            let status="${status} + 1"
        fi
    fi

    return "${status}"
}

# Initialize the variables
status=0
cycles=0
rundate="$(date +%F_%H-%M-%S)"
errorTotal=0


# Select a random test, if enabled
if [ "${RANDOMTEST}" -gt 0 ]
then
    let testIndex="${RANDOM} % ${#_BINDINGTESTS[@]}"
    randomTest="${_BINDINGTESTS[$testIndex]}"
    # Remove the random test from the list of binding tests
    _BINDINGTESTS=("${_BINDINGTESTS[@]/${randomTest}}")
    DISABLEDTESTS+=("${_BINDINGTESTS[@]}")
    _BINDINGTESTS=("${randomTest}")

    # Choose a random test type
    let TESTINDEX="${RANDOM} % ${TESTTOTAL}"

    # Select either scripted tests or regular tests, if both are enabled
    if [ "${RUNSCRIPTS}" -gt 0 ] && [ "${RUNTESTS}" -gt 0 ]; then
        # Run the scripted tests 1 time out of 100
        if [ $((${RANDOM} % 100)) -eq 0 ]; then
            RUNTESTS=0
        else
            RUNSCRIPTS=0
        fi
    fi
fi

# Determine the name of the test type
# from the test index
if [ "${TESTINDEX}" -lt "${TESTTOTAL}" ]; then
    TESTNAME="${TESTTYPES[$TESTINDEX]}"
else
    TESTNAME="All Tests"
    TESTINDEX="${TESTTOTAL}"
fi

if [ "${DEBUGLEVEL}" -gt 0 ]
then
    echo ''
    echo ''
    echo '*******************************************************************************************'
    echo ''
    printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "FoundationDB Binding Tester"
    printf "%-20s Host OS: %-40s \n" "" "${OSNAME}"
    printf "%-20s Max Cycles: %-40s \n" "" "${MAXCYCLES}"
    printf "%-20s Operations: %-40s \n" "" "${OPERATIONS}"
    printf "%-20s HCA Operations: %-40s \n" "" "${HCAOPERATIONS}"
    printf "%-20s Concurrency: %-40s \n" "" "${CONCURRENCY}"
    printf "%-20s Tests: (%2d) %-40s \n" "" "${#_BINDINGTESTS[@]}" "${_BINDINGTESTS[*]}"
    printf "%-20s Disabled: (%2d) %-40s \n" "" "${#DISABLEDTESTS[@]}" "${DISABLEDTESTS[*]}"
    printf "%-20s Error Log: %-40s \n" "" "${ERRORFILE}"
    printf "%-20s Log Level: %-40s \n" "" "${LOGLEVEL}"
    printf "%-20s Random Test: %-40s \n" "" "${RANDOMTEST}"
    printf "%-20s Test Type: (%d) %-40s \n" "" "${TESTINDEX}" "${TESTNAME}"
    printf "%-20s Run Scripts: %-40s \n" "" "${RUNSCRIPTS}"
    printf "%-20s Run Tests: %-40s \n" "" "${RUNTESTS}"
    printf "%-20s Debug Level: %-40s \n" "" "${DEBUGLEVEL}"
    printf "%-20s Script Version: %-40s \n" "" "${VERSION}"
    echo ''
fi

# Run the scripted tests, if enabled
if [ "${RUNSCRIPTS}" -gt 0 ]
then
    if [ "${DEBUGLEVEL}" -gt 0 ]; then
        printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Running scripted tests"
    fi

    for test in "${_BINDINGTESTS[@]}"
    do
        # Run the specified scripted test
        if ! runScriptedTest "${test}"
        then
            let status="${status} + 1"

            # Stop the test, if enabled
            if [[ "${BREAKONERROR}" -ne 0 ]]
            then
                break
            fi
        fi
    done
fi

# Run the individual tests, if enabled
while [[ "${RUNTESTS}" -gt 0 ]] && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ([[ "${cycles}" -lt "${MAXCYCLES}" ]] || [[ "${MAXCYCLES}" -eq 0 ]])
do
    let cycles="${cycles} + 1"
    if [ "${DEBUGLEVEL}" -gt 0 ]; then
        printf "\n%-16s Cycle #%3d \n" "$(date '+%F %H-%M-%S')" "${cycles}"
    fi

    for test in "${_BINDINGTESTS[@]}"
    do
        # Run the specified test
        if ! runTest "${test}"
        then
            let status="${status} + 1"

            # Stop the test, if enabled
            if [[ "${BREAKONERROR}" -ne 0 ]]
            then
                break
            fi
        fi
    done
done

# Final report
if [ "${status}" -eq 0 ]
then
    if [ "${DEBUGLEVEL}" -gt 0 ]; then
        printf "\n%-16s Successfully completed ${cycles} cycles of the FDB binding tester for ${#_BINDINGTESTS[@]} binding tests in %d seconds.\n" "$(date '+%F %H-%M-%S')" "${SECONDS}"
    fi
elif [ "${DEBUGLEVEL}" -gt 0 ]; then
    printf "\n%-16s Failed to complete all ${cycles} cycles of the FDB binding tester for ${#_BINDINGTESTS[@]} binding tests in %d seconds.\n" "$(date '+%F %H-%M-%S')" "${SECONDS}"
fi

if [ "${DEBUGLEVEL}" -gt 0 ]
then
    echo ''
    echo ''
    echo '*******************************************************************************************'
    echo ''
    printf "%-16s %-40s \n" "$(date '+%F %H-%M-%S')" "Binding Tester Results"
    printf "%-20s Cycles: %-40s \n" "" "${cycles}"
    printf "%-20s Failed Tests: %-40s \n" "" "${status}"
    printf "%-20s Errors: %-40s \n" "" "${errorTotal}"
    printf "%-20s Tests: (%2d) %-40s \n" "" "${#_BINDINGTESTS[@]}" "${_BINDINGTESTS[*]}"
    printf "%-20s Version: %-40s \n" "" "${VERSION}"
fi

# Ensure that status is a returnable number
if [[ "${status}" -ne 0 ]]; then
    status=1
fi

exit "${status}"
@ -0,0 +1,51 @@
#!/bin/bash

LOGGING_LEVEL=WARNING

function run() {
    echo "Running $1 api"
    ./bindingtester.py $1 --test-name api --cluster-file fdb.cluster --compare --num-ops 1000 --logging-level $LOGGING_LEVEL
    echo "Running $1 concurrent api"
    ./bindingtester.py $1 --test-name api --cluster-file fdb.cluster --num-ops 1000 --concurrency 5 --logging-level $LOGGING_LEVEL
    echo "Running $1 directory"
    ./bindingtester.py $1 --test-name directory --cluster-file fdb.cluster --compare --num-ops 1000 --logging-level $LOGGING_LEVEL
    echo "Running $1 directory hca"
    ./bindingtester.py $1 --test-name directory_hca --cluster-file fdb.cluster --num-ops 100 --concurrency 5 --logging-level $LOGGING_LEVEL
}

function scripted() {
    echo "Running $1 scripted"
    ./bindingtester.py $1 --test-name scripted --cluster-file fdb.cluster --logging-level $LOGGING_LEVEL
}

function run_scripted() {
    scripted python
    scripted python3
    scripted ruby
    scripted java
    scripted java_async
    scripted java_completable
    scripted java_completable_async
    scripted node
    scripted go
    scripted flow
}

run_scripted

i=1
while true; do
    echo "Pass $i"
    i=$((i+1))
    run python
    run python3
    run ruby
    run java
    run java_async
    run java_completable
    run java_completable_async
    run node
    #run streamline
    run go
    run flow
done
@ -0,0 +1,343 @@
Overview
--------

Your API test program must implement a simple stack machine that exercises the
FoundationDB API. The program is invoked with two or three arguments. The first
argument is a prefix that is the first element of a tuple, the second is the
API version, and the third argument is the path to a cluster file. If the
third argument is not specified, your program may assume that fdb.open() will
succeed with no arguments (an fdb.cluster file will exist in the current
directory). Otherwise, your program should connect to the cluster specified
by the given cluster file.

Your stack machine should begin reading the range returned by the tuple range
method of prefix and execute each instruction (stored in the value of the key)
until the range has been exhausted. When this stack machine (along with any
additional stack machines created as part of the test) has finished running,
your program should terminate.

Upon successful termination, your program should exit with code 0. If your
program or any of your stack machines failed to run correctly, then it should
exit with a nonzero exit code.

Instructions are also stored as packed tuples and should be expanded with the
tuple unpack method. The first element of the instruction tuple represents an
operation, and will always be returned as a unicode string. An operation may have
a second element which provides additional data, which may be of any tuple type.

Your stack machine must maintain a small amount of state while executing
instructions (a minimal sketch of this state appears after this list):

- A global transaction map from byte string to Transactions. This map is
  shared by all tester 'threads'.

- A stack of data items of mixed types and their associated metadata. At a
  minimum, each item should be stored with the 0-based instruction number
  which resulted in it being put onto the stack. Your stack must support push
  and pop operations. It may be helpful if it supports random access, clear
  and a peek operation. The stack is initialized to be empty.

- A current FDB transaction name (stored as a byte string). The transaction
  name should be initialized to the prefix that instructions are being read
  from.

- A last seen FDB version, which is a 64-bit integer.

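The following is a minimal Python sketch of this state and of the
instruction-reading loop, for illustration only. The StackItem and
StackMachine names and the execute() dispatcher are hypothetical, and the
pinned API version is an assumption:

    import fdb
    import fdb.tuple

    fdb.api_version(500)  # assumed; real testers use the version passed on the command line

    class StackItem(object):
        def __init__(self, inst_num, value):
            self.inst_num = inst_num  # 0-based instruction number that produced the item
            self.value = value

    class StackMachine(object):
        transactions = {}  # global transaction map from byte string to Transaction

        def __init__(self, db, prefix):
            self.db = db
            self.prefix = prefix
            self.stack = []        # initialized to be empty
            self.tr_name = prefix  # current transaction name
            self.last_version = 0  # last seen FDB version

        def run(self):
            # Read and execute every instruction stored under the prefix
            r = fdb.Subspace((self.prefix,)).range()
            for inst_num, kv in enumerate(self.db.get_range(r.start, r.stop)):
                op = fdb.tuple.unpack(kv.value)
                self.execute(inst_num, op[0], op[1:])

        def execute(self, inst_num, op, args):
            if op == u'PUSH':
                self.stack.append(StackItem(inst_num, args[0]))
            elif op == u'POP':
                self.stack.pop()
            # ... all remaining operations are dispatched here
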
Data Operations
---------------

PUSH <item>

    Pushes the provided item onto the stack.

DUP

    Duplicates the top item on the stack. The instruction number for the
    duplicate item should be the same as the original.

EMPTY_STACK

    Discards all items in the stack.

SWAP

    Pops the top item off of the stack as INDEX. Swaps the items in the stack at
    depth 0 and depth INDEX. Does not modify the instruction numbers of the
    swapped items.

POP

    Pops and discards the top item on the stack.

SUB

    Pops the top two items off of the stack as A and B and then pushes the
    difference (A-B) onto the stack. A and B may be assumed to be integers.

CONCAT

    Pops the top two items off the stack as A and B and then pushes the
    concatenation of A and B onto the stack. A and B can be assumed to
    be of the same type and will be either byte strings or unicode strings.

LOG_STACK

    Pops the top item off the stack as PREFIX. Using a new transaction with normal
    retry logic, inserts a key-value pair into the database for each item in the
    stack of the form:

        PREFIX + tuple.pack((stackIndex, instructionNumber)) = tuple.pack((item,))

    where stackIndex is the current index of the item in the stack. The oldest
    item in the stack should have stackIndex 0.

    If the byte string created by tuple packing the item exceeds 40000 bytes,
    then the value should be truncated to the first 40000 bytes of the packed
    tuple.

    When finished, the stack should be empty. Note that because the stack may be
    large, it may be necessary to commit the transaction every so often (e.g.
    after every 100 sets) to avoid past_version errors.

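For illustration, here is a minimal Python sketch of LOG_STACK under the
scheme above. It assumes the StackItem sketch from the Overview; committing
after every 100 sets follows the suggestion above and is not a requirement:

    import fdb
    import fdb.tuple

    def log_stack(db, prefix, stack):
        # Oldest item first, so that the oldest item gets stackIndex 0
        entries = list(enumerate(stack))
        while entries:
            batch, entries = entries[:100], entries[100:]
            tr = db.create_transaction()
            while True:  # normal retry loop
                try:
                    for stack_index, item in batch:
                        key = prefix + fdb.tuple.pack((stack_index, item.inst_num))
                        tr[key] = fdb.tuple.pack((item.value,))[:40000]  # truncate long values
                    tr.commit().wait()
                    break
                except fdb.FDBError as e:
                    tr.on_error(e).wait()
        del stack[:]  # the stack ends empty
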
FoundationDB Operations
-----------------------

All of these operations map to a portion of the FoundationDB API. When an
operation applies to a transaction, it should use the transaction stored in
the global transaction map corresponding to the current transaction name. Certain
instructions will be followed by one or both of _SNAPSHOT and _DATABASE to
indicate that they may appear with these variations. _SNAPSHOT operations should
perform the operation as a snapshot read. _DATABASE operations should (if
possible) make use of the methods available directly on the FoundationDB
database object, rather than the currently open transaction.

If your binding does not support operations directly on a database object, you
should simulate it using an anonymous transaction. Remember that set and clear
operations must immediately commit (with appropriate retry behavior!).

Any error that bubbles out of these operations must be caught. In the event of
an error, you must push the packed tuple of the string "ERROR" and the error
code (as a string, not an integer).

Some operations may allow you to push future values onto the stack. When popping
objects from the stack, the future MUST BE waited on and errors caught before
any operations that use the result of the future.

Whether or not you choose to push a future, any operation that supports optional
futures must apply the following rules to the result (see the sketch after this
list):

- If the result is an error, then its value is to be converted to an error
  string as defined above

- If the result is void (i.e. the future was just a signal of
  completion), then its value should be the byte string
  "RESULT_NOT_PRESENT"

- If the result is from a GET operation in which no result was
  returned, then its value is to be converted to the byte string
  "RESULT_NOT_PRESENT"

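A minimal Python sketch of these conversion rules, with hypothetical helper
names:

    import fdb
    import fdb.tuple

    def error_value(fdb_error):
        # Packed tuple of "ERROR" and the error code as a string
        return fdb.tuple.pack((b'ERROR', str(fdb_error.code).encode('ascii')))

    def future_result(future, is_void=False):
        try:
            result = future.wait()
        except fdb.FDBError as e:
            return error_value(e)
        if is_void or result is None:  # void future, or a GET that found nothing
            return b'RESULT_NOT_PRESENT'
        return result
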
NEW_TRANSACTION

    Creates a new transaction and stores it in the global transaction map
    under the currently used transaction name.

USE_TRANSACTION

    Pops the top item off of the stack as TRANSACTION_NAME. Begins using the
    transaction stored at TRANSACTION_NAME in the transaction map for future
    operations. If no entry exists in the map for the given name, a new
    transaction should be inserted.

ON_ERROR

    Pops the top item off of the stack as ERROR_CODE. Passes ERROR_CODE in a
    language-appropriate way to the on_error method of the current transaction
    object and blocks on the future. If on_error re-raises the error, bubbles
    the error out as indicated above. May optionally push a future onto the
    stack.

GET (_SNAPSHOT, _DATABASE)

    Pops the top item off of the stack as KEY and then looks up KEY in the
    database using the get() method. May optionally push a future onto the
    stack.

GET_KEY (_SNAPSHOT, _DATABASE)

    Pops the top four items off of the stack as KEY, OR_EQUAL, OFFSET, PREFIX
    and then constructs a key selector. This key selector is then resolved
    using the get_key() method to yield RESULT. If RESULT starts with PREFIX,
    then RESULT is pushed onto the stack. Otherwise, if RESULT < PREFIX, PREFIX
    is pushed onto the stack. If RESULT > PREFIX, then strinc(PREFIX) is pushed
    onto the stack. May optionally push a future onto the stack. A sketch of
    this clamping logic follows.

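A minimal Python sketch of the GET_KEY clamping rules (the function name is
hypothetical):

    import fdb

    def clamp_get_key_result(result, prefix):
        # Keep resolved keys inside the test's key space, per the rules above
        if result.startswith(prefix):
            return result
        elif result < prefix:
            return prefix
        else:
            return fdb.strinc(prefix)
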
GET_RANGE (_SNAPSHOT, _DATABASE)

    Pops the top five items off of the stack as BEGIN_KEY, END_KEY, LIMIT,
    REVERSE and STREAMING_MODE. Performs a range read in a language-appropriate
    way using these parameters. The resulting range of n key-value pairs is
    packed into a tuple as [k1,v1,k2,v2,...,kn,vn], and this single packed value
    is pushed onto the stack.

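For illustration, a hypothetical helper that builds the packed value
described above from a list of key-value pairs:

    import fdb.tuple

    def pack_range_result(kvs):
        # Flatten the pairs into [k1,v1,k2,v2,...] and pack as a single tuple
        flat = []
        for kv in kvs:
            flat.append(kv.key)
            flat.append(kv.value)
        return fdb.tuple.pack(tuple(flat))
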
GET_RANGE_STARTS_WITH (_SNAPSHOT, _DATABASE)

    Pops the top four items off of the stack as PREFIX, LIMIT, REVERSE and
    STREAMING_MODE. Performs a prefix range read in a language-appropriate way
    using these parameters. Output is pushed onto the stack as with GET_RANGE.

GET_RANGE_SELECTOR (_SNAPSHOT, _DATABASE)

    Pops the top ten items off of the stack as BEGIN_KEY, BEGIN_OR_EQUAL,
    BEGIN_OFFSET, END_KEY, END_OR_EQUAL, END_OFFSET, LIMIT, REVERSE,
    STREAMING_MODE, and PREFIX. Constructs key selectors BEGIN and END from
    the first six parameters, and then performs a range read in a language-
    appropriate way using BEGIN, END, LIMIT, REVERSE and STREAMING_MODE. Output
    is pushed onto the stack as with GET_RANGE, excluding any keys that do not
    begin with PREFIX.

GET_READ_VERSION (_SNAPSHOT)

    Gets the current read version and stores it in the internal stack machine
    state as the last seen version. Pushes the string "GOT_READ_VERSION" onto
    the stack.

GET_VERSIONSTAMP

    Calls get_versionstamp and pushes the resulting future onto the stack.

SET (_DATABASE)

    Pops the top two items off of the stack as KEY and VALUE. Sets KEY to have
    the value VALUE. A SET_DATABASE call may optionally push a future onto the
    stack.

SET_READ_VERSION

    Sets the current transaction read version to the internal state machine last
    seen version.

CLEAR (_DATABASE)

    Pops the top item off of the stack as KEY and then clears KEY from the
    database. A CLEAR_DATABASE call may optionally push a future onto the stack.

CLEAR_RANGE (_DATABASE)

    Pops the top two items off of the stack as BEGIN_KEY and END_KEY. Clears the
    range of keys from BEGIN_KEY to END_KEY in the database. A
    CLEAR_RANGE_DATABASE call may optionally push a future onto the stack.

CLEAR_RANGE_STARTS_WITH (_DATABASE)

    Pops the top item off of the stack as PREFIX and then clears all keys from
    the database that begin with PREFIX. A CLEAR_RANGE_STARTS_WITH_DATABASE call
    may optionally push a future onto the stack.

ATOMIC_OP (_DATABASE)

    Pops the top three items off of the stack as OPTYPE, KEY, and VALUE.
    Performs the atomic operation described by OPTYPE upon KEY with VALUE. An
    ATOMIC_OP_DATABASE call may optionally push a future onto the stack.

READ_CONFLICT_RANGE and WRITE_CONFLICT_RANGE

    Pops the top two items off of the stack as BEGIN_KEY and END_KEY. Adds a
    read conflict range or write conflict range from BEGIN_KEY to END_KEY.
    Pushes the byte string "SET_CONFLICT_RANGE" onto the stack.

READ_CONFLICT_KEY and WRITE_CONFLICT_KEY

    Pops the top item off of the stack as KEY. Adds KEY as a read conflict key
    or write conflict key. Pushes the byte string "SET_CONFLICT_KEY" onto the
    stack.

DISABLE_WRITE_CONFLICT

    Sets the NEXT_WRITE_NO_WRITE_CONFLICT_RANGE transaction option on the
    current transaction. Does not modify the stack.

COMMIT

    Commits the current transaction (with no retry behavior). May optionally
    push a future onto the stack.

RESET

    Resets the current transaction.

CANCEL

    Cancels the current transaction.

GET_COMMITTED_VERSION

    Gets the committed version from the current transaction and stores it in the
    internal stack machine state as the last seen version. Pushes the byte
    string "GOT_COMMITTED_VERSION" onto the stack.

WAIT_FUTURE

    Pops the top item off the stack and pushes it back on. If the top item on
    the stack is a future, this will have the side effect of waiting on the
    result of the future and pushing the result on the stack. Does not change
    the instruction number of the item.

Tuple Operations
----------------

TUPLE_PACK

    Pops the top item off of the stack as N. Pops the next N items off of the
    stack and packs them as the tuple [item0,item1,...,itemN], and then pushes
    this single packed value onto the stack.

TUPLE_UNPACK

    Pops the top item off of the stack as PACKED, and then unpacks PACKED into a
    tuple. For each element of the tuple, packs it as a new tuple and pushes it
    onto the stack. (A sketch of TUPLE_PACK and TUPLE_UNPACK appears after this
    section.)

TUPLE_RANGE

    Pops the top item off of the stack as N. Pops the next N items off of the
    stack, and passes these items as a tuple (or array, or language-appropriate
    structure) to the tuple range method. Pushes the begin and end elements of
    the returned range onto the stack.

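For illustration, hypothetical stack machine methods for TUPLE_PACK and
TUPLE_UNPACK, assuming push() and pop(n) helpers that push one value and pop
the top n values:

    import fdb.tuple

    def tuple_pack(self):
        count = self.pop(1)[0]
        items = self.pop(count)
        self.push(fdb.tuple.pack(tuple(items)))

    def tuple_unpack(self):
        packed = self.pop(1)[0]
        for element in fdb.tuple.unpack(packed):
            # Each element is re-packed as a single-item tuple before pushing
            self.push(fdb.tuple.pack((element,)))
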
Thread Operations
-----------------

START_THREAD

    Pops the top item off of the stack as PREFIX. Creates a new stack machine
    instance operating on the same database as the current stack machine, but
    operating on PREFIX. The new stack machine should have independent internal
    state. The new stack machine should begin executing instructions concurrent
    with the current stack machine through a language-appropriate mechanism.

WAIT_EMPTY

    Pops the top item off of the stack as PREFIX. Blocks execution until the
    range with prefix PREFIX is not present in the database. This should be
    implemented as a polling loop inside of a language- and binding-appropriate
    retryable construct which synthesizes FoundationDB error 1020 when the range
    is not empty. Pushes the string "WAITED_FOR_EMPTY" onto the stack when
    complete. A sketch of this polling loop follows.

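A minimal Python sketch of such a polling loop; it leans on the Python
binding's @fdb.transactional retry decorator, and the helper names are
hypothetical:

    import fdb

    @fdb.transactional
    def check_empty(tr, prefix):
        # Synthesize retryable error 1020 (not_committed) while the range is
        # non-empty; the decorator's retry loop then becomes the polling loop
        if len(tr.get_range_startswith(prefix, 1).to_list()) > 0:
            raise fdb.FDBError(1020)

    def wait_empty(self, prefix):
        check_empty(self.db, prefix)
        self.push(b'WAITED_FOR_EMPTY')
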
Miscellaneous
-------------

UNIT_TESTS

    This is called during the scripted test to allow bindings to test features
    which aren't supported by the stack tester. Things currently tested in the
    UNIT_TESTS section:

        Transaction options
        Watches
        Cancellation
        Retry limits
        Timeouts
@ -0,0 +1,241 @@
Overview
--------

The directory layer is tested by adding some additional instructions and state to
the existing stack tester. Each 'thread' of the stack tester should have its own
directory testing state.

Additional State and Initialization
-----------------------------------

Your tester should store three additional pieces of state (a minimal sketch
follows this section).

directory list - The items in this list should be accessible by index. The list
    should support an append operation. It will be required to store Subspaces,
    DirectorySubspaces, and DirectoryLayers.

directory list index - an index into the directory list of the currently active
    directory.

error index - the index to use when the directory at directory list index is not
    present.

At the beginning of the test, the list should contain just the default directory
layer. The directory index and error index should both be set to 0.

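For illustration, a minimal Python sketch of this state; the class name is
hypothetical, and fdb.directory is the Python binding's default directory
layer:

    import fdb

    class DirectoryTesterState(object):
        def __init__(self):
            # Starts with just the default directory layer; both indices are 0
            self.dir_list = [fdb.directory]
            self.dir_index = 0
            self.error_index = 0

        def current(self):
            return self.dir_list[self.dir_index]
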
Popping Tuples
--------------

Some instructions will require you to pop N tuples. To do this, repeat the
following procedure N times:

    Pop 1 item off the stack as M. Pop M items off the stack as
    tuple = [item1, ..., itemM].

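A minimal Python sketch of this procedure, assuming the pop(n) helper from
the API tester sketches:

    def pop_tuples(self, count):
        tuples = []
        for _ in range(count):
            m = self.pop(1)[0]  # number of items in this tuple
            tuples.append(tuple(self.pop(m)))
        return tuples
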
Errors
------

In the event that you encounter an error when performing a directory layer
operation, you should push the byte string "DIRECTORY_ERROR" onto the stack. If
the operation being performed was supposed to append an item to the directory
list, then a null entry should be appended instead.

New Instructions
----------------

Below are the new instructions that must be implemented to test the directory
layer. Some instructions specify that the current directory should be used
for the operation. In that case, use the object in the directory list specified
by the current directory list index. Operations that are not defined for a
particular object will not be called (e.g. a DirectoryLayer will never be asked
to pack a key).

Directory/Subspace/Layer Creation
---------------------------------

DIRECTORY_CREATE_SUBSPACE

    Pop 1 tuple off the stack as [path]. Pop 1 additional item as [raw_prefix].
    Create a subspace with path as the prefix tuple and the specified
    raw_prefix. Append it to the directory list.

DIRECTORY_CREATE_LAYER

    Pop 3 items off the stack as [index1, index2, allow_manual_prefixes]. Let
    node_subspace be the object in the directory list at index1 and
    content_subspace be the object in the directory list at index2. Create a new
    directory layer with the specified node_subspace and content_subspace. If
    allow_manual_prefixes is 1, then enable manual prefixes on the directory
    layer. Append the resulting directory layer to the directory list.

    If either of the two specified subspaces are null, then do not create a
    directory layer and instead push null onto the directory list.

DIRECTORY_CREATE_OR_OPEN[_DATABASE]

    Use the current directory for this operation.

    Pop 1 tuple off the stack as [path]. Pop 1 additional item as [layer].
    create_or_open a directory with the specified path and layer. If layer is
    null, use the default value for that parameter.

DIRECTORY_CREATE[_DATABASE]

    Pop 1 tuple off the stack as [path]. Pop 2 additional items as
    [layer, prefix]. create a directory with the specified path, layer,
    and prefix. If either of layer or prefix is null, use the default value for
    that parameter (layer='', prefix=null).

DIRECTORY_OPEN[_DATABASE|_SNAPSHOT]

    Use the current directory for this operation.

    Pop 1 tuple off the stack as [path]. Pop 1 additional item as [layer]. Open
    a directory with the specified path and layer. If layer is null, use the
    default value (layer='').

Directory Management
--------------------

DIRECTORY_CHANGE

    Pop the top item off the stack as [index]. Set the current directory list
    index to index. In the event that the directory at this new index is null
    (as the result of a previous error), set the directory list index to the
    error index. A sketch of this fallback follows.

DIRECTORY_SET_ERROR_INDEX

    Pop the top item off the stack as [error_index]. Set the current error index
    to error_index.

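For illustration, a minimal sketch of this fallback using the
DirectoryTesterState sketch above:

    def directory_change(state, index):
        state.dir_index = index
        if state.dir_list[state.dir_index] is None:
            # A previous error left a null entry; fall back to the error index
            state.dir_index = state.error_index
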
Directory Operations
|
||||
--------------------
|
||||
|
||||
DIRECTORY_MOVE[_DATABASE]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 2 tuples off the stack as [old_path, new_path]. Call move with the
|
||||
specified old_path and new_path. Append the result onto the directory list.
|
||||
|
||||
DIRECTORY_MOVE_TO[_DATABASE]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 1 tuple off the stack as [new_absolute_path]. Call moveTo with the
|
||||
specified new_absolute_path. Append the result onto the directory list.
|
||||
|
||||
DIRECTORY_REMOVE[_DATABASE]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1
|
||||
tuple off the stack as [path]. Call remove, passing it path if one was
|
||||
popped.
|
||||
|
||||
DIRECTORY_REMOVE_IF_EXISTS[_DATABASE]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1
|
||||
tuple off the stack as [path]. Call remove_if_exits, passing it path if one
|
||||
was popped.
|
||||
|
||||
DIRECTORY_LIST[_DATABASE|_SNAPSHOT]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1
|
||||
tuple off the stack as [path]. Call list, passing it path if one was popped.
|
||||
Pack the resulting list of directories using the tuple layer and push the
|
||||
packed string onto the stack.
|
||||
|
||||
DIRECTORY_EXISTS[_DATABASE|_SNAPSHOT]
|
||||
|
||||
Use the current directory for this operation.
|
||||
|
||||
Pop 1 item off the stack as [count] (either 0 or 1). If count is 1, pop 1
|
||||
tuple off the stack as [path]. Call exists, passing it path if one
|
||||
was popped. Push 1 onto the stack if the path exists and 0 if it does not.
|
||||
|
||||
Subspace Operations
-------------------

DIRECTORY_PACK_KEY

Use the current directory for this operation.

Pop 1 tuple off the stack as [key_tuple]. Pack key_tuple and push the result
onto the stack.

DIRECTORY_UNPACK_KEY

Use the current directory for this operation.

Pop 1 item off the stack as [key]. Unpack key and push the resulting tuple
onto the stack one item at a time.

DIRECTORY_RANGE

Use the current directory for this operation.

Pop 1 tuple off the stack as [tuple]. Create a range using tuple and push
range.begin and range.end onto the stack.

DIRECTORY_CONTAINS

Use the current directory for this operation.

Pop 1 item off the stack as [key]. Check if the current directory contains
the specified key. Push 1 if it does and 0 if it doesn't.

DIRECTORY_OPEN_SUBSPACE

Use the current directory for this operation.

Pop 1 tuple off the stack as [tuple]. Open the subspace of the current
directory specified by tuple and push it onto the directory list.

Directory Logging
-----------------

DIRECTORY_LOG_SUBSPACE

Use the current directory for this operation.

Pop 1 item off the stack as [prefix]. Let key equal
prefix + tuple.pack([dir_index]). Set the value at key to the result of
calling directory.key() in the current transaction.

DIRECTORY_LOG_DIRECTORY

Use the current directory for this operation.

Pop 1 item off the stack as [raw_prefix]. Create a subspace log_subspace
with path (dir_index) and the specified raw_prefix. Set:

tr[log_subspace[u'path']] = the tuple packed path of the directory.

tr[log_subspace[u'layer']] = the tuple packed layer of the directory.

tr[log_subspace[u'exists']] = the packed tuple containing a 1 if the
directory exists and 0 if it doesn't.

tr[log_subspace[u'children']] = the tuple packed list of children of the
directory.

Where log_subspace[u<str>] is the subspace packed tuple containing only the
single specified unicode string <str>.
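
A hedged sketch of those four writes in Python follows (assuming a directory
object with the Python binding's get_path/get_layer/exists/list methods, and
log_subspace constructed as described above):

# Hedged sketch of the DIRECTORY_LOG_DIRECTORY writes described above.
import fdb
import fdb.tuple

def log_directory(tr, log_subspace, directory):
    tr[log_subspace[u'path']] = fdb.tuple.pack(directory.get_path())
    tr[log_subspace[u'layer']] = fdb.tuple.pack((directory.get_layer(),))
    tr[log_subspace[u'exists']] = fdb.tuple.pack((1 if directory.exists(tr) else 0,))
    tr[log_subspace[u'children']] = fdb.tuple.pack(tuple(directory.list(tr)))
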
Other
-----

DIRECTORY_STRIP_PREFIX

Use the current directory for this operation.

Pop 1 item off the stack as [byte_array]. Call .key() on the current
subspace and store the result as [prefix]. Throw an error if the popped
array does not start with prefix. Otherwise, remove the prefix from the
popped array and push the result onto the stack.
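
As a closing illustration, a hedged sketch of the two bookend operations:
DIRECTORY_PACK_KEY builds a key in the current subspace, and
DIRECTORY_STRIP_PREFIX undoes the prefix. The inst, dir_list, and dir_index
names are illustrative, not mandated by this specification.

def op_directory_pack_key(inst, dir_list, dir_index):
    key_tuple = inst.pop_tuple()                    # [key_tuple]
    inst.push(dir_list[dir_index].pack(key_tuple))  # packed key onto the stack

def op_directory_strip_prefix(inst, dir_list, dir_index):
    byte_array = inst.pop()             # [byte_array]
    prefix = dir_list[dir_index].key()  # raw prefix of the current subspace
    if not byte_array.startswith(prefix):
        raise Exception('%r does not start with the raw prefix %r' % (byte_array, prefix))
    inst.push(byte_array[len(prefix):])  # remainder onto the stack
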
@ -0,0 +1,197 @@
#
# __init__.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import math
import re

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import util

fdb.api_version(FDB_API_VERSION)


class ResultSpecification(object):
    def __init__(self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None):
        self.subspace = subspace
        self.key_start_index = key_start_index
        self.ordering_index = ordering_index

        if global_error_filter is not None:
            error_str = '|'.join(['%d' % e for e in global_error_filter])
            self.error_regex = re.compile(r'\x01+ERROR\x00\xff*\x01' + error_str + r'\x00')
        else:
            self.error_regex = None

    def matches_global_error_filter(self, str):
        if self.error_regex is None:
            return False

        return self.error_regex.search(str) is not None


class Test(object):
    def __init__(self, subspace, min_api_version=0, max_api_version=int(1e9)):
        self.subspace = subspace
        self.min_api_version = min_api_version
        self.max_api_version = max_api_version

    # Returns nothing
    def setup(self, args):
        pass

    # Returns an instance of TestInstructions
    def generate(self, args, thread_number):
        pass

    # Returns nothing
    def pre_run(self, db, args):
        pass

    # Returns a list of ResultSpecifications to read data from and compare with other testers
    def get_result_specifications(self):
        return []

    # Returns a dict { subspace => results } of results that the test is expected to have.
    # Compared against subspaces returned by get_result_specifications. A subspace omitted from
    # this dictionary can still be compared against other testers if it is included in the list
    # returned by get_result_specifications.
    def get_expected_results(self):
        return {}

    # Returns a list of error strings
    def validate(self, db, args):
        return []

    @classmethod
    def create_test(cls, name, subspace):
        target = 'bindingtester.tests.%s' % name
        test_class = [s for s in cls.__subclasses__() if s.__module__ == target]
        if len(test_class) == 0:
            return None

        return test_class[0](subspace)


class Instruction(object):
    def __init__(self, operation):
        self.operation = operation
        self.argument = None
        self.value = fdb.tuple.pack((unicode(self.operation),))

    def to_value(self):
        return self.value

    def __str__(self):
        return self.operation

    def __repr__(self):
        return repr(self.operation)


class PushInstruction(Instruction):
    def __init__(self, argument):
        self.operation = 'PUSH'
        self.argument = argument
        self.value = fdb.tuple.pack((unicode("PUSH"), argument))

    def __str__(self):
        return '%s %s' % (self.operation, self.argument)

    def __repr__(self):
        return '%r %r' % (self.operation, self.argument)


class TestInstructions(object):
    def __init__(self):
        pass

    # Returns a dictionary of subspace => InstructionSet
    def get_threads(self, subspace):
        pass

    def insert_operations(self, db, subspace):
        pass


class InstructionSet(TestInstructions, list):
    def __init__(self):
        TestInstructions.__init__(self)
        list.__init__(self)

        self.core_test_begin = 0
        self.core_test_end = None

    def push_args(self, *args):
        self.extend([PushInstruction(arg) for arg in reversed(args)])

    def append(self, instruction):
        if isinstance(instruction, Instruction):
            list.append(self, instruction)
        else:
            list.append(self, Instruction(instruction))

    def get_threads(self, subspace):
        return {subspace: self}

    def setup_complete(self):
        self.core_test_begin = len(self)

    def begin_finalization(self):
        self.core_test_end = len(self)

    def core_instructions(self):
        return self[self.core_test_begin : self.core_test_end]

    @fdb.transactional
    def _insert_operations_transactional(self, tr, subspace, start, count):
        for i, instruction in enumerate(self[start : start + count]):
            tr[subspace.pack((start + i,))] = instruction.to_value()

    def insert_operations(self, db, subspace):
        for i in range(0, int(math.ceil(len(self) / 1000.0))):
            self._insert_operations_transactional(db, subspace, i * 1000, 1000)


class ThreadedInstructionSet(TestInstructions):
    def __init__(self):
        super(ThreadedInstructionSet, self).__init__()
        self.threads = {}

    def get_threads(self, subspace):
        result = dict(self.threads)
        if None in self.threads:
            result[subspace] = result[None]
            del result[None]

        return result

    def insert_operations(self, db, subspace):
        for thread_subspace, thread in self.threads.items():
            if thread_subspace is None:
                thread_subspace = subspace

            thread.insert_operations(db, thread_subspace)

    def create_thread(self, subspace=None, thread_instructions=None):
        if subspace in self.threads:
            raise Exception('An instruction set with the subspace %r has already been created' % util.subspace_to_tuple(subspace))

        if thread_instructions is None:
            thread_instructions = InstructionSet()

        self.threads[subspace] = thread_instructions
        return thread_instructions


util.import_subclasses(__file__, 'bindingtester.tests')
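
A hedged usage sketch for the classes above (Python 2, like the module):
build a few instructions and persist them for a tester to execute. The db
and test_subspace names are assumed to come from the harness.

from bindingtester.tests import InstructionSet

ops = InstructionSet()
ops.append('NEW_TRANSACTION')          # bare strings are wrapped in Instruction
ops.push_args(u'foo', u'bar')          # extended in reverse, so u'foo' ends up on top
ops.append('SET')                      # pops key u'foo', then value u'bar'
ops.insert_operations(db, test_subspace)  # written in chunks of 1000 ops per transaction
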
@ -0,0 +1,482 @@
#
# api.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification
from bindingtester.tests import test_util

fdb.api_version(FDB_API_VERSION)


class ApiTest(Test):
    def __init__(self, subspace):
        super(ApiTest, self).__init__(subspace)
        self.workspace = self.subspace['workspace']  # The keys and values here must match between subsequent runs of the same test
        self.scratch = self.subspace['scratch']  # The keys and values here can differ between runs
        self.stack_subspace = self.subspace['stack']

        self.versionstamped_values = self.scratch['versionstamped_values']
        self.versionstamped_keys = self.scratch['versionstamped_keys']

    def setup(self, args):
        self.stack_size = 0
        self.string_depth = 0
        self.key_depth = 0
        self.max_keys = 1000

        self.has_version = False
        self.can_set_version = True
        self.is_committed = True
        self.can_use_key_selectors = True

        self.generated_keys = []
        self.outstanding_ops = []
        self.random = test_util.RandomGenerator(args.max_int_bits)

    def add_stack_items(self, num):
        self.stack_size += num
        self.string_depth = 0
        self.key_depth = 0

    def add_strings(self, num):
        self.stack_size += num
        self.string_depth += num
        self.key_depth = 0

    def add_keys(self, num):
        self.stack_size += num
        self.string_depth += num
        self.key_depth += num

    def remove(self, num):
        self.stack_size -= num
        self.string_depth = max(0, self.string_depth - num)
        self.key_depth = max(0, self.key_depth - num)

        self.outstanding_ops = [i for i in self.outstanding_ops if i[0] <= self.stack_size]

    def ensure_string(self, instructions, num):
        while self.string_depth < num:
            instructions.push_args(self.random.random_string(random.randint(0, 100)))
            self.add_strings(1)

        self.remove(num)

    def choose_key(self):
        if random.random() < float(len(self.generated_keys)) / self.max_keys:
            tup = random.choice(self.generated_keys)
            if random.random() < 0.3:
                return self.workspace.pack(tup[0:random.randint(0, len(tup))])

            return self.workspace.pack(tup)

        tup = self.random.random_tuple(5)
        self.generated_keys.append(tup)

        return self.workspace.pack(tup)

    def ensure_key(self, instructions, num):
        while self.key_depth < num:
            instructions.push_args(self.choose_key())
            self.add_keys(1)

        self.remove(num)

    def ensure_key_value(self, instructions):
        if self.string_depth == 0:
            instructions.push_args(self.choose_key(), self.random.random_string(random.randint(0, 100)))

        elif self.string_depth == 1 or self.key_depth == 0:
            self.ensure_key(instructions, 1)
            self.remove(1)

        else:
            self.remove(2)

    def preload_database(self, instructions, num):
        for i in range(num):
            self.ensure_key_value(instructions)
            instructions.append('SET')

            if i % 100 == 99:
                test_util.blocking_commit(instructions)

        test_util.blocking_commit(instructions)
        self.add_stack_items(1)

    def wait_for_reads(self, instructions):
        while len(self.outstanding_ops) > 0 and self.outstanding_ops[-1][0] <= self.stack_size:
            read = self.outstanding_ops.pop()
            # print '%d. waiting for read at instruction %r' % (len(instructions), read)
            test_util.to_front(instructions, self.stack_size - read[0])
            instructions.append('WAIT_FUTURE')

    def generate(self, args, thread_number):
        instructions = InstructionSet()

        op_choices = ['NEW_TRANSACTION', 'COMMIT']

        reads = ['GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH', 'GET_RANGE_SELECTOR']
        mutations = ['SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH', 'ATOMIC_OP']
        snapshot_reads = [x + '_SNAPSHOT' for x in reads]
        database_reads = [x + '_DATABASE' for x in reads]
        database_mutations = [x + '_DATABASE' for x in mutations]
        mutations += ['VERSIONSTAMP']
        versions = ['GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION']
        snapshot_versions = ['GET_READ_VERSION_SNAPSHOT']
        tuples = ['TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'SUB']
        resets = ['ON_ERROR', 'RESET', 'CANCEL']
        read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
        write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']

        op_choices += reads
        op_choices += mutations
        op_choices += snapshot_reads
        op_choices += database_reads
        op_choices += database_mutations
        op_choices += versions
        op_choices += snapshot_versions
        op_choices += tuples
        op_choices += read_conflicts
        op_choices += write_conflicts
        op_choices += resets

        idempotent_atomic_ops = [u'BIT_AND', u'BIT_OR', u'MAX', u'MIN']
        atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR']

        if args.concurrency > 1:
            self.max_keys = random.randint(100, 1000)
        else:
            self.max_keys = random.randint(100, 10000)

        instructions.append('NEW_TRANSACTION')
        instructions.append('GET_READ_VERSION')

        self.preload_database(instructions, self.max_keys)

        instructions.setup_complete()

        for i in range(args.num_ops):
            op = random.choice(op_choices)
            index = len(instructions)

            # print 'Adding instruction %s at %d' % (op, index)

            if args.concurrency == 1 and (op in database_mutations):
                self.wait_for_reads(instructions)
                test_util.blocking_commit(instructions)
                self.add_stack_items(1)

            if op in resets or op == 'NEW_TRANSACTION':
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                self.outstanding_ops = []

            if op == 'NEW_TRANSACTION':
                instructions.append(op)
                self.is_committed = False
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'ON_ERROR':
                instructions.push_args(random.randint(0, 5000))
                instructions.append(op)

                self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                instructions.append('NEW_TRANSACTION')
                self.is_committed = False
                self.can_set_version = True
                self.can_use_key_selectors = True
                self.add_strings(1)

            elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)
                self.can_set_version = False

            elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE':
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 1)
                    instructions.push_args(self.workspace.key())
                    instructions.push_args(*self.random.random_selector_params())
                    test_util.to_front(instructions, 3)
                    instructions.append(op)

                    # Don't add key here because we may be outside of our prefix
                    self.add_strings(1)
                    self.can_set_version = False

            elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE':
                self.ensure_key(instructions, 2)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 4)
                test_util.to_front(instructions, 4)
                instructions.append(op)

                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False

            elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
                # TODO: not tested well
                self.ensure_key(instructions, 1)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 3)
                instructions.append(op)

                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False

            elif op == 'GET_RANGE_SELECTOR' or op == 'GET_RANGE_SELECTOR_SNAPSHOT' or op == 'GET_RANGE_SELECTOR_DATABASE':
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 2)
                    instructions.push_args(self.workspace.key())
                    range_params = self.random.random_range_params()
                    instructions.push_args(*range_params)
                    instructions.push_args(*self.random.random_selector_params())
                    test_util.to_front(instructions, 6)
                    instructions.push_args(*self.random.random_selector_params())
                    test_util.to_front(instructions, 9)
                    instructions.append(op)

                    if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                        self.add_strings(1)
                    else:
                        self.add_stack_items(1)

                    self.can_set_version = False

            elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT':
                instructions.append(op)
                self.has_version = self.can_set_version
                self.add_strings(1)

            elif op == 'SET' or op == 'SET_DATABASE':
                self.ensure_key_value(instructions)
                instructions.append(op)
                if op == 'SET_DATABASE':
                    self.add_stack_items(1)

            elif op == 'SET_READ_VERSION':
                if self.has_version and self.can_set_version:
                    instructions.append(op)
                    self.can_set_version = False

            elif op == 'CLEAR' or op == 'CLEAR_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
                # Protect against inverted range
                key1 = self.workspace.pack(self.random.random_tuple(5))
                key2 = self.workspace.pack(self.random.random_tuple(5))

                if key1 > key2:
                    key1, key2 = key2, key1

                instructions.push_args(key1, key2)

                instructions.append(op)
                if op == 'CLEAR_RANGE_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                    self.add_stack_items(1)

            elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
                self.ensure_key_value(instructions)
                if op == 'ATOMIC_OP' or args.concurrency > 1:
                    instructions.push_args(random.choice(atomic_ops))
                else:
                    instructions.push_args(random.choice(idempotent_atomic_ops))

                instructions.append(op)
                if op == 'ATOMIC_OP_DATABASE':
                    self.add_stack_items(1)

            elif op == 'VERSIONSTAMP':
                rand_str1 = self.random.random_string(100)
                key1 = self.versionstamped_values.pack((rand_str1,))

                split = random.randint(0, 70)
                rand_str2 = self.random.random_string(20 + split) + 'XXXXXXXXXX' + self.random.random_string(70 - split)
                key2 = self.versionstamped_keys.pack() + rand_str2
                index = key2.find('XXXXXXXXXX')
                key2 += chr(index % 256) + chr(index / 256)

                instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key1, 'XXXXXXXXXX' + rand_str2)
                instructions.append('ATOMIC_OP')

                instructions.push_args(u'SET_VERSIONSTAMPED_KEY', key2, rand_str1)
                instructions.append('ATOMIC_OP')
                self.can_use_key_selectors = False

            elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE':
                self.ensure_key(instructions, 2)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'DISABLE_WRITE_CONFLICT':
                instructions.append(op)

            elif op == 'COMMIT':
                if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9:
                    if args.concurrency == 1:
                        self.wait_for_reads(instructions)
                    test_util.blocking_commit(instructions)
                    self.add_stack_items(1)
                    self.is_committed = True
                    self.can_set_version = True
                    self.can_use_key_selectors = True
                else:
                    instructions.append(op)
                    self.add_strings(1)

            elif op == 'RESET':
                instructions.append(op)
                self.is_committed = False
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'CANCEL':
                instructions.append(op)
                self.is_committed = False
                self.can_set_version = False

            elif op == 'GET_COMMITTED_VERSION':
                if self.is_committed:
                    instructions.append(op)
                    self.has_version = True
                    self.add_strings(1)

            elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append(op)
                if op == 'TUPLE_PACK':
                    self.add_strings(1)
                else:
                    self.add_strings(2)

            elif op == 'TUPLE_UNPACK':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append('TUPLE_PACK')
                instructions.append(op)
                self.add_strings(len(tup))

            # Use SUB to test if integers are correctly unpacked
            elif op == 'SUB':
                a = self.random.random_int() / 2
                b = self.random.random_int() / 2
                instructions.push_args(0, a, b)
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('SWAP')
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('TUPLE_PACK')
                self.add_stack_items(1)

            else:
                assert False

            if op in reads or op in snapshot_reads:
                self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

            if args.concurrency == 1 and (op in database_reads or op in database_mutations):
                instructions.append('WAIT_FUTURE')

        instructions.begin_finalization()

        if args.concurrency == 1:
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
            self.add_stack_items(1)

        instructions.append('NEW_TRANSACTION')
        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)

        return instructions

    @fdb.transactional
    def check_versionstamps(self, tr, begin_key, limit):
        next_begin = None
        incorrect_versionstamps = 0
        for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
            next_begin = k + '\x00'
            tup = fdb.tuple.unpack(k)
            key = self.versionstamped_keys.pack() + v[10:].replace('XXXXXXXXXX', v[:10], 1)
            if tr[key] != tup[-1]:
                incorrect_versionstamps += 1

        return (next_begin, incorrect_versionstamps)

    def validate(self, db, args):
        errors = []

        begin = self.versionstamped_values.range().start
        incorrect_versionstamps = 0

        while begin is not None:
            (begin, current_incorrect_versionstamps) = self.check_versionstamps(db, begin, 100)
            incorrect_versionstamps += current_incorrect_versionstamps

        if incorrect_versionstamps > 0:
            errors.append('There were %d failed version stamp operations' % incorrect_versionstamps)

        return errors

    def get_result_specifications(self):
        return [
            ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
            ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
        ]
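
One detail of the VERSIONSTAMP case above is easy to miss: with this era's
API, SET_VERSIONSTAMPED_KEY expects the position of the 10-byte placeholder
to be appended to the key as a two-byte little-endian integer, which is what
the chr(index % 256) + chr(index / 256) expression builds. A hedged
stand-alone sketch of that encoding (the 'XXXXXXXXXX' placeholder is this
test's own convention, not something the API requires):

import struct

PLACEHOLDER = 'XXXXXXXXXX'  # the test's 10-byte stand-in for the versionstamp

def with_versionstamp_offset(raw_key):
    # Append the placeholder's position as a two-byte little-endian offset,
    # equivalent to chr(index % 256) + chr(index / 256) in the code above.
    index = raw_key.find(PLACEHOLDER)
    assert index >= 0, 'key must contain the placeholder'
    return raw_key + struct.pack('<H', index)
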
@ -0,0 +1,373 @@
#
# directory.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import util

from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification
from bindingtester.tests import test_util, directory_util

from bindingtester.tests.directory_util import DirListEntry

fdb.api_version(FDB_API_VERSION)


class DirectoryTest(Test):

    def __init__(self, subspace):
        super(DirectoryTest, self).__init__(subspace)
        self.stack_subspace = subspace['stack']
        self.directory_log = subspace['directory_log']['directory']
        self.subspace_log = subspace['directory_log']['subspace']
        self.prefix_log = subspace['prefix_log']

        self.prepopulated_dirs = []
        self.next_path = 1

    def ensure_default_directory_subspace(self, instructions, path):
        directory_util.create_default_directory_subspace(instructions, path, self.random)

        child = self.root.add_child((path,), path, self.root, DirListEntry(True, True))
        self.dir_list.append(child)
        self.dir_index = directory_util.DEFAULT_DIRECTORY_INDEX

    def generate_layer(self):
        if random.random() < 0.7:
            return ''
        else:
            choice = random.randint(0, 3)
            if choice == 0:
                return 'partition'
            elif choice == 1:
                return 'test_layer'
            else:
                return self.random.random_string(random.randint(0, 5))

    def setup(self, args):
        self.dir_index = 0
        self.random = test_util.RandomGenerator(args.max_int_bits)

    def generate(self, args, thread_number):
        instructions = InstructionSet()

        op_choices = ['NEW_TRANSACTION', 'COMMIT']

        general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER']

        op_choices += general

        directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
                               'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS']
        directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST']

        directory_db_mutations = [x + '_DATABASE' for x in directory_mutations]
        directory_db_reads = [x + '_DATABASE' for x in directory_reads]
        directory_snapshot_reads = [x + '_SNAPSHOT' for x in directory_reads]

        directory = []
        directory += directory_mutations
        directory += directory_reads
        directory += directory_db_mutations
        directory += directory_db_reads
        directory += directory_snapshot_reads

        subspace = ['DIRECTORY_PACK_KEY', 'DIRECTORY_UNPACK_KEY', 'DIRECTORY_RANGE', 'DIRECTORY_CONTAINS', 'DIRECTORY_OPEN_SUBSPACE']

        instructions.append('NEW_TRANSACTION')

        default_path = unicode('default%d' % self.next_path)
        self.next_path += 1
        self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
        self.root = self.dir_list[0]

        instructions.push_args(0)
        instructions.append('DIRECTORY_CHANGE')

        # Generate some directories that we are going to create in advance. This tests that other bindings
        # are compatible with the Python implementation
        self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]

        for path, layer in self.prepopulated_dirs:
            instructions.push_args(layer)
            instructions.push_args(*test_util.with_length(path))
            instructions.append('DIRECTORY_OPEN')
            # print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), False, len(self.dir_list))
            self.dir_list.append(self.dir_list[0].add_child(path, default_path, self.root, DirListEntry(True, True, has_known_prefix=False)))

        instructions.setup_complete()

        for i in range(args.num_ops):
            if random.random() < 0.5:
                self.dir_index = random.randrange(0, len(self.dir_list))
                instructions.push_args(self.dir_index)
                instructions.append('DIRECTORY_CHANGE')

            choices = op_choices[:]
            if self.dir_list[self.dir_index].is_directory:
                choices += directory
            if self.dir_list[self.dir_index].is_subspace:
                choices += subspace

            op = random.choice(choices)
            dir_entry = self.dir_list[self.dir_index]

            # print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), op, repr(self.dir_index), repr(dir_entry.has_known_prefix), len(self.dir_list))

            if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
                root_op = op[0:-9]
            else:
                root_op = op

            if root_op == 'NEW_TRANSACTION':
                instructions.append(op)

            elif root_op == 'COMMIT':
                test_util.blocking_commit(instructions)

            elif root_op == 'DIRECTORY_CREATE_SUBSPACE':
                path = generate_path()
                instructions.push_args(generate_prefix(allow_empty=False, is_partition=True))
                instructions.push_args(*test_util.with_length(path))
                instructions.append(op)
                self.dir_list.append(DirListEntry(False, True))

            elif root_op == 'DIRECTORY_CREATE_LAYER':
                indices = []
                for i in range(2):
                    instructions.push_args(generate_prefix(allow_empty=False, is_partition=True))
                    instructions.push_args(*test_util.with_length(generate_path()))
                    instructions.append('DIRECTORY_CREATE_SUBSPACE')
                    indices.append(len(self.dir_list))
                    self.dir_list.append(DirListEntry(False, True))

                instructions.push_args(random.choice([0, 1]))
                instructions.push_args(*indices)
                instructions.append(op)
                self.dir_list.append(DirListEntry(True, False, False))

            elif root_op == 'DIRECTORY_CREATE_OR_OPEN':
                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                path = generate_path()
                op_args = test_util.with_length(path) + (self.generate_layer(),)
                directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)

                if not op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                self.dir_list.append(dir_entry.add_child(path, default_path, self.root, DirListEntry(True, True, False)))

            elif root_op == 'DIRECTORY_CREATE':
                layer = self.generate_layer()
                is_partition = layer == 'partition'

                allow_empty_prefix = random.random() < 0.8
                prefix = generate_prefix(allow_empty=allow_empty_prefix, is_partition=is_partition)

                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                    test_util.blocking_commit(instructions)

                path = generate_path()
                op_args = test_util.with_length(path) + (layer, prefix)
                if prefix is None:
                    directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
                else:
                    instructions.push_args(*op_args)
                    instructions.append(op)

                if not op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                    test_util.blocking_commit(instructions)

                self.dir_list.append(dir_entry.add_child(path, default_path, self.root, DirListEntry(True, True, bool(prefix))))

            elif root_op == 'DIRECTORY_OPEN':
                path = generate_path()
                instructions.push_args(self.generate_layer())
                instructions.push_args(*test_util.with_length(path))
                instructions.append(op)
                self.dir_list.append(dir_entry.add_child(path, default_path, self.root, DirListEntry(True, True)))

            elif root_op == 'DIRECTORY_MOVE':
                old_path = generate_path()
                new_path = generate_path()
                instructions.push_args(*(test_util.with_length(old_path) + test_util.with_length(new_path)))
                instructions.append(op)
                # This could probably be made to sometimes set has_known_prefix to true
                self.dir_list.append(dir_entry.add_child(new_path, default_path, self.root, DirListEntry(True, True, False)))

                # Make sure that the default directory subspace still exists after moving the specified directory
                if dir_entry.is_directory and not dir_entry.is_subspace and old_path == (u'',):
                    self.ensure_default_directory_subspace(instructions, default_path)

            elif root_op == 'DIRECTORY_MOVE_TO':
                new_path = generate_path()
                instructions.push_args(*test_util.with_length(new_path))
                instructions.append(op)
                self.dir_list.append(dir_entry.root.add_child(new_path, default_path, self.root, DirListEntry(True, True, dir_entry.has_known_prefix)))

                # Make sure that the default directory subspace still exists after moving the current directory
                self.ensure_default_directory_subspace(instructions, default_path)

            # FIXME: There is currently a problem with removing partitions. In these generated tests, it's possible
            # for a removed partition to resurrect itself and insert keys into the database using its allocated
            # prefix. The result is non-deterministic HCA errors.
            elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS':
                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                path = ()
                count = random.randint(0, 1)
                if count == 1:
                    path = generate_path()
                    instructions.push_args(*test_util.with_length(path))
                instructions.push_args(count)

                instructions.append(op)

                # Make sure that the default directory subspace still exists after removing the specified directory
                if path == () or (dir_entry.is_directory and not dir_entry.is_subspace and path == (u'',)):
                    self.ensure_default_directory_subspace(instructions, default_path)

            elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
                path = ()
                count = random.randint(0, 1)
                if count == 1:
                    path = generate_path()
                    instructions.push_args(*test_util.with_length(path))
                instructions.push_args(count)
                instructions.append(op)

            elif root_op == 'DIRECTORY_PACK_KEY':
                t = self.random.random_tuple(5)
                instructions.push_args(*test_util.with_length(t))
                instructions.append(op)
                instructions.append('DIRECTORY_STRIP_PREFIX')

            elif root_op == 'DIRECTORY_UNPACK_KEY' or root_op == 'DIRECTORY_CONTAINS':
                if not dir_entry.has_known_prefix or random.random() < 0.2 or root_op == 'DIRECTORY_UNPACK_KEY':
                    t = self.random.random_tuple(5)
                    instructions.push_args(*test_util.with_length(t))
                    instructions.append('DIRECTORY_PACK_KEY')
                    instructions.append(op)
                else:
                    instructions.push_args(fdb.tuple.pack(self.random.random_tuple(5)))
                    instructions.append(op)

            elif root_op == 'DIRECTORY_RANGE' or root_op == 'DIRECTORY_OPEN_SUBSPACE':
                t = self.random.random_tuple(5)
                instructions.push_args(*test_util.with_length(t))
                instructions.append(op)
                if root_op == 'DIRECTORY_OPEN_SUBSPACE':
                    self.dir_list.append(DirListEntry(False, True, dir_entry.has_known_prefix))
                else:
                    test_util.to_front(instructions, 1)
                    instructions.append('DIRECTORY_STRIP_PREFIX')
                    test_util.to_front(instructions, 1)
                    instructions.append('DIRECTORY_STRIP_PREFIX')

        instructions.begin_finalization()

        test_util.blocking_commit(instructions)

        instructions.append('NEW_TRANSACTION')

        for i, dir_entry in enumerate(self.dir_list):
            instructions.push_args(i)
            instructions.append('DIRECTORY_CHANGE')
            if dir_entry.is_directory:
                instructions.push_args(self.directory_log.key())
                instructions.append('DIRECTORY_LOG_DIRECTORY')
            if dir_entry.has_known_prefix and dir_entry.is_subspace:
                # print '%d. Logging subspace: %d' % (i, dir_entry.dir_id)
                instructions.push_args(self.subspace_log.key())
                instructions.append('DIRECTORY_LOG_SUBSPACE')
            if (i + 1) % 100 == 0:
                test_util.blocking_commit(instructions)

        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)
        return instructions

    def pre_run(self, db, args):
        for (path, layer) in self.prepopulated_dirs:
            try:
                util.get_logger().debug('Prepopulating directory: %r (layer=%r)' % (path, layer))
                fdb.directory.create_or_open(db, path, layer)
            except Exception as e:
                util.get_logger().debug('Could not create directory %r: %r' % (path, e))

    def validate(self, db, args):
        errors = []
        # This check doesn't work in the current test because of the way we use partitions.
        # If a partition is created, allocates a prefix, and then is removed, subsequent prefix
        # allocations could collide with prior ones. We can get around this by not allowing
        # a removed directory (or partition) to be used, but that weakens the test in another way.
        # errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
        return errors

    def get_result_specifications(self):
        return [
            ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1),
            ResultSpecification(self.directory_log, ordering_index=0),
            ResultSpecification(self.subspace_log, ordering_index=0)
        ]


# Utility functions
def generate_path(min_length=0):
    length = int(random.random() * random.random() * (4 - min_length)) + min_length
    path = ()
    for i in range(length):
        if random.random() < 0.05:
            path = path + (u'',)
        else:
            path = path + (random.choice([u'1', u'2', u'3']),)

    return path


def generate_prefix(allow_empty=True, is_partition=False):
    if allow_empty and random.random() < 0.8:
        return None
    elif is_partition or random.random() < 0.5:
        length = random.randint(0 if allow_empty else 1, 5)
        if length == 0:
            return ''

        if not is_partition:
            first = chr(random.randint(ord('\x1d'), 255) % 255)
            return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length - 1))
        else:
            return ''.join(chr(random.randrange(ord('\x02'), ord('\x14'))) for i in range(0, length))
    else:
        prefix = 'abcdefg'
        generated = prefix[0:random.randrange(0 if allow_empty else 1, len(prefix))]
        return generated
@ -0,0 +1,130 @@
#
# directory_hca.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import util

from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification
from bindingtester.tests import test_util, directory_util

fdb.api_version(FDB_API_VERSION)


class DirectoryHcaTest(Test):
    def __init__(self, subspace):
        super(DirectoryHcaTest, self).__init__(subspace)
        self.coordination = subspace['coordination']
        self.prefix_log = subspace['prefix_log']
        self.next_path = 1

    def setup(self, args):
        self.random = test_util.RandomGenerator(args.max_int_bits)
        self.transactions = ['tr%d' % i for i in range(3)]  # SOMEDAY: parameterize this number?
        self.barrier_num = 0

        self.max_directories_per_transaction = 30
        if args.api_version < 300:
            if args.concurrency > 8:
                raise Exception('Directory HCA test does not support concurrency larger than 8 with API version less than 300')

            self.max_directories_per_transaction = 8.0 / args.concurrency

    def commit_transactions(self, instructions, args):
        for tr in self.transactions:
            if random.random() < 0.8 or args.api_version < 300:
                instructions.push_args(tr)
                instructions.append('USE_TRANSACTION')
                test_util.blocking_commit(instructions)

    def barrier(self, instructions, thread_number, thread_ending=False):
        if not thread_ending:
            instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), '')
            instructions.append('SET_DATABASE')
            instructions.append('WAIT_FUTURE')

        instructions.push_args(self.coordination[self.barrier_num][thread_number].key())
        instructions.append('CLEAR_DATABASE')
        instructions.append('WAIT_FUTURE')
        instructions.push_args(self.coordination[self.barrier_num].key())
        instructions.append('WAIT_EMPTY')

        self.barrier_num += 1

    def generate(self, args, thread_number):
        instructions = InstructionSet()

        instructions.append('NEW_TRANSACTION')

        default_path = unicode('default%d' % self.next_path)
        self.next_path += 1
        dir_list = directory_util.setup_directories(instructions, default_path, self.random)
        num_dirs = len(dir_list)

        instructions.push_args(directory_util.DEFAULT_DIRECTORY_INDEX)
        instructions.append('DIRECTORY_CHANGE')

        instructions.setup_complete()

        current_op = 0

        while current_op < args.num_ops:
            if args.concurrency > 1:
                self.barrier(instructions, thread_number)

            instructions.push_args(random.choice(self.transactions))
            instructions.append('USE_TRANSACTION')

            if thread_number == 0 and args.concurrency > 1:
                num_directories = 1
            else:
                num_directories = int(max(1, pow(random.random(), 4) * min(self.max_directories_per_transaction, args.num_ops - current_op)))

            for i in range(num_directories):
                path = (self.random.random_unicode_str(16),)
                op_args = test_util.with_length(path) + ('', None)
                directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE', op_args, path, num_dirs, self.random, self.prefix_log)
                num_dirs += 1

            current_op += num_directories

            if args.concurrency > 1:
                self.barrier(instructions, thread_number, thread_ending=(current_op >= args.num_ops))

            if thread_number == 0:
                self.commit_transactions(instructions, args)

        return instructions

    @fdb.transactional
    def pre_run(self, tr, args):
        if args.concurrency > 1:
            for i in range(args.concurrency):
                tr[self.coordination[0][i]] = ''

    def validate(self, db, args):
        errors = []
        errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
        errors += directory_util.validate_hca_state(db)

        return errors
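
The barrier above is this test's whole concurrency story, so a gloss may
help: each thread advertises itself in round N+1, removes itself from round
N, and then waits for round N's coordination subspace to empty, so no thread
starts the next round until every thread has finished the current one. A
hedged, stand-alone analogue using a plain dict in place of the coordination
subspace (ignoring locking and the database entirely; names are illustrative):

import time

def barrier(coordination, round_num, thread_number, thread_ending=False):
    if not thread_ending:
        # advertise this thread in the next round (SET_DATABASE analogue)
        coordination.setdefault(round_num + 1, set()).add(thread_number)
    # leave the current round (CLEAR_DATABASE analogue)
    coordination[round_num].discard(thread_number)
    # wait until every thread has left the current round (WAIT_EMPTY analogue)
    while coordination[round_num]:
        time.sleep(0.01)
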
@ -0,0 +1,191 @@
#
# directory_util.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random
import struct

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import util

from bindingtester.tests import test_util

fdb.api_version(FDB_API_VERSION)

DEFAULT_DIRECTORY_INDEX = 4
DEFAULT_DIRECTORY_PREFIX = 'default'
DIRECTORY_ERROR_STRING = 'DIRECTORY_ERROR'


class DirListEntry:
    dir_id = 0  # Used for debugging

    def __init__(self, is_directory, is_subspace, has_known_prefix=True, path=(), root=None):
        self.root = root or self
        self.path = path
        self.is_directory = is_directory
        self.is_subspace = is_subspace
        self.has_known_prefix = has_known_prefix
        self.children = {}

        self.dir_id = DirListEntry.dir_id + 1
        DirListEntry.dir_id += 1

    def __repr__(self):
        return 'DirEntry %d %r: %d' % (self.dir_id, self.path, self.has_known_prefix)

    def add_child(self, subpath, default_path, root, child):
        if default_path in root.children:
            # print 'Adding child %r to default directory %r at %r' % (child, root.children[default_path].path, subpath)
            c = root.children[default_path]._add_child_impl(subpath, child)
            child.has_known_prefix = c.has_known_prefix and child.has_known_prefix
            # print 'Added %r' % c

        # print 'Adding child %r to directory %r at %r' % (child, self.path, subpath)
        c = self._add_child_impl(subpath, child)
        # print 'Added %r' % c
        return c

    def _add_child_impl(self, subpath, child):
        # print '%d, %d. Adding child (recursive): %s %s' % (self.dir_id, child.dir_id, repr(self.path), repr(subpath))
        if len(subpath) == 0:
            self.has_known_prefix = self.has_known_prefix and child.has_known_prefix
            # print '%d, %d. Setting child: %d' % (self.dir_id, child.dir_id, self.has_known_prefix)
            self._merge_children(child)

            return self
        else:
            if subpath[0] not in self.children:
                # print '%d, %d. Path %s was absent (%s)' % (self.dir_id, child.dir_id, repr(self.path + subpath[0:1]), repr(self.children))
                subdir = DirListEntry(True, True, path=self.path + subpath[0:1], root=self.root)
                subdir.has_known_prefix = len(subpath) == 1
                self.children[subpath[0]] = subdir
            else:
                subdir = self.children[subpath[0]]
                subdir.has_known_prefix = False
                # print '%d, %d. Path was present' % (self.dir_id, child.dir_id)

            return subdir._add_child_impl(subpath[1:], child)

    def _merge_children(self, other):
        for c in other.children:
            if c not in self.children:
                self.children[c] = other.children[c]
            else:
                self.children[c].has_known_prefix = self.children[c].has_known_prefix and other.children[c].has_known_prefix
                self.children[c]._merge_children(other.children[c])


def setup_directories(instructions, default_path, random):
    dir_list = [DirListEntry(True, False, True)]
    instructions.push_args(0, '\xfe')
    instructions.append('DIRECTORY_CREATE_SUBSPACE')
    dir_list.append(DirListEntry(False, True))

    instructions.push_args(0, '')
    instructions.append('DIRECTORY_CREATE_SUBSPACE')
    dir_list.append(DirListEntry(False, True))

    instructions.push_args(1, 2, 1)
    instructions.append('DIRECTORY_CREATE_LAYER')
    dir_list.append(DirListEntry(True, False, True))

    create_default_directory_subspace(instructions, default_path, random)
    dir_list.append(DirListEntry(True, True, True))

    instructions.push_args(DEFAULT_DIRECTORY_INDEX)
    instructions.append('DIRECTORY_SET_ERROR_INDEX')

    return dir_list


def create_default_directory_subspace(instructions, path, random):
    test_util.blocking_commit(instructions)
    instructions.push_args(3)
    instructions.append('DIRECTORY_CHANGE')
    prefix = random.random_string(16)
    instructions.push_args(1, path, '', '%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
    instructions.append('DIRECTORY_CREATE_DATABASE')

    instructions.push_args(DEFAULT_DIRECTORY_INDEX)
    instructions.append('DIRECTORY_CHANGE')


def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace):
    if not op.endswith('_DATABASE'):
        instructions.push_args(1, *test_util.with_length(path))
        instructions.append('DIRECTORY_EXISTS')

    # This op must leave the stack in the state it is in at this point, with the exception
    # that it may leave an error on the stack
    instructions.push_args(*op_args)
    instructions.append(op)

    if not op.endswith('_DATABASE'):
        instructions.push_args(dir_index)
        instructions.append('DIRECTORY_CHANGE')

        instructions.push_args(1, '', random.random_string(16), '')
        instructions.append('DIRECTORY_PACK_KEY')
        test_util.to_front(instructions, 3)  # move the existence result up to the front of the stack

        t = util.subspace_to_tuple(subspace)
        instructions.push_args(len(t) + 3, *t)

        instructions.append('TUPLE_PACK')  # subspace[<exists>][<packed_key>][random.random_string(16)] = ''
        instructions.append('SET')

        instructions.push_args(DEFAULT_DIRECTORY_INDEX)
        instructions.append('DIRECTORY_CHANGE')


def check_for_duplicate_prefixes(db, subspace):
    last_prefix = None
    start_key = subspace[0].range().start

    duplicates = set()
    count = 0
    while True:
        prefixes = db.get_range(start_key, subspace[0].range().stop, limit=1000)
        if len(prefixes) == 0:
            break

        start_key = fdb.KeySelector.first_greater_than(prefixes[-1].key)

        prefixes = [subspace[0].unpack(kv.key)[0] for kv in prefixes]
        prefixes = [p for p in prefixes if not (p.startswith(DEFAULT_DIRECTORY_PREFIX) or p == DIRECTORY_ERROR_STRING)]
        count += len(prefixes)

        prefixes = [last_prefix] + prefixes
        duplicates.update([p for i, p in enumerate(prefixes[1:]) if p == prefixes[i]])
        last_prefix = prefixes[-1]

    util.get_logger().info('Checked %d directory prefixes for duplicates' % count)
    return ['The prefix %r was allocated multiple times' % d[:-2] for d in set(duplicates)]


def validate_hca_state(db):
    hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
    counters = hca[0]
    recent = hca[1]

    last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True)
    [(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(0, 0)]

    actual_count = len(db[recent[start] : recent.range().stop])
    if actual_count > reported_count:
        return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)]

    return []
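
check_for_duplicate_prefixes above leans on the fact that get_range returns
keys in order, so a prefix allocated twice shows up as two equal adjacent
entries. A hedged stand-alone reduction of that detection step:

# Illustrative reduction of the duplicate check above: on a sorted stream,
# duplicates are always adjacent, so one pass with a lookbehind suffices.
def adjacent_duplicates(sorted_items):
    return set(b for a, b in zip(sorted_items, sorted_items[1:]) if a == b)

assert adjacent_duplicates(['p1', 'p2', 'p2', 'p3']) == set(['p2'])
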
@ -0,0 +1,395 @@
#
# scripted.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import Result

from bindingtester.tests import Test, Instruction, ThreadedInstructionSet, ResultSpecification
from bindingtester.tests import test_util

fdb.api_version(FDB_API_VERSION)


# SOMEDAY: This should probably be broken up into smaller tests
class ScriptedTest(Test):
    TEST_API_VERSION = 500

    def __init__(self, subspace):
        super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
        self.workspace = self.subspace['workspace']
        self.results_subspace = self.subspace['results']
        # self.thread_subspace = self.subspace['threads']  # TODO: update START_THREAD so that we can create threads in subspaces

    def setup(self, args):
        if args.concurrency > 1:
            raise Exception('Scripted tests cannot be run with a concurrency greater than 1')

        # SOMEDAY: this is only a limitation because we don't know how many operations the bisection should start with;
        # it should be fixable.
        #
        # We also need to enable the commented out support for num_ops in this file and make it so the default value runs
        # the entire test
        if args.bisect:
            raise Exception('Scripted tests cannot be bisected')

    def generate(self, args, thread_number):
        self.results = []

        test_instructions = ThreadedInstructionSet()
        main_thread = test_instructions.create_thread()

        foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0, 6)]

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(1020)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('GET_READ_VERSION')
        main_thread.push_args(foo[1], 'bar')
        main_thread.append('SET')
        main_thread.push_args(foo[1])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(2000)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, test_util.error_string(2000))

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(0)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, test_util.error_string(2000))

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(foo[1])
        main_thread.append('DUP')
        main_thread.append('DUP')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        main_thread.append('CLEAR')
        main_thread.append('GET_SNAPSHOT')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args(foo[1])
        main_thread.append('GET_DATABASE')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[1])
        main_thread.append('DUP')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('CLEAR')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, test_util.error_string(1020))

        main_thread.push_args(foo[1])
        main_thread.append('GET_SNAPSHOT')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args(foo[1])
        main_thread.append('CLEAR')
        main_thread.append('COMMIT')
        main_thread.append('WAIT_FUTURE')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('GET_COMMITTED_VERSION')
        main_thread.append('RESET')
        main_thread.append('EMPTY_STACK')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(1, 'bar', foo[1], foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
        main_thread.append('SWAP')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET_DATABASE')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[2])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args('', 0, -1, '')
        main_thread.append('GET_KEY')
        self.add_result(main_thread, args, '')

        main_thread.append('NEW_TRANSACTION')
        main_thread.append('GET_READ_VERSION_SNAPSHOT')
        main_thread.push_args('random', foo[1], foo[3], 0, 1, 1)
        main_thread.append('POP')
        main_thread.append('GET_RANGE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2', foo[1], 'bar')))
        main_thread.push_args(foo[1], foo[3], 1, 1, 0)
        main_thread.append('GET_RANGE_SNAPSHOT')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2')))
        main_thread.push_args(foo[1], foo[3], 0, 0, 4)
        main_thread.append('GET_RANGE_DATABASE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[3], foo[5])
        main_thread.append('CLEAR_RANGE')
        main_thread.push_args(foo[1], 0, 3, '')
        main_thread.append('GET_KEY')
        self.add_result(main_thread, args, foo[5])
        main_thread.push_args(foo[1], 1, 2, '')
        main_thread.append('GET_KEY_SNAPSHOT')
        self.add_result(main_thread, args, foo[5])
        main_thread.push_args(foo[5], 0, -2, '')
        main_thread.append('GET_KEY_DATABASE')
        self.add_result(main_thread, args, foo[2])
        main_thread.push_args(self.workspace.key(), 2, 0, 2)
        main_thread.append('GET_RANGE_STARTS_WITH')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        main_thread.push_args(self.workspace.key(), 4, 0, 3)
        main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
        main_thread.push_args(self.workspace.key(), 3, 1, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[5], 'bar5', foo[4], 'bar4', foo[3], 'bar3')))
        main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
        main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR_DATABASE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3')))
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(self.workspace.key())
        main_thread.append('CLEAR_RANGE_STARTS_WITH')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH')
        self.add_result(main_thread, args, '')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[1])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[2])
        main_thread.append('CLEAR_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')))

        main_thread.push_args(foo[3], foo[5])
        main_thread.append('CLEAR_RANGE_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[5], 'bar5')))

        main_thread.push_args(self.workspace.key())
        main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args, '')

        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(foo[1], foo[5], 0, 0, 0)
        main_thread.append('GET_RANGE')
        self.add_result(main_thread, args, test_util.error_string(2210))
        main_thread.push_args(foo[1], foo[5], 0, 0, 0)
        main_thread.append('GET_RANGE_DATABASE')
        self.add_result(main_thread, args, test_util.error_string(2210))

        self.append_range_test(main_thread, args, 100, 256)
        self.append_range_test(main_thread, args, 1000, 8)

        main_thread.append('EMPTY_STACK')
        tup = (0, 'foo', -1093, u'unicode\u9348test', 0xffffffff + 100, 'bar\x00\xff')
        main_thread.push_args(*test_util.with_length(tup))
        main_thread.append('TUPLE_PACK')
        main_thread.append('DUP')
        self.add_result(main_thread, args, fdb.tuple.pack(tup))
        main_thread.append('TUPLE_UNPACK')
        for item in reversed(tup):
            self.add_result(main_thread, args, fdb.tuple.pack((item,)))

        main_thread.push_args(0xffffffff, -100)
        main_thread.append('SUB')
        main_thread.push_args(1)
        main_thread.append('TUPLE_PACK')
        self.add_result(main_thread, args, fdb.tuple.pack((0xffffffff + 100,)))

        main_thread.append('EMPTY_STACK')
        main_thread.push_args(*test_util.with_length(tup))
        main_thread.append('TUPLE_RANGE')
        rng = fdb.tuple.range(tup)
        self.add_result(main_thread, args, rng.stop)
        self.add_result(main_thread, args, rng.start)

        stampKey = 'stampedXXXXXXXXXXsuffix'
        stampKeyIndex = stampKey.find('XXXXXXXXXX')
        stampKeyStr = chr(stampKeyIndex % 256) + chr(stampKeyIndex / 256)
        main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', stampKey + stampKeyStr, 'stampedBar')
        main_thread.append('ATOMIC_OP')
        main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', 'XXXXXXXXXX')
        main_thread.append('ATOMIC_OP')

        main_thread.push_args('suffix')
        main_thread.append('GET_VERSIONSTAMP')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args('stamped')
        main_thread.append('CONCAT')
        main_thread.append('CONCAT')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'stampedBar')

        main_thread.push_args('stampedValue', 'suffix')
        main_thread.append('GET')
        main_thread.push_args('stamped')
        main_thread.append('CONCAT')
        main_thread.append('CONCAT')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'stampedBar')

        main_thread.append('GET_VERSIONSTAMP')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        self.add_result(main_thread, args, test_util.error_string(2021))

        main_thread.push_args('sentinel')
        main_thread.append('UNIT_TESTS')
        self.add_result(main_thread, args, 'sentinel')

        if not args.no_threads:
            wait_key = 'waitKey'
            # threads = [self.thread_subspace[i] for i in range(0, 2)]
            threads = ['thread_spec%d' % i for i in range(0, 2)]
            for thread_spec in threads:
                main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')
                main_thread.append('SET_DATABASE')
                main_thread.append('WAIT_FUTURE')

            for thread_spec in threads:
                main_thread.push_args(thread_spec)
                # if len(main_thread) < args.num_ops:
                main_thread.append('START_THREAD')
                thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
                thread.append('NEW_TRANSACTION')
                thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack((wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
                thread.append('GET')
                thread.append('POP')
                thread.append('SET')
                thread.append('CLEAR')
                test_util.blocking_commit(thread)
                thread.append('POP')
                thread.append('CLEAR_DATABASE')
                thread.push_args(self.workspace.pack((wait_key,)))
                thread.append('WAIT_EMPTY')

                thread.append('NEW_TRANSACTION')
                thread.push_args(foo[1])
                thread.append('GET')
                self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')

        main_thread.append('EMPTY_STACK')
        # if len(main_thread) > args.num_ops:
        #     main_thread[args.num_ops:] = []

        return test_instructions

    def get_result_specifications(self):
        return [
            ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1021])
        ]

    def get_expected_results(self):
        return {self.results_subspace: self.results}

    def append_range_test(self, instructions, args, num_pairs, kv_length):
        instructions.append('NEW_TRANSACTION')

        instructions.push_args(self.workspace.key())
        instructions.append('CLEAR_RANGE_STARTS_WITH')

        kvpairs = []
        for i in range(0, num_pairs * 2):
            kvpairs.append(self.workspace.pack(('foo', ''.join(chr(random.randint(0, 254)) for i in range(0, kv_length)))))

        kvpairs = list(set(kvpairs))
        if len(kvpairs) % 2 == 1:
            kvpairs = kvpairs[:-1]
        kvpairs.sort()

        instructions.push_args(*kvpairs)
        for i in range(0, len(kvpairs) / 2):
            instructions.append('SET')
            if i % 100 == 99:
                test_util.blocking_commit(instructions)
                self.add_result(instructions, args, 'RESULT_NOT_PRESENT')

        foo_range = self.workspace.range(('foo',))
        instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1)
        instructions.append('GET_RANGE')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        instructions.push_args(self.workspace.key(), 0, 0, -1)
        instructions.append('GET_RANGE_STARTS_WITH')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, '')
        instructions.append('GET_RANGE_SELECTOR')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        test_util.blocking_commit(instructions)
        self.add_result(instructions, args, 'RESULT_NOT_PRESENT')

    def add_result(self, instructions, args, *values):
        key = self.results_subspace.pack((len(self.results),))
        instructions.push_args(key)
        instructions.append('SET_DATABASE')

        # if len(instructions) <= args.num_ops:
        self.results.append(Result(self.results_subspace, key, values))

        instructions.append('POP')
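The versionstamp stanza above hand-builds the suffix chr(stampKeyIndex % 256) + chr(stampKeyIndex / 256): a two-byte little-endian offset telling the database where, inside the key, to overwrite the 10-byte placeholder with the commit versionstamp. A self-contained sketch of the same encoding (illustration only, Python 2 string semantics like the test itself):

import struct

def versionstamped_key_param(key_with_placeholder, placeholder='XXXXXXXXXX'):
    # The commit versionstamp replaces the 10-byte placeholder; the trailing
    # two bytes are the little-endian position of that placeholder in the key.
    index = key_with_placeholder.find(placeholder)
    assert index >= 0
    return key_with_placeholder + struct.pack('<H', index)

# 'stamped' is 7 bytes long, so the offset suffix is \x07\x00.
assert versionstamped_key_param('stampedXXXXXXXXXXsuffix')[-2:] == '\x07\x00'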
@ -0,0 +1,125 @@
#
# test_util.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random
import unicodedata

import fdb

from bindingtester import util


class RandomGenerator(object):
    def __init__(self, max_int_bits=64):
        self.max_int_bits = max_int_bits

    def random_unicode_str(self, length):
        return u''.join(self.random_unicode_char() for i in range(0, length))

    def random_int(self):
        num_bits = random.randint(0, self.max_int_bits)  # This way, we test small numbers with higher probability

        max_value = (1 << num_bits) - 1
        min_value = -max_value - 1
        num = random.randint(min_value, max_value)

        # util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,)))))
        return num

    def random_tuple(self, max_size):
        size = random.randint(1, max_size)
        tup = []

        for i in range(size):
            choice = random.randint(0, 3)
            if choice == 0:
                tup.append(self.random_int())
            elif choice == 1:
                tup.append(None)
            elif choice == 2:
                tup.append(self.random_string(random.randint(0, 100)))
            elif choice == 3:
                tup.append(self.random_unicode_str(random.randint(0, 100)))
            else:
                assert False

        return tuple(tup)

    def random_range_params(self):
        if random.random() < 0.75:
            limit = random.randint(1, int(1e3))
        elif random.random() < 0.75:
            limit = 0
        else:
            limit = random.randint(int(1e8), (1 << 31) - 1)

        return (limit, random.randint(0, 1), random.randint(-2, 4))

    def random_selector_params(self):
        if random.random() < 0.9:
            offset = random.randint(-20, 20)
        else:
            offset = random.randint(-1000, 1000)

        return (random.randint(0, 1), offset)

    def random_string(self, length):
        if length == 0:
            return ''

        return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1))

    def random_unicode_char(self):
        while True:
            c = random.randint(0, 0xffff)
            if unicodedata.category(unichr(c))[0] in 'LMNPSZ':
                return unichr(c)


def error_string(error_code):
    return fdb.tuple.pack(('ERROR', str(error_code)))


def blocking_commit(instructions):
    instructions.append('COMMIT')
    instructions.append('WAIT_FUTURE')
    instructions.append('RESET')


def to_front(instructions, index):
    if index == 0:
        pass
    elif index == 1:
        instructions.push_args(1)
        instructions.append('SWAP')
    elif index == 2:
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        instructions.push_args(index)
        instructions.append('SWAP')
    else:
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        instructions.push_args(index)
        instructions.append('SWAP')
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        to_front(instructions, index - 1)


def with_length(tup):
    return (len(tup),) + tup
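to_front is easiest to see by simulation: it moves the stack item at depth index to the top using only pushed indices and SWAPs, preserving the relative order of everything else. A sketch modeling SWAP as "exchange the top with the item n deep", which is an assumption drawn from the binding tester's stack conventions:

def swap(stack, n):
    # SWAP exchanges the top of the stack with the item n deep (top = depth 0).
    stack[-1], stack[-1 - n] = stack[-1 - n], stack[-1]

def to_front_sim(stack, index):
    # Mirrors to_front() above, acting on a Python list with the top at the end.
    if index == 0:
        pass
    elif index == 1:
        swap(stack, 1)
    elif index == 2:
        swap(stack, 1)
        swap(stack, 2)
    else:
        swap(stack, index - 1)
        swap(stack, index)
        swap(stack, index - 1)
        to_front_sim(stack, index - 1)

s = ['d', 'c', 'b', 'a']   # 'a' is the top; 'd' sits at depth 3
to_front_sim(s, 3)
assert s == ['c', 'b', 'a', 'd']  # 'd' on top, others keep their order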
@ -0,0 +1,72 @@
#
# util.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import signal
import os
import glob

import fdb


def initialize_logger_level(logging_level):
    logger = get_logger()

    assert logging_level in ["DEBUG", "INFO", "WARNING", "ERROR"]

    if logging_level == "DEBUG":
        logger.setLevel(logging.DEBUG)
    elif logging_level == "INFO":
        logger.setLevel(logging.INFO)
    elif logging_level == "WARNING":
        logger.setLevel(logging.WARNING)
    elif logging_level == "ERROR":
        logger.setLevel(logging.ERROR)


def get_logger():
    return logging.getLogger('foundationdb.bindingtester')


# Attempts to get the name associated with a process termination signal
def signal_number_to_name(signal_num):
    name = []
    for key in signal.__dict__.keys():
        if key.startswith("SIG") and getattr(signal, key) == signal_num:
            name.append(key)
    if len(name) == 1:
        return name[0]
    else:
        return str(signal_num)


def import_subclasses(filename, module_path):
    for f in glob.glob(os.path.join(os.path.dirname(filename), '*.py')):
        fn = os.path.basename(f)
        if fn == '__init__.py':
            continue
        __import__('%s.%s' % (module_path, os.path.splitext(fn)[0]))


# Attempts to unpack a subspace
# This throws an exception if the subspace cannot be unpacked as a tuple
# As a result, the binding tester cannot use subspaces that have non-tuple raw prefixes
def subspace_to_tuple(subspace):
    try:
        return fdb.tuple.unpack(subspace.key())
    except Exception as e:
        get_logger().debug(e)
        raise Exception('The binding tester does not support subspaces with non-tuple raw prefixes')
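A quick usage sketch for signal_number_to_name, assuming a POSIX platform; it returns a name only when exactly one SIG* constant matches, and falls back to the number otherwise:

import signal

print(signal_number_to_name(signal.SIGKILL))  # 'SIGKILL': a single unambiguous match
print(signal_number_to_name(99))              # '99': no SIG* constant has this value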
@ -0,0 +1,60 @@
/*
 * ThreadCleanup.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flow/Platform.h"
#include "flow/FastAlloc.h"

#if defined(WIN32)

#include <Windows.h>

BOOL WINAPI DllMain( HINSTANCE dll, DWORD reason, LPVOID reserved ) {
    if (reason == DLL_THREAD_DETACH)
        releaseAllThreadMagazines();
    return TRUE;
}

#elif defined( __unixish__ )

static pthread_key_t threadDestructorKey;

static void threadDestructor(void*) {
    releaseAllThreadMagazines();
}

void registerThread() {
    pthread_setspecific( threadDestructorKey, (const void*)1 );
}

static int initThreadDestructorKey() {
    if (!pthread_key_create(&threadDestructorKey, &threadDestructor)) {
        registerThread();
        setFastAllocatorThreadInitFunction( &registerThread );
    }

    return 0;
}

static int threadDestructorKeyInit = initThreadDestructorKey();

#else
#error Port me!
#endif
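The pthread destructor-key idiom above — create a key once, mark each thread, and let the destructor run at thread exit — has a rough Python analogue. A sketch of the same shape, purely illustrative; the real cleanup frees per-thread allocator magazines in C++:

import threading

def with_thread_cleanup(target, cleanup):
    # Like the pthread key destructor: run a per-thread cleanup when the
    # thread body finishes, however it exits.
    def wrapper():
        try:
            target()
        finally:
            cleanup()
    return wrapper

def work():
    pass

def release_magazines():
    pass  # stand-in for releaseAllThreadMagazines()

t = threading.Thread(target=with_thread_cleanup(work, release_magazines))
t.start()
t.join()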
@ -0,0 +1,681 @@
/*
 * fdb_c.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define FDB_API_VERSION 500

#include "fdbclient/MultiVersionTransaction.h"
#include "foundationdb/fdb_c.h"

int g_api_version = 0;

/*
 * Our clients might share these ThreadSafe types between threads. It is therefore
 * unsafe to call addRef on them.
 *
 * type mapping:
 *   FDBFuture -> ThreadSingleAssignmentVarBase
 *   FDBCluster -> ICluster
 *   FDBDatabase -> IDatabase
 *   FDBTransaction -> ITransaction
 */
#define TSAVB(f) ((ThreadSingleAssignmentVarBase*)(f))
#define TSAV(T, f) ((ThreadSingleAssignmentVar<T>*)(f))

#define DB(d) ((IDatabase*)d)
#define CLUSTER(c) ((ICluster*)c)
#define TXN(t) ((ITransaction*)t)

#define API (MultiVersionApi::api)

/* This must be true so that we can return the data pointer of a
   Standalone<RangeResultRef> as an array of FDBKeyValue. */
static_assert( sizeof(FDBKeyValue) == sizeof(KeyValueRef),
               "FDBKeyValue / KeyValueRef size mismatch" );


#define TSAV_ERROR(type, error) ((FDBFuture*)(ThreadFuture<type>(error())).extractPtr())


extern "C" DLLEXPORT
const char *fdb_get_error( fdb_error_t code ) {
    return Error::fromUnvalidatedCode( code ).what();
}

extern "C" DLLEXPORT
fdb_bool_t fdb_error_predicate( int predicate_test, fdb_error_t code ) {
    if(predicate_test == FDBErrorPredicates::RETRYABLE) {
        return fdb_error_predicate( FDBErrorPredicates::MAYBE_COMMITTED, code ) ||
               fdb_error_predicate( FDBErrorPredicates::RETRYABLE_NOT_COMMITTED, code );
    }
    if(predicate_test == FDBErrorPredicates::MAYBE_COMMITTED) {
        return code == error_code_commit_unknown_result ||
               code == error_code_cluster_version_changed;
    }
    if(predicate_test == FDBErrorPredicates::RETRYABLE_NOT_COMMITTED) {
        return code == error_code_not_committed ||
               code == error_code_past_version ||
               code == error_code_future_version ||
               code == error_code_database_locked;
    }
    return false;
}

#define RETURN_ON_ERROR(code_to_run) \
    try { code_to_run } \
    catch( Error& e ) { if (e.code() <= 0) return internal_error().code(); else return e.code(); } \
    catch( ... ) { return error_code_unknown_error; }

#define CATCH_AND_RETURN(code_to_run) \
    RETURN_ON_ERROR(code_to_run); \
    return error_code_success;

#define CATCH_AND_DIE(code_to_run) \
    try { code_to_run } \
    catch ( Error& e ) { fprintf( stderr, "Unexpected FDB error %d\n", e.code() ); abort(); } \
    catch ( ... ) { fprintf( stderr, "Unexpected FDB unknown error\n" ); abort(); }

extern "C" DLLEXPORT
fdb_error_t fdb_network_set_option( FDBNetworkOption option,
                                    uint8_t const* value,
                                    int value_length )
{
    CATCH_AND_RETURN(
        API->setNetworkOption( (FDBNetworkOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}

extern "C"
fdb_error_t fdb_setup_network_impl() {
    CATCH_AND_RETURN( API->setupNetwork(); );
}

extern "C"
fdb_error_t fdb_setup_network_v13( const char* localAddress ) {
    fdb_error_t errorCode = fdb_network_set_option( FDB_NET_OPTION_LOCAL_ADDRESS, (uint8_t const*)localAddress, strlen(localAddress) );
    if(errorCode != 0)
        return errorCode;

    return fdb_setup_network_impl();
}

extern "C" DLLEXPORT
fdb_error_t fdb_run_network() {
    CATCH_AND_RETURN( API->runNetwork(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_stop_network() {
    CATCH_AND_RETURN( API->stopNetwork(); );
}

extern "C" DLLEXPORT
FDBFuture* fdb_cluster_configure_database( FDBCluster* c, int config_type,
                                           int config_mode, uint8_t const* db_name,
                                           int db_name_length )
{
    // Obsolete, but needed for linker compatibility with api version 12 and below
    return (FDBFuture*)ThreadFuture<Void>(client_invalid_operation()).extractPtr();
}

extern "C" DLLEXPORT
void fdb_future_cancel( FDBFuture* f ) {
    CATCH_AND_DIE(
        TSAVB(f)->addref();
        TSAVB(f)->cancel();
    );
}

extern "C" DLLEXPORT
void fdb_future_release_memory( FDBFuture* f ) {
    CATCH_AND_DIE( TSAVB(f)->releaseMemory(); );
}

extern "C" DLLEXPORT
void fdb_future_destroy( FDBFuture* f ) {
    CATCH_AND_DIE( TSAVB(f)->cancel(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_block_until_ready( FDBFuture* f ) {
    CATCH_AND_RETURN( TSAVB(f)->blockUntilReady(); );
}

extern "C" DLLEXPORT
fdb_bool_t fdb_future_is_error_v22( FDBFuture* f ) {
    return TSAVB(f)->isError();
}

extern "C" DLLEXPORT
fdb_bool_t fdb_future_is_ready( FDBFuture* f ) {
    return TSAVB(f)->isReady();
}

class CAPICallback : public ThreadCallback {
public:
    CAPICallback(void (*callbackf)(FDBFuture*, void*), FDBFuture* f,
                 void* userdata)
        : callbackf(callbackf), f(f), userdata(userdata) {}

    virtual bool canFire(int notMadeActive) { return true; }
    virtual void fire(const Void& unused, int& userParam) {
        (*callbackf)(f, userdata);
        delete this;
    }
    virtual void error(const Error&, int& userParam) {
        (*callbackf)(f, userdata);
        delete this;
    }

private:
    void (*callbackf)(FDBFuture*, void*);
    FDBFuture* f;
    void* userdata;
};

extern "C" DLLEXPORT
fdb_error_t fdb_future_set_callback( FDBFuture* f,
                                     void (*callbackf)(FDBFuture*, void*),
                                     void* userdata ) {
    CAPICallback* cb = new CAPICallback(callbackf, f, userdata);
    int ignore;
    CATCH_AND_RETURN( TSAVB(f)->callOrSetAsCallback( cb, ignore, 0 ); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_error_impl( FDBFuture* f ) {
    return TSAVB(f)->getErrorCode();
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_error_v22( FDBFuture* f, const char** description ) {
    if ( !( TSAVB(f)->isError() ) )
        return error_code_future_not_error;
    if (description)
        *description = TSAVB(f)->error.what();
    return TSAVB(f)->error.code();
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_version( FDBFuture* f, int64_t* out_version ) {
    CATCH_AND_RETURN( *out_version = TSAV(Version, f)->get(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
                                int* out_key_length ) {
    CATCH_AND_RETURN(
        KeyRef key = TSAV(Key, f)->get();
        *out_key = key.begin();
        *out_key_length = key.size(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster ) {
    CATCH_AND_RETURN(
        *out_cluster = (FDBCluster*)
            ( (TSAV( Reference<ICluster>, f )->get() ).extractPtr() ); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database ) {
    CATCH_AND_RETURN(
        *out_database = (FDBDatabase*)
            ( (TSAV( Reference<IDatabase>, f )->get() ).extractPtr() ); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_future_get_value( FDBFuture* f, fdb_bool_t* out_present,
                                  uint8_t const** out_value, int* out_value_length ) {
    CATCH_AND_RETURN(
        Optional<Value> v = TSAV(Optional<Value>, f)->get();
        *out_present = v.present();
        if (*out_present) {
            *out_value = v.get().begin();
            *out_value_length = v.get().size();
        } );
}

extern "C"
fdb_error_t fdb_future_get_keyvalue_array_impl(
    FDBFuture* f, FDBKeyValue const** out_kv,
    int* out_count, fdb_bool_t* out_more )
{
    CATCH_AND_RETURN(
        Standalone<RangeResultRef> rrr = TSAV(Standalone<RangeResultRef>, f)->get();
        *out_kv = (FDBKeyValue*)rrr.begin();
        *out_count = rrr.size();
        *out_more = rrr.more; );
}

extern "C"
fdb_error_t fdb_future_get_keyvalue_array_v13(
    FDBFuture* f, FDBKeyValue const** out_kv, int* out_count)
{
    CATCH_AND_RETURN(
        Standalone<RangeResultRef> rrr = TSAV(Standalone<RangeResultRef>, f)->get();
        *out_kv = (FDBKeyValue*)rrr.begin();
        *out_count = rrr.size(); );
}

extern "C"
fdb_error_t fdb_future_get_string_array(
    FDBFuture* f, const char*** out_strings, int* out_count)
{
    CATCH_AND_RETURN(
        Standalone<VectorRef<const char*>> na = TSAV(Standalone<VectorRef<const char*>>, f)->get();
        *out_strings = (const char **) na.begin();
        *out_count = na.size();
    );
}

extern "C" DLLEXPORT
FDBFuture* fdb_create_cluster( const char* cluster_file_path ) {
    return (FDBFuture*) API->createCluster( cluster_file_path ? cluster_file_path : ""/*, g_api_version*/ ).extractPtr();
}

extern "C" DLLEXPORT
fdb_error_t fdb_cluster_set_option( FDBCluster* c,
                                    FDBClusterOption option,
                                    uint8_t const* value,
                                    int value_length )
{
    CATCH_AND_RETURN(
        CLUSTER(c)->setOption( (FDBClusterOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}

extern "C" DLLEXPORT
void fdb_cluster_destroy( FDBCluster* c ) {
    CATCH_AND_DIE( CLUSTER(c)->delref(); );
}

extern "C" DLLEXPORT
FDBFuture* fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
                                        int db_name_length ) {
    return (FDBFuture*)
        ( CLUSTER(c)->createDatabase( StringRef( db_name,
                                                 db_name_length ) ).extractPtr() );
}

extern "C" DLLEXPORT
fdb_error_t fdb_database_set_option( FDBDatabase* d,
                                     FDBDatabaseOption option,
                                     uint8_t const* value,
                                     int value_length )
{
    CATCH_AND_RETURN(
        DB(d)->setOption( (FDBDatabaseOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}

extern "C" DLLEXPORT
void fdb_database_destroy( FDBDatabase* d ) {
    CATCH_AND_DIE( DB(d)->delref(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_database_create_transaction( FDBDatabase* d,
                                             FDBTransaction** out_transaction )
{
    CATCH_AND_RETURN(
        Reference<ITransaction> tr = DB(d)->createTransaction();
        if(g_api_version <= 15)
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        *out_transaction = (FDBTransaction*)tr.extractPtr(); );
}


extern "C" DLLEXPORT
void fdb_transaction_destroy( FDBTransaction* tr ) {
    try {
        TXN(tr)->delref();
    } catch ( ... ) { }
}

extern "C" DLLEXPORT
void fdb_transaction_cancel( FDBTransaction* tr ) {
    CATCH_AND_DIE( TXN(tr)->cancel(); );
}

extern "C" DLLEXPORT
void fdb_transaction_set_read_version( FDBTransaction* tr, int64_t version ) {
    CATCH_AND_DIE( TXN(tr)->setVersion( version ); );
}

extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_read_version( FDBTransaction* tr ) {
    return (FDBFuture*)( TXN(tr)->getReadVersion().extractPtr() );
}

extern "C"
FDBFuture* fdb_transaction_get_impl( FDBTransaction* tr, uint8_t const* key_name,
                                     int key_name_length, fdb_bool_t snapshot ) {
    return (FDBFuture*)
        ( TXN(tr)->get( KeyRef( key_name, key_name_length ), snapshot ).extractPtr() );
}

extern "C"
FDBFuture* fdb_transaction_get_v13( FDBTransaction* tr, uint8_t const* key_name,
                                    int key_name_length )
{
    return fdb_transaction_get_impl( tr, key_name, key_name_length, 0 );
}

extern "C"
FDBFuture* fdb_transaction_get_key_impl( FDBTransaction* tr, uint8_t const* key_name,
                                         int key_name_length, fdb_bool_t or_equal,
                                         int offset, fdb_bool_t snapshot ) {
    return (FDBFuture*)( TXN(tr)->getKey( KeySelectorRef(
                                              KeyRef( key_name,
                                                      key_name_length ),
                                              or_equal, offset ),
                                          snapshot ).extractPtr() );
}

extern "C"
FDBFuture* fdb_transaction_get_key_v13( FDBTransaction* tr, uint8_t const* key_name,
                                        int key_name_length, fdb_bool_t or_equal,
                                        int offset ) {
    return fdb_transaction_get_key_impl( tr, key_name, key_name_length,
                                         or_equal, offset, false );
}

extern "C"
FDBFuture* fdb_transaction_get_addresses_for_key( FDBTransaction* tr, uint8_t const* key_name,
                                                  int key_name_length ) {
    return (FDBFuture*)( TXN(tr)->getAddressesForKey( KeyRef(key_name, key_name_length) ).extractPtr() );
}

extern "C"
FDBFuture* fdb_transaction_get_range_impl(
    FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset,
    uint8_t const* end_key_name, int end_key_name_length,
    fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes,
    FDBStreamingMode mode, int iteration, fdb_bool_t snapshot,
    fdb_bool_t reverse )
{
    /* This method may be called with a runtime API version of 13, in
       which negative row limits are a reverse range read */
    if (g_api_version <= 13 && limit < 0) {
        limit = -limit;
        reverse = true;
    }

    /* Zero at the C API maps to "infinity" at lower levels */
    if (!limit)
        limit = CLIENT_KNOBS->ROW_LIMIT_UNLIMITED;
    if (!target_bytes)
        target_bytes = CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED;

    /* Unlimited/unlimited with mode _EXACT isn't permitted */
    if (limit == CLIENT_KNOBS->ROW_LIMIT_UNLIMITED && target_bytes == CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED && mode == FDB_STREAMING_MODE_EXACT)
        return TSAV_ERROR(Standalone<RangeResultRef>, exact_mode_without_limits);

    /* _ITERATOR mode maps to one of the known streaming modes
       depending on iteration */
    static const int mode_bytes_array[] = {CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED, 256, 1000, 4096, 80000};

    /* The progression used for FDB_STREAMING_MODE_ITERATOR.
       Goes from small -> medium -> large.  Then 1.5 * previous until serial. */
    static const int iteration_progression[] = { 256, 1000, 4096, 6144, 9216, 13824, 20736, 31104, 46656, 69984, 80000 };

    /* length(iteration_progression) */
    static const int max_iteration = sizeof(iteration_progression) / sizeof(int);

    if(mode == FDB_STREAMING_MODE_WANT_ALL)
        mode = FDB_STREAMING_MODE_SERIAL;

    int mode_bytes;
    if (mode == FDB_STREAMING_MODE_ITERATOR) {
        if (iteration <= 0)
            return TSAV_ERROR(Standalone<RangeResultRef>, client_invalid_operation);

        iteration = std::min(iteration, max_iteration);
        mode_bytes = iteration_progression[iteration - 1];
    }
    else if(mode >= 0 && mode <= FDB_STREAMING_MODE_SERIAL)
        mode_bytes = mode_bytes_array[mode];
    else
        return TSAV_ERROR(Standalone<RangeResultRef>, client_invalid_operation);

    if(target_bytes == CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED)
        target_bytes = mode_bytes;
    else if(mode_bytes != CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED)
        target_bytes = std::min(target_bytes, mode_bytes);

    return (FDBFuture*)( TXN(tr)->getRange(
                             KeySelectorRef(
                                 KeyRef( begin_key_name,
                                         begin_key_name_length ),
                                 begin_or_equal, begin_offset ),
                             KeySelectorRef(
                                 KeyRef( end_key_name,
                                         end_key_name_length ),
                                 end_or_equal, end_offset ),
                             GetRangeLimits(limit, target_bytes),
                             snapshot, reverse ).extractPtr() );
}

extern "C"
FDBFuture* fdb_transaction_get_range_selector_v13(
    FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length,
    fdb_bool_t begin_or_equal, int begin_offset, uint8_t const* end_key_name,
    int end_key_name_length, fdb_bool_t end_or_equal, int end_offset, int limit )
{
    return fdb_transaction_get_range_impl(
        tr, begin_key_name, begin_key_name_length, begin_or_equal, begin_offset,
        end_key_name, end_key_name_length, end_or_equal, end_offset,
        limit, 0, FDB_STREAMING_MODE_EXACT, 0, false, false);
}

extern "C"
FDBFuture* fdb_transaction_get_range_v13(
    FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length,
    uint8_t const* end_key_name, int end_key_name_length, int limit )
{
    return fdb_transaction_get_range_selector_v13(
        tr,
        FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(begin_key_name,
                                          begin_key_name_length),
        FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(end_key_name,
                                          end_key_name_length),
        limit );
}

extern "C" DLLEXPORT
void fdb_transaction_set( FDBTransaction* tr, uint8_t const* key_name,
                          int key_name_length, uint8_t const* value,
                          int value_length ) {
    CATCH_AND_DIE(
        TXN(tr)->set( KeyRef( key_name, key_name_length ),
                      ValueRef( value, value_length ) ); );
}

extern "C" DLLEXPORT
void fdb_transaction_atomic_op( FDBTransaction* tr, uint8_t const* key_name,
                                int key_name_length, uint8_t const* param,
                                int param_length, FDBMutationType operation_type ) {
    CATCH_AND_DIE(
        TXN(tr)->atomicOp( KeyRef( key_name, key_name_length ),
                           ValueRef( param, param_length ),
                           (FDBMutationTypes::Option) operation_type ); );
}

extern "C" DLLEXPORT
void fdb_transaction_clear( FDBTransaction* tr, uint8_t const* key_name,
                            int key_name_length ) {
    CATCH_AND_DIE(
        TXN(tr)->clear( KeyRef( key_name, key_name_length ) ); );
}

extern "C" DLLEXPORT
void fdb_transaction_clear_range(
    FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length,
    uint8_t const* end_key_name, int end_key_name_length )
{
    CATCH_AND_DIE(
        TXN(tr)->clear( KeyRef( begin_key_name,
                                begin_key_name_length ),
                        KeyRef( end_key_name,
                                end_key_name_length ) ); );
}

extern "C" DLLEXPORT
FDBFuture* fdb_transaction_watch( FDBTransaction *tr, uint8_t const* key_name,
                                  int key_name_length)
{
    return (FDBFuture*)( TXN(tr)->watch(KeyRef(key_name, key_name_length)).extractPtr() );
}

extern "C" DLLEXPORT
FDBFuture* fdb_transaction_commit( FDBTransaction* tr ) {
    return (FDBFuture*)( TXN(tr)->commit().extractPtr() );
}

extern "C" DLLEXPORT
fdb_error_t fdb_transaction_get_committed_version( FDBTransaction* tr,
                                                   int64_t* out_version )
{
    CATCH_AND_RETURN(
        *out_version = TXN(tr)->getCommittedVersion(); );
}

extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_versionstamp( FDBTransaction* tr )
{
    return (FDBFuture*)(TXN(tr)->getVersionstamp().extractPtr());
}

extern "C"
fdb_error_t fdb_transaction_set_option_impl( FDBTransaction* tr,
                                             FDBTransactionOption option,
                                             uint8_t const* value,
                                             int value_length )
{
    CATCH_AND_RETURN(
        TXN(tr)->setOption( (FDBTransactionOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}

extern "C"
void fdb_transaction_set_option_v13( FDBTransaction* tr,
                                     FDBTransactionOption option )
{
    fdb_transaction_set_option_impl( tr, option, NULL, 0 );
}

extern "C" DLLEXPORT
FDBFuture* fdb_transaction_on_error( FDBTransaction* tr, fdb_error_t error ) {
    return (FDBFuture*)( TXN(tr)->onError(
                             Error::fromUnvalidatedCode( error ) ).extractPtr() );
}

extern "C" DLLEXPORT
void fdb_transaction_reset( FDBTransaction* tr ) {
    CATCH_AND_DIE( TXN(tr)->reset(); );
}

extern "C" DLLEXPORT
fdb_error_t fdb_transaction_add_conflict_range( FDBTransaction* tr, uint8_t const* begin_key_name,
                                                int begin_key_name_length, uint8_t const* end_key_name,
                                                int end_key_name_length, FDBConflictRangeType type) {
    CATCH_AND_RETURN(
        KeyRangeRef range(KeyRef(begin_key_name, begin_key_name_length), KeyRef(end_key_name, end_key_name_length));
        if(type == FDBConflictRangeType::FDB_CONFLICT_RANGE_TYPE_READ)
            TXN(tr)->addReadConflictRange(range);
        else if(type == FDBConflictRangeType::FDB_CONFLICT_RANGE_TYPE_WRITE)
            TXN(tr)->addWriteConflictRange(range);
        else
            return error_code_client_invalid_operation;
    );
}

#include "fdb_c_function_pointers.g.h"

#define FDB_API_CHANGED(func, ver) if (header_version < ver) fdb_api_ptr_##func = (void*)&(func##_v##ver##_PREV); else if (fdb_api_ptr_##func == (void*)&fdb_api_ptr_unimpl) fdb_api_ptr_##func = (void*)&(func##_impl);

#define FDB_API_REMOVED(func, ver) if (header_version < ver) fdb_api_ptr_##func = (void*)&(func##_v##ver##_PREV); else fdb_api_ptr_##func = (void*)&fdb_api_ptr_removed;

extern "C" DLLEXPORT
fdb_error_t fdb_select_api_version_impl( int runtime_version, int header_version ) {
    /* Can only call this once */
    if (g_api_version != 0)
        return error_code_api_version_already_set;

    /* Caller screwed up, this makes no sense */
    if (runtime_version > header_version)
        return error_code_api_version_invalid;

    /* Caller requested a version we don't speak */
    if (header_version > FDB_API_VERSION)
        return error_code_api_version_not_supported;

    /* No backwards compatibility for earlier versions */
    if (runtime_version < 13)
        return error_code_api_version_not_supported;

    RETURN_ON_ERROR(
        API->selectApiVersion(runtime_version);
    );

    g_api_version = runtime_version;

    platformInit();
    Error::init();

    // Versioned API changes -- descending order by version (new changes at top)
    // FDB_API_CHANGED( function, ver ) means there is a new implementation as of ver, and a function function_(ver-1) is the old implementation
    // FDB_API_REMOVED( function, ver ) means the function was removed as of ver, and function_(ver-1) is the old implementation
    FDB_API_CHANGED( fdb_future_get_error, 23 );
    FDB_API_REMOVED( fdb_future_is_error, 23 );
    FDB_API_CHANGED( fdb_future_get_keyvalue_array, 14 );
    FDB_API_CHANGED( fdb_transaction_get_key, 14 );
    FDB_API_CHANGED( fdb_transaction_get_range, 14 );
    FDB_API_REMOVED( fdb_transaction_get_range_selector, 14 );
    FDB_API_CHANGED( fdb_transaction_get, 14 );
    FDB_API_CHANGED( fdb_setup_network, 14 );
    FDB_API_CHANGED( fdb_transaction_set_option, 14 );
    /* End versioned API changes */

    return error_code_success;
}

extern "C" DLLEXPORT
int fdb_get_max_api_version() {
    return FDB_API_VERSION;
}

extern "C" DLLEXPORT
const char* fdb_get_client_version() {
    return API->getClientVersion();
}

#if defined(__APPLE__)
#include <dlfcn.h>
__attribute__((constructor))
static void initialize() {
    //OS X ld doesn't support -z nodelete, so we dlopen to increment the reference count of this module
    Dl_info info;
    int ret = dladdr((void*)&fdb_select_api_version_impl, &info);
    if(!ret || !info.dli_fname)
        return; //If we get here somehow, we face the risk of seg faults if somebody unloads our library

    dlopen(info.dli_fname, RTLD_NOLOAD | RTLD_NODELETE);
}
#endif
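The iteration_progression table in fdb_transaction_get_range_impl follows the rule stated in the comment beside it: small, medium, large, then 1.5x the previous size until the serial limit. A sketch that regenerates the table from that rule, with the constants copied from the code above:

def iteration_progression(serial_limit=80000):
    # small -> medium -> large, then 1.5x the previous size until serial.
    sizes = [256, 1000, 4096]
    while sizes[-1] * 3 // 2 < serial_limit:
        sizes.append(sizes[-1] * 3 // 2)
    sizes.append(serial_limit)
    return sizes

assert iteration_progression() == [256, 1000, 4096, 6144, 9216, 13824,
                                   20736, 31104, 46656, 69984, 80000]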
@ -0,0 +1,6 @@
{
global:
    fdb_*;
local:
    *;
};
@ -0,0 +1,124 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Condition="'$(Release)' != 'true' ">
  </PropertyGroup>
  <PropertyGroup Condition="'$(Release)' == 'true' ">
    <PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
  </PropertyGroup>
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|x64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|x64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="fdb_c_function_pointers.g.h" />
    <ClInclude Include="foundationdb\fdb_c.h" />
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="fdb_c.cpp" />
    <ClCompile Include="ThreadCleanup.cpp" />
  </ItemGroup>
  <ItemGroup>
    <MASM Include="fdb_c.g.asm" />
  </ItemGroup>
  <ItemGroup>
    <None Include="generate_asm.py" />
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{CACB2C8E-3E55-4309-A411-2A9C56C6C1CB}</ProjectGuid>
    <RootNamespace>c</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
    <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.props" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <ItemDefinitionGroup>
    <PreBuildEvent>
    </PreBuildEvent>
    <PostBuildEvent>
      <Command>
        FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)-%%i"</Command>
    </PostBuildEvent>
  </ItemDefinitionGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
    <OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
    <IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
    <IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
    <IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
  </PropertyGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <ClCompile>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <ClCompile>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
  <PropertyGroup>
    <CustomBuildBeforeTargets>_MASM;ClCompile</CustomBuildBeforeTargets>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <CustomBuildStep>
      <Command>c:\Python27\python.exe "$(ProjectDir)/generate_asm.py" windows "$(ProjectDir)/fdb_c.cpp" "$(ProjectDir)/fdb_c.g.asm" "$(ProjectDir)/fdb_c_function_pointers.g.h"</Command>
      <Message>Generating API trampolines</Message>
      <Outputs>$(ProjectDir)/fdb_c_function_pointers.g.h;$(ProjectDir)/fdb_c.g.asm</Outputs>
      <Inputs>$(ProjectDir)/fdb_c.cpp;$(ProjectDir)/generate_asm.py</Inputs>
    </CustomBuildStep>
  </ItemDefinitionGroup>
  <ImportGroup Label="ExtensionTargets">
    <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.targets" />
  </ImportGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>
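The CustomBuildStep above is where the API trampolines get generated; outside MSBuild the same step could be driven directly, mirroring the <Command> arguments (a sketch; the paths are placeholders relative to the project directory):

import subprocess

# platform, input source, generated assembly, generated function-pointer header
subprocess.check_call(['python', 'generate_asm.py', 'windows',
                       'fdb_c.cpp', 'fdb_c.g.asm', 'fdb_c_function_pointers.g.h'])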
@ -0,0 +1,314 @@
/*
 * fdb_c.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_C_H
#define FDB_C_H
#pragma once

#ifndef DLLEXPORT
#define DLLEXPORT
#endif

#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 500)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 500
#error Requested API version requires a newer version of this header
#endif

#if FDB_API_VERSION >= 23 && !defined(WARN_UNUSED_RESULT)
#ifdef __GNUG__
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif
#else
#define WARN_UNUSED_RESULT
#endif

// With default settings, gcc will not warn about unprototyped functions being called, so it
// is easy to erroneously call a function which is not available at FDB_API_VERSION and then
// get an error only at runtime. These macros ensure a compile error in such cases, and
// attempt to make the compile error slightly informative.
#define This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION() [=====]
#define FDB_REMOVED_FUNCTION This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION(0)
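
/* Illustrative note (not in the original header): with FDB_API_VERSION >= 23,
   fdb_future_is_error() below is #define'd to FDB_REMOVED_FUNCTION, so a call
   such as

       fdb_bool_t b = fdb_future_is_error(f);

   expands through This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION(0);
   the preprocessor rejects the argument count (the macro takes none), and even
   if it expanded, the [=====] replacement cannot parse. Either way compilation
   fails with the descriptive macro name visible in the error message. */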

#include <stdint.h>

#include "fdb_c_options.g.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Pointers to these opaque types represent objects in the FDB API */
typedef struct future FDBFuture;
typedef struct cluster FDBCluster;
typedef struct database FDBDatabase;
typedef struct transaction FDBTransaction;

typedef int fdb_error_t;
typedef int fdb_bool_t;

DLLEXPORT const char*
fdb_get_error( fdb_error_t code );

DLLEXPORT fdb_bool_t
fdb_error_predicate( int predicate_test, fdb_error_t code );

#define /* fdb_error_t */ fdb_select_api_version(v) fdb_select_api_version_impl(v, FDB_API_VERSION)

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_network_set_option( FDBNetworkOption option, uint8_t const* value,
                        int value_length );

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();

#pragma pack(push, 4)
typedef struct keyvalue {
	const void* key;
	int key_length;
	const void* value;
	int value_length;
} FDBKeyValue;
#pragma pack(pop)

DLLEXPORT void fdb_future_cancel( FDBFuture *f );

DLLEXPORT void fdb_future_release_memory( FDBFuture* f );

DLLEXPORT void fdb_future_destroy( FDBFuture* f );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_block_until_ready( FDBFuture* f );

DLLEXPORT fdb_bool_t fdb_future_is_ready( FDBFuture* f );

typedef void (*FDBCallback)(FDBFuture* future, void* callback_parameter);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_set_callback( FDBFuture* f, FDBCallback callback,
                         void* callback_parameter );

#if FDB_API_VERSION >= 23
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_error( FDBFuture* f );
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_version( FDBFuture* f, int64_t* out_version );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
                    int* out_key_length );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_value( FDBFuture* f, fdb_bool_t *out_present,
                      uint8_t const** out_value,
                      int* out_value_length );

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_keyvalue_array( FDBFuture* f, FDBKeyValue const** out_kv,
                               int* out_count, fdb_bool_t* out_more );
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_string_array(FDBFuture* f,
                                                                     const char*** out_strings, int* out_count);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_create_cluster( const char* cluster_file_path );

DLLEXPORT void fdb_cluster_destroy( FDBCluster* c );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_cluster_set_option( FDBCluster* c, FDBClusterOption option,
                        uint8_t const* value, int value_length );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
                             int db_name_length );

DLLEXPORT void fdb_database_destroy( FDBDatabase* d );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_database_set_option( FDBDatabase* d, FDBDatabaseOption option,
                         uint8_t const* value, int value_length );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_database_create_transaction( FDBDatabase* d,
                                 FDBTransaction** out_transaction );

DLLEXPORT void fdb_transaction_destroy( FDBTransaction* tr);

DLLEXPORT void fdb_transaction_cancel( FDBTransaction* tr);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_set_option( FDBTransaction* tr, FDBTransactionOption option,
                            uint8_t const* value, int value_length );
#endif

DLLEXPORT void
fdb_transaction_set_read_version( FDBTransaction* tr, int64_t version );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_read_version( FDBTransaction* tr );

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get( FDBTransaction* tr, uint8_t const* key_name,
                     int key_name_length, fdb_bool_t snapshot );
#endif

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_key( FDBTransaction* tr, uint8_t const* key_name,
                         int key_name_length, fdb_bool_t or_equal,
                         int offset, fdb_bool_t snapshot );
#endif

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_addresses_for_key(FDBTransaction* tr, uint8_t const* key_name,
                                      int key_name_length);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(
	FDBTransaction* tr, uint8_t const* begin_key_name,
	int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset,
	uint8_t const* end_key_name, int end_key_name_length,
	fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes,
	FDBStreamingMode mode, int iteration, fdb_bool_t snapshot,
	fdb_bool_t reverse );
#endif

DLLEXPORT void
fdb_transaction_set( FDBTransaction* tr, uint8_t const* key_name,
                     int key_name_length, uint8_t const* value,
                     int value_length );

DLLEXPORT void
fdb_transaction_atomic_op( FDBTransaction* tr, uint8_t const* key_name,
                           int key_name_length, uint8_t const* param,
                           int param_length, FDBMutationType operation_type );

DLLEXPORT void
fdb_transaction_clear( FDBTransaction* tr, uint8_t const* key_name,
                       int key_name_length );

DLLEXPORT void fdb_transaction_clear_range(
	FDBTransaction* tr, uint8_t const* begin_key_name,
	int begin_key_name_length, uint8_t const* end_key_name,
	int end_key_name_length );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_watch( FDBTransaction *tr,
                                                               uint8_t const* key_name,
                                                               int key_name_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_commit( FDBTransaction* tr );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_get_committed_version( FDBTransaction* tr,
                                       int64_t* out_version );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_versionstamp( FDBTransaction* tr );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_on_error( FDBTransaction* tr, fdb_error_t error );

DLLEXPORT void fdb_transaction_reset( FDBTransaction* tr );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_add_conflict_range(FDBTransaction *tr,
                                   uint8_t const* begin_key_name,
                                   int begin_key_name_length,
                                   uint8_t const* end_key_name,
                                   int end_key_name_length,
                                   FDBConflictRangeType type);

#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1
#define FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(k, l) k, l, 0, 1

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_select_api_version_impl( int runtime_version, int header_version );

DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();

/* LEGACY API VERSIONS */

#if FDB_API_VERSION < 23
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_error( FDBFuture* f,
                      const char** out_description /* = NULL */ );

DLLEXPORT fdb_bool_t fdb_future_is_error( FDBFuture* f );
#else
#define fdb_future_is_error(x) FDB_REMOVED_FUNCTION
#endif

#if FDB_API_VERSION < 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyvalue_array(
	FDBFuture* f, FDBKeyValue const** out_kv, int* out_count );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get(
	FDBTransaction* tr, uint8_t const* key_name, int key_name_length );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_key(
	FDBTransaction* tr, uint8_t const* key_name, int key_name_length,
	fdb_bool_t or_equal, int offset );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network( const char* local_address );

DLLEXPORT void fdb_transaction_set_option(
	FDBTransaction* tr, FDBTransactionOption option );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(
	FDBTransaction* tr, uint8_t const* begin_key_name,
	int begin_key_name_length, uint8_t const* end_key_name,
	int end_key_name_length, int limit );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_selector(
	FDBTransaction* tr, uint8_t const* begin_key_name,
	int begin_key_name_length, fdb_bool_t begin_or_equal,
	int begin_offset, uint8_t const* end_key_name,
	int end_key_name_length, fdb_bool_t end_or_equal, int end_offset,
	int limit );
#else
#define fdb_transaction_get_range_selector(tr,bkn,bknl,boe,bo,ekn,eknl,eoe,eo,lim) FDB_REMOVED_FUNCTION
#endif

#ifdef __cplusplus
}
#endif
#endif
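
For orientation, here is a minimal client against the header above. It is a
sketch, not part of the commit: it assumes the header is installed as
<foundationdb/fdb_c.h>, that libfdb_c and pthreads are linked, that a default
cluster file is available, and the check() helper stands in for real error
handling.

#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void check(fdb_error_t err) {
	if(err) { fprintf(stderr, "fdb: %s\n", fdb_get_error(err)); exit(1); }
}

/* fdb_run_network() blocks; it must run on its own thread until fdb_stop_network(). */
static void* network_thread(void* arg) { check(fdb_run_network()); return NULL; }

int main() {
	check(fdb_select_api_version(500));
	check(fdb_setup_network());
	pthread_t net;
	pthread_create(&net, NULL, network_thread, NULL);

	/* Every request returns an FDBFuture; block on it, then extract the result. */
	FDBFuture* cf = fdb_create_cluster(NULL); /* NULL selects the default cluster file */
	check(fdb_future_block_until_ready(cf));
	FDBCluster* cluster;
	check(fdb_future_get_cluster(cf, &cluster));
	fdb_future_destroy(cf);

	FDBFuture* df = fdb_cluster_create_database(cluster, (uint8_t const*)"DB", 2);
	check(fdb_future_block_until_ready(df));
	FDBDatabase* db;
	check(fdb_future_get_database(df, &db));
	fdb_future_destroy(df);

	FDBTransaction* tr;
	check(fdb_database_create_transaction(db, &tr));
	fdb_transaction_set(tr, (uint8_t const*)"hello", 5, (uint8_t const*)"world", 5);
	FDBFuture* commit = fdb_transaction_commit(tr);
	check(fdb_future_block_until_ready(commit));
	check(fdb_future_get_error(commit));
	fdb_future_destroy(commit);
	fdb_transaction_destroy(tr);

	fdb_database_destroy(db);
	fdb_cluster_destroy(cluster);
	check(fdb_stop_network());
	pthread_join(net, NULL);
	return 0;
}

A production client would wrap the commit in a retry loop driven by
fdb_transaction_on_error() rather than exiting on the first failure.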
@ -0,0 +1,93 @@
#!/usr/bin/env python
#
# generate_asm.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import re
import sys

(platform, source, asm, h) = sys.argv[1:]

functions = {}

func_re = re.compile("^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")

with open(source, 'r') as srcfile:
    for l in srcfile:
        m = func_re.match(l)
        if m:
            func, ver = m.groups()
            if not func in functions:
                functions[func] = []
            ver = int(ver)
            if not ver in functions[func]:
                functions[func].append(ver)

def write_windows_asm(asmfile, functions):
    asmfile.write(".data\n")
    for f in functions:
        asmfile.write("\textern fdb_api_ptr_%s:qword\n" % f)

    asmfile.write("\n.code\n")

    for f in functions:
        asmfile.write("\n%s proc EXPORT\n" % f)
        asmfile.write("\tmov r11, qword ptr [fdb_api_ptr_%s]\n" % f)
        asmfile.write("\tjmp r11\n")
        asmfile.write("%s endp\n" % f)

    asmfile.write("\nEND\n")

def write_unix_asm(asmfile, functions, prefix):
    asmfile.write(".intel_syntax noprefix\n")

    if platform == "linux":
        asmfile.write("\n.data\n")
        for f in functions:
            asmfile.write("\t.extern fdb_api_ptr_%s\n" % f)

        asmfile.write("\n.text\n")
        for f in functions:
            asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f))

    for f in functions:
        asmfile.write("\n.globl %s%s\n" % (prefix, f))
        asmfile.write("%s%s:\n" % (prefix, f))
        asmfile.write("\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
        asmfile.write("\tmov r11, qword ptr [r11]\n")
        asmfile.write("\tjmp r11\n")

with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
    hfile.write("void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
    hfile.write("void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")

    if platform == "linux":
        write_unix_asm(asmfile, functions, '')
    elif platform == "osx":
        write_unix_asm(asmfile, functions, '_')
    elif platform == "windows":
        write_windows_asm(asmfile, functions)

    for f in functions:
        if platform == "windows":
            hfile.write("extern \"C\" ")
        hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
        for v in functions[f]:
            hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v-1))
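For orientation: by inspection of write_unix_asm above, a source line such as
FDB_API_CHANGED(fdb_transaction_get, 14) in fdb_c.cpp would produce, on linux
with an empty symbol prefix (the function name here is only illustrative), a
trampoline like

.intel_syntax noprefix

.data
	.extern fdb_api_ptr_fdb_transaction_get

.text
	.global fdb_transaction_get
	.type fdb_transaction_get, @function

.globl fdb_transaction_get
fdb_transaction_get:
	mov r11, qword ptr [fdb_api_ptr_fdb_transaction_get@GOTPCREL+rip]
	mov r11, qword ptr [r11]
	jmp r11

so every public symbol is an indirect jump through a per-function pointer. The
generated header initializes each pointer to fdb_api_ptr_unimpl; the library
can later repoint it at the implementation matching the selected API version.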
@ -0,0 +1,76 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# -*- mode: makefile; -*-

fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a

ifeq ($(PLATFORM),linux)
  fdb_c_LIBS += lib/libstdc++.a -lm -lpthread -lrt -ldl
  fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete
endif

ifeq ($(PLATFORM),osx)
  fdb_c_LDFLAGS += -lc++ -Xlinker -exported_symbols_list -Xlinker bindings/c/fdb_c.symbols

lib/libfdb_c.dylib: bindings/c/fdb_c.symbols

bindings/c/fdb_c.symbols: bindings/c/foundationdb/fdb_c.h $(ALL_MAKEFILES)
	@awk '{sub(/^[ \t]+/, "");} /^#/ {next;} /DLLEXPORT\ .*[^ ]\(/ {sub(/\(.*/, ""); print "_" $$NF; next;} /DLLEXPORT/ { DLLEXPORT=1; next;} DLLEXPORT==1 {sub(/\(.*/, ""); print "_" $$0; DLLEXPORT=0}' $< | sort | uniq > $@

fdb_c_clean: fdb_c_symbols_clean

fdb_c_symbols_clean:
	@rm -f bindings/c/fdb_c.symbols

fdb_javac_release: lib/libfdb_c.$(DLEXT)
	mkdir -p lib
	rm -f lib/libfdb_c.$(java_DLEXT)-*
	cp lib/libfdb_c.$(DLEXT) lib/libfdb_c.$(DLEXT)-$(VERSION_ID)
	cp lib/libfdb_c.$(DLEXT)-debug lib/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)

fdb_javac_release_clean:
	rm -f lib/libfdb_c.$(DLEXT)-*
	rm -f lib/libfdb_c.$(javac_DLEXT)-*

# OS X needs to put its java lib in packages
packages: fdb_javac_lib_package

fdb_javac_lib_package: lib/libfdb_c.dylib
	mkdir -p packages
	cp lib/libfdb_c.$(DLEXT) packages/libfdb_c.$(DLEXT)-$(VERSION_ID)
	cp lib/libfdb_c.$(DLEXT)-debug packages/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)
endif

fdb_c_GENERATED_SOURCES += bindings/c/foundationdb/fdb_c_options.g.h bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h

bindings/c/%.g.S bindings/c/%_function_pointers.g.h: bindings/c/%.cpp bindings/c/generate_asm.py $(ALL_MAKEFILES)
	@echo "Scanning $<"
	@bindings/c/generate_asm.py $(PLATFORM) bindings/c/fdb_c.cpp bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h

.PRECIOUS: bindings/c/fdb_c_function_pointers.g.h

fdb_c_BUILD_SOURCES += bindings/c/fdb_c.g.S

bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexillographer/fdb.options $(ALL_MAKEFILES)
	@echo "Building $@"
	@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options c $@
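By inspection of the awk rule above, bindings/c/fdb_c.symbols comes out as one
underscore-prefixed name per DLLEXPORT declaration in fdb_c.h, sorted and
deduplicated, e.g. (illustrative excerpt):

_fdb_create_cluster
_fdb_future_block_until_ready
_fdb_get_error

This is the format expected by the OS X linker's -exported_symbols_list option
used in fdb_c_LDFLAGS above.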
@ -0,0 +1,507 @@
/*
 * DirectoryLayer.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DirectoryLayer.h"
#include "DirectoryPartition.h"

namespace FDB {
	const uint8_t DirectoryLayer::LITTLE_ENDIAN_LONG_ONE[8] = {1,0,0,0,0,0,0,0};
	const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = LiteralStringRef("hca");
	const StringRef DirectoryLayer::LAYER_KEY = LiteralStringRef("layer");
	const StringRef DirectoryLayer::VERSION_KEY = LiteralStringRef("version");
	const uint64_t DirectoryLayer::SUB_DIR_KEY = 0;

	const uint32_t DirectoryLayer::VERSION[3] = {1, 0, 0};

	const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = LiteralStringRef("\xfe");
	const Subspace DirectoryLayer::DEFAULT_NODE_SUBSPACE = Subspace(DEFAULT_NODE_SUBSPACE_PREFIX);
	const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
	const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");

	DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes) :
		nodeSubspace(nodeSubspace), contentSubspace(contentSubspace), allowManualPrefixes(allowManualPrefixes),
		rootNode(nodeSubspace.get(nodeSubspace.key())), allocator(rootNode.get(HIGH_CONTENTION_KEY))
	{ }

	Subspace DirectoryLayer::nodeWithPrefix(StringRef const& prefix) const {
		return nodeSubspace.get(prefix);
	}

	template<class T>
	Optional<Subspace> DirectoryLayer::nodeWithPrefix(Optional<T> const& prefix) const {
		if(!prefix.present()) {
			return Optional<Subspace>();
		}

		return nodeWithPrefix(prefix.get());
	}

	ACTOR Future<DirectoryLayer::Node> find(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
		state int pathIndex = 0;
		state DirectoryLayer::Node node = DirectoryLayer::Node(dirLayer, dirLayer->rootNode, IDirectory::Path(), path);

		for(; pathIndex != path.size(); ++pathIndex) {
			ASSERT(node.subspace.present());
			Optional<FDBStandalone<ValueRef>> val = wait(tr->get(node.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(path[pathIndex], true).key()));

			node.path.push_back(path[pathIndex]);
			node = DirectoryLayer::Node(dirLayer, dirLayer->nodeWithPrefix(val), node.path, path);

			DirectoryLayer::Node _node = wait(node.loadMetadata(tr));
			node = _node;

			if(!node.exists() || node.layer == DirectoryLayer::PARTITION_LAYER) {
				return node;
			}
		}

		if(!node.loadedMetadata) {
			DirectoryLayer::Node _node = wait(node.loadMetadata(tr));
			node = _node;
		}

		return node;
	}

	IDirectory::Path DirectoryLayer::toAbsolutePath(IDirectory::Path const& subpath) const {
		Path path;

		path.reserve(this->path.size() + subpath.size());
		path.insert(path.end(), this->path.begin(), this->path.end());
		path.insert(path.end(), subpath.begin(), subpath.end());

		return path;
	}

	Reference<DirectorySubspace> DirectoryLayer::contentsOfNode(Subspace const& node, Path const& path, Standalone<StringRef> const& layer) {
		Standalone<StringRef> prefix = nodeSubspace.unpack(node.key()).getString(0);

		if(layer == PARTITION_LAYER) {
			return Reference<DirectorySubspace>(new DirectoryPartition(toAbsolutePath(path), prefix, Reference<DirectoryLayer>::addRef(this)));
		}
		else {
			return Reference<DirectorySubspace>(new DirectorySubspace(toAbsolutePath(path), prefix, Reference<DirectoryLayer>::addRef(this), layer));
		}
	}

	Reference<DirectorySubspace> DirectoryLayer::openInternal(Standalone<StringRef> const& layer, Node const& existingNode, bool allowOpen) {
		if (!allowOpen) {
			throw directory_already_exists();
		}
		if(layer.size() > 0 && layer != existingNode.layer) {
			throw mismatched_layer();
		}

		return existingNode.getContents();
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::open(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer) {
		return createOrOpenInternal(tr, path, layer, Optional<Standalone<StringRef>>(), false, true);
	}

	void DirectoryLayer::initializeDirectory(Reference<Transaction> const& tr) const {
		tr->set(rootNode.pack(VERSION_KEY), StringRef((uint8_t*)VERSION, 12));
	}

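	// The version record written by initializeDirectory() is VERSION[3], i.e.
	// three unsigned 32-bit integers (major, minor, micro; 12 bytes in all).
	// checkVersionInternal() below refuses to read a directory tree written by
	// a newer major version, and refuses to write (but will still read) one
	// written by a newer minor version, which is why the minor comparison only
	// applies when writeAccess is true.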
	ACTOR Future<Void> checkVersionInternal(const DirectoryLayer* dirLayer, Reference<Transaction> tr, bool writeAccess) {
		Optional<FDBStandalone<ValueRef>> versionBytes = wait(tr->get(dirLayer->rootNode.pack(DirectoryLayer::VERSION_KEY)));

		if(!versionBytes.present()) {
			if(writeAccess) {
				dirLayer->initializeDirectory(tr);
			}
			return Void();
		}
		else {
			if(versionBytes.get().size() != 12) {
				throw invalid_database_value();
			}
			if(((uint32_t*)versionBytes.get().begin())[0] > DirectoryLayer::VERSION[0]) {
				throw incompatible_directory_version();
			}
			else if(((uint32_t*)versionBytes.get().begin())[1] > DirectoryLayer::VERSION[1] && writeAccess) {
				throw incompatible_directory_version();
			}
		}

		return Void();
	}

	Future<Void> DirectoryLayer::checkVersion(Reference<Transaction> const& tr, bool writeAccess) const {
		return checkVersionInternal(this, tr, writeAccess);
	}

	ACTOR Future<Standalone<StringRef>> getPrefix(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, Optional<Standalone<StringRef>> prefix) {
		if(!prefix.present()) {
			Standalone<StringRef> allocated = wait(dirLayer->allocator.allocate(tr));
			state Standalone<StringRef> finalPrefix = allocated.withPrefix(dirLayer->contentSubspace.key());

			FDBStandalone<RangeResultRef> result = wait(tr->getRange(KeyRangeRef(finalPrefix, strinc(finalPrefix)), 1));

			if(result.size() > 0) {
				throw directory_prefix_not_empty();
			}

			return finalPrefix;
		}

		return prefix.get();
	}

	ACTOR Future<Optional<Subspace>> nodeContainingKey(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, Standalone<StringRef> key, bool snapshot) {
		if(key.startsWith(dirLayer->nodeSubspace.key())) {
			return dirLayer->rootNode;
		}

		KeyRange range = KeyRangeRef(dirLayer->nodeSubspace.range().begin, keyAfter(dirLayer->nodeSubspace.pack(key)));
		FDBStandalone<RangeResultRef> result = wait(tr->getRange(range, 1, snapshot, true));

		if(result.size() > 0) {
			Standalone<StringRef> prevPrefix = dirLayer->nodeSubspace.unpack(result[0].key).getString(0);
			if(key.startsWith(prevPrefix)) {
				return dirLayer->nodeWithPrefix(prevPrefix);
			}
		}

		return Optional<Subspace>();
	}

	ACTOR Future<bool> isPrefixFree(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, Standalone<StringRef> prefix, bool snapshot) {
		if(!prefix.size()) {
			return false;
		}

		Optional<Subspace> node = wait(nodeContainingKey(dirLayer, tr, prefix, snapshot));
		if(node.present()) {
			return false;
		}

		FDBStandalone<RangeResultRef> result = wait(tr->getRange(KeyRangeRef(dirLayer->nodeSubspace.pack(prefix), dirLayer->nodeSubspace.pack(strinc(prefix))), 1, snapshot));
		return !result.size();
	}

	ACTOR Future<Subspace> getParentNode(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
		if(path.size() > 1) {
			Reference<DirectorySubspace> parent = wait(dirLayer->createOrOpenInternal(tr, IDirectory::Path(path.begin(), path.end() - 1), StringRef(), Optional<Standalone<StringRef>>(), true, true));
			return dirLayer->nodeWithPrefix(parent->key());
		}
		else {
			return dirLayer->rootNode;
		}
	}

	ACTOR Future<Reference<DirectorySubspace>> createInternal(
		Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path,
		Standalone<StringRef> layer, Optional<Standalone<StringRef>> prefix, bool allowCreate)
	{
		if(!allowCreate) {
			throw directory_does_not_exist();
		}

		Void _ = wait(dirLayer->checkVersion(tr, true));

		state Standalone<StringRef> newPrefix = wait(getPrefix(dirLayer, tr, prefix));
		bool isFree = wait(isPrefixFree(dirLayer, tr, newPrefix, !prefix.present()));

		if(!isFree) {
			throw directory_prefix_in_use();
		}

		Subspace parentNode = wait(getParentNode(dirLayer, tr, path));
		Subspace node = dirLayer->nodeWithPrefix(newPrefix);

		tr->set(parentNode.get(DirectoryLayer::SUB_DIR_KEY).get(path.back(), true).key(), newPrefix);
		tr->set(node.get(DirectoryLayer::LAYER_KEY).key(), layer);
		return dirLayer->contentsOfNode(node, path, layer);
	}

	ACTOR Future<Reference<DirectorySubspace>> _createOrOpenInternal(
		Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path,
		Standalone<StringRef> layer, Optional<Standalone<StringRef>> prefix, bool allowCreate, bool allowOpen)
	{
		ASSERT(!prefix.present() || allowCreate);
		Void _ = wait(dirLayer->checkVersion(tr, false));

		if(prefix.present() && !dirLayer->allowManualPrefixes) {
			if(!dirLayer->getPath().size()) {
				throw manual_prefixes_not_enabled();
			}
			else {
				throw prefix_in_partition();
			}
		}

		if(!path.size()) {
			throw cannot_open_root_directory();
		}

		state DirectoryLayer::Node existingNode = wait(find(dirLayer, tr, path));
		if(existingNode.exists()) {
			if(existingNode.isInPartition()) {
				IDirectory::Path subpath = existingNode.getPartitionSubpath();
				Reference<DirectorySubspace> dirSpace = wait(existingNode.getContents()->getDirectoryLayer()->createOrOpenInternal(tr, subpath, layer, prefix, allowCreate, allowOpen));
				return dirSpace;
			}
			return dirLayer->openInternal(layer, existingNode, allowOpen);
		}
		else {
			Reference<DirectorySubspace> dirSpace = wait(createInternal(dirLayer, tr, path, layer, prefix, allowCreate));
			return dirSpace;
		}
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::createOrOpenInternal(
		Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer,
		Optional<Standalone<StringRef>> const& prefix, bool allowCreate, bool allowOpen)
	{
		return _createOrOpenInternal(Reference<DirectoryLayer>::addRef(this), tr, path, layer, prefix, allowCreate, allowOpen);
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::create(
		Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer,
		Optional<Standalone<StringRef>> const& prefix)
	{
		return createOrOpenInternal(tr, path, layer, prefix, true, false);
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::createOrOpen(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer) {
		return createOrOpenInternal(tr, path, layer, Optional<Standalone<StringRef>>(), true, true);
	}

	ACTOR Future<Standalone<VectorRef<StringRef>>> listInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
		Void _ = wait(dirLayer->checkVersion(tr, false));

		state DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

		if(!node.exists()) {
			throw directory_does_not_exist();
		}
		if(node.isInPartition(true)) {
			Standalone<VectorRef<StringRef>> partitionList = wait(node.getContents()->getDirectoryLayer()->list(tr, node.getPartitionSubpath()));
			return partitionList;
		}

		state Subspace subdir = node.subspace.get().get(DirectoryLayer::SUB_DIR_KEY);
		state Key begin = subdir.range().begin;
		state Standalone<VectorRef<StringRef>> subdirectories;

		loop {
			FDBStandalone<RangeResultRef> subdirRange = wait(tr->getRange(KeyRangeRef(begin, subdir.range().end)));

			for(int i = 0; i < subdirRange.size(); ++i) {
				subdirectories.push_back_deep(subdirectories.arena(), subdir.unpack(subdirRange[i].key).getString(0));
			}

			if(!subdirRange.more) {
				return subdirectories;
			}

			begin = keyAfter(subdirRange.back().key);
		}
	}

	Future<Standalone<VectorRef<StringRef>>> DirectoryLayer::list(Reference<Transaction> const& tr, Path const& path) {
		return listInternal(Reference<DirectoryLayer>::addRef(this), tr, path);
	}

	bool pathsEqual(IDirectory::Path const& path1, IDirectory::Path const& path2, size_t maxElementsToCheck = std::numeric_limits<size_t>::max()) {
		if(std::min(path1.size(), maxElementsToCheck) != std::min(path2.size(), maxElementsToCheck)) {
			return false;
		}
		for(int i = 0; i < path1.size() && i < maxElementsToCheck; ++i) {
			if(path1[i] != path2[i]) {
				return false;
			}
		}

		return true;
	}

	ACTOR Future<Void> removeFromParent(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
		ASSERT(path.size() >= 1);
		DirectoryLayer::Node parentNode = wait(find(dirLayer, tr, IDirectory::Path(path.begin(), path.end() - 1)));
		if(parentNode.subspace.present()) {
			tr->clear(parentNode.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(path.back(), true).key());
		}

		return Void();
	}

	ACTOR Future<Reference<DirectorySubspace>> moveInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path oldPath, IDirectory::Path newPath) {
		Void _ = wait(dirLayer->checkVersion(tr, true));

		if(oldPath.size() <= newPath.size()) {
			if(pathsEqual(oldPath, newPath, oldPath.size())) {
				throw invalid_destination_directory();
			}
		}

		std::vector<Future<DirectoryLayer::Node>> futures;
		futures.push_back(find(dirLayer, tr, oldPath));
		futures.push_back(find(dirLayer, tr, newPath));

		std::vector<DirectoryLayer::Node> nodes = wait(getAll(futures));

		state DirectoryLayer::Node oldNode = nodes[0];
		state DirectoryLayer::Node newNode = nodes[1];

		if(!oldNode.exists()) {
			throw directory_does_not_exist();
		}

		if(oldNode.isInPartition() || newNode.isInPartition()) {
			if(!oldNode.isInPartition() || !newNode.isInPartition() || !pathsEqual(oldNode.path, newNode.path)) {
				throw cannot_move_directory_between_partitions();
			}

			Reference<DirectorySubspace> partitionMove = wait(newNode.getContents()->move(tr, oldNode.getPartitionSubpath(), newNode.getPartitionSubpath()));
			return partitionMove;
		}

		if(newNode.exists() || newPath.empty()) {
			throw directory_already_exists();
		}

		DirectoryLayer::Node parentNode = wait(find(dirLayer, tr, IDirectory::Path(newPath.begin(), newPath.end() - 1)));
		if(!parentNode.exists()) {
			throw parent_directory_does_not_exist();
		}

		tr->set(parentNode.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(newPath.back(), true).key(), dirLayer->nodeSubspace.unpack(oldNode.subspace.get().key()).getString(0));
		Void _ = wait(removeFromParent(dirLayer, tr, oldPath));

		return dirLayer->contentsOfNode(oldNode.subspace.get(), newPath, oldNode.layer);
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::move(Reference<Transaction> const& tr, Path const& oldPath, Path const& newPath) {
		return moveInternal(Reference<DirectoryLayer>::addRef(this), tr, oldPath, newPath);
	}

	Future<Reference<DirectorySubspace>> DirectoryLayer::moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath) {
		throw cannot_modify_root_directory();
	}

	Future<Void> removeRecursive(Reference<DirectoryLayer> const&, Reference<Transaction> const&, Subspace const&);
	ACTOR Future<Void> removeRecursive(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, Subspace nodeSub) {
		state Subspace subdir = nodeSub.get(DirectoryLayer::SUB_DIR_KEY);
		state Key begin = subdir.range().begin;
		state std::vector<Future<Void>> futures;

		loop {
			FDBStandalone<RangeResultRef> range = wait(tr->getRange(KeyRangeRef(begin, subdir.range().end)));
			for (int i = 0; i < range.size(); ++i) {
				Subspace subNode = dirLayer->nodeWithPrefix(range[i].value);
				futures.push_back(removeRecursive(dirLayer, tr, subNode));
			}

			if(!range.more) {
				break;
			}

			begin = keyAfter(range.back().key);
		}

		// waits are done concurrently
		Void _ = wait(waitForAll(futures));

		Standalone<StringRef> nodePrefix = dirLayer->nodeSubspace.unpack(nodeSub.key()).getString(0);

		tr->clear(KeyRangeRef(nodePrefix, strinc(nodePrefix)));
		tr->clear(nodeSub.range());

		return Void();
	}

	Future<bool> removeInternal(Reference<DirectoryLayer> const&, Reference<Transaction> const&, IDirectory::Path const&, bool const&);
	ACTOR Future<bool> removeInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path, bool failOnNonexistent) {
		Void _ = wait(dirLayer->checkVersion(tr, true));

		if(path.empty()) {
			throw cannot_modify_root_directory();
		}

		state DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

		if(!node.exists()) {
			if(failOnNonexistent) {
				throw directory_does_not_exist();
			}
			else {
				return false;
			}
		}

		if(node.isInPartition()) {
			bool recurse = wait(removeInternal(node.getContents()->getDirectoryLayer(), tr, node.getPartitionSubpath(), failOnNonexistent));
			return recurse;
		}

		state std::vector<Future<Void>> futures;
		futures.push_back(removeRecursive(dirLayer, tr, node.subspace.get()));
		futures.push_back(removeFromParent(dirLayer, tr, path));

		Void _ = wait(waitForAll(futures));

		return true;
	}

	Future<Void> DirectoryLayer::remove(Reference<Transaction> const& tr, Path const& path) {
		return success(removeInternal(Reference<DirectoryLayer>::addRef(this), tr, path, true));
	}

	Future<bool> DirectoryLayer::removeIfExists(Reference<Transaction> const& tr, Path const& path) {
		return removeInternal(Reference<DirectoryLayer>::addRef(this), tr, path, false);
	}

	ACTOR Future<bool> existsInternal(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, IDirectory::Path path) {
		Void _ = wait(dirLayer->checkVersion(tr, false));

		DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

		if(!node.exists()) {
			return false;
		}

		if(node.isInPartition()) {
			bool exists = wait(node.getContents()->getDirectoryLayer()->exists(tr, node.getPartitionSubpath()));
			return exists;
		}

		return true;
	}

	Future<bool> DirectoryLayer::exists(Reference<Transaction> const& tr, Path const& path) {
		return existsInternal(Reference<DirectoryLayer>::addRef(this), tr, path);
	}

	Reference<DirectoryLayer> DirectoryLayer::getDirectoryLayer() {
		return Reference<DirectoryLayer>::addRef(this);
	}

	const Standalone<StringRef> DirectoryLayer::getLayer() const {
		return StringRef();
	}

	const IDirectory::Path DirectoryLayer::getPath() const {
		return path;
	}
}
@ -0,0 +1,111 @@
/*
 * DirectoryLayer.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_DIRECTORY_LAYER_H
#define FDB_FLOW_DIRECTORY_LAYER_H

#pragma once

#include "IDirectory.h"
#include "DirectorySubspace.h"
#include "HighContentionAllocator.h"

namespace FDB {
	class DirectoryLayer : public IDirectory {
	public:
		DirectoryLayer(Subspace nodeSubspace = DEFAULT_NODE_SUBSPACE, Subspace contentSubspace = DEFAULT_CONTENT_SUBSPACE, bool allowManualPrefixes = false);

		Future<Reference<DirectorySubspace>> create(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>(), Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>());
		Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>());
		Future<Reference<DirectorySubspace>> createOrOpen(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>());

		Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path());
		Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr, Path const& path = Path());

		Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr, Path const& oldPath, Path const& newPath);
		Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath);

		Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path());
		Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path());

		Reference<DirectoryLayer> getDirectoryLayer();
		const Standalone<StringRef> getLayer() const;
		const Path getPath() const;

		static const Subspace DEFAULT_NODE_SUBSPACE;
		static const Subspace DEFAULT_CONTENT_SUBSPACE;
		static const StringRef PARTITION_LAYER;

	//private:
		static const uint8_t LITTLE_ENDIAN_LONG_ONE[8];
		static const StringRef HIGH_CONTENTION_KEY;
		static const StringRef LAYER_KEY;
		static const StringRef VERSION_KEY;
		static const uint64_t SUB_DIR_KEY;
		static const uint32_t VERSION[3];
		static const StringRef DEFAULT_NODE_SUBSPACE_PREFIX;

		struct Node {
			Node() {}
			Node(Reference<DirectoryLayer> const& directoryLayer, Optional<Subspace> const& subspace, Path const& path, Path const& targetPath);

			bool exists() const;

			Future<Node> loadMetadata(Reference<Transaction> tr);
			void ensureMetadataLoaded() const;

			bool isInPartition(bool includeEmptySubpath = false) const;
			Path getPartitionSubpath() const;
			Reference<DirectorySubspace> getContents() const;

			Reference<DirectoryLayer> directoryLayer;
			Optional<Subspace> subspace;
			Path path;
			Path targetPath;
			Standalone<StringRef> layer;

			bool loadedMetadata;
		};

		Reference<DirectorySubspace> openInternal(Standalone<StringRef> const& layer, Node const& existingNode, bool allowOpen);
		Future<Reference<DirectorySubspace>> createOrOpenInternal(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer, Optional<Standalone<StringRef>> const& prefix, bool allowCreate, bool allowOpen);

		void initializeDirectory(Reference<Transaction> const& tr) const;
		Future<Void> checkVersion(Reference<Transaction> const& tr, bool writeAccess) const;

		template <class T>
		Optional<Subspace> nodeWithPrefix(Optional<T> const& prefix) const;
		Subspace nodeWithPrefix(StringRef const& prefix) const;

		Reference<DirectorySubspace> contentsOfNode(Subspace const& node, Path const& path, Standalone<StringRef> const& layer);

		Path toAbsolutePath(Path const& subpath) const;

		Subspace rootNode;
		Subspace nodeSubspace;
		Subspace contentSubspace;
		HighContentionAllocator allocator;
		bool allowManualPrefixes;

		Path path;
	};
}

#endif
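
As a quick orientation to the interface above, here is a minimal Flow-style
sketch (not part of the commit). It assumes the actor compiler processes the
file, that IDirectory::Path behaves as a vector of strings (the implementation
uses push_back and iterator-range construction on it), and that the
Reference<Transaction> comes from the surrounding application; the
get(..., true) call mirrors how DirectoryLayer.actor.cpp builds keys under a
subspace.

ACTOR Future<Void> exampleDirectory(Reference<Transaction> tr) {
	// Create (or open, if it already exists) the directory ["app", "users"]
	// under a default-constructed root directory layer.
	state Reference<DirectoryLayer> root = Reference<DirectoryLayer>(new DirectoryLayer());

	IDirectory::Path path;
	path.push_back(LiteralStringRef("app"));
	path.push_back(LiteralStringRef("users"));

	// createOrOpen allocates a short prefix via the high-contention allocator
	// the first time and returns the same DirectorySubspace on later calls.
	state Reference<DirectorySubspace> users = wait(root->createOrOpen(tr, path));

	// The result is an ordinary Subspace: keys packed under it live inside the
	// directory's allocated prefix.
	tr->set(users->get(LiteralStringRef("alice"), true).key(), LiteralStringRef(""));

	return Void();
}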
@ -0,0 +1,61 @@
/*
 * DirectoryPartition.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_DIRECTORY_PARTITION_H
#define FDB_FLOW_DIRECTORY_PARTITION_H

#pragma once

#include "IDirectory.h"
#include "DirectorySubspace.h"
#include "DirectoryLayer.h"

namespace FDB {
	class DirectoryPartition : public DirectorySubspace {

	public:
		DirectoryPartition(Path const& path, StringRef const& prefix, Reference<DirectoryLayer> parentDirectoryLayer)
			: DirectorySubspace(path, prefix, Reference<DirectoryLayer>(new DirectoryLayer(Subspace(DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX.withPrefix(prefix)), Subspace(prefix))), DirectoryLayer::PARTITION_LAYER),
			  parentDirectoryLayer(parentDirectoryLayer)
		{
			this->directoryLayer->path = path;
		}
		virtual ~DirectoryPartition() {}

		virtual Key key() const { throw cannot_use_partition_as_subspace(); }
		virtual bool contains(KeyRef const& key) const { throw cannot_use_partition_as_subspace(); }

		virtual Key pack(Tuple const& tuple = Tuple()) const { throw cannot_use_partition_as_subspace(); }
		virtual Tuple unpack(KeyRef const& key) const { throw cannot_use_partition_as_subspace(); }
		virtual KeyRange range(Tuple const& tuple = Tuple()) const { throw cannot_use_partition_as_subspace(); }

		virtual Subspace subspace(Tuple const& tuple) const { throw cannot_use_partition_as_subspace(); }
		virtual Subspace get(Tuple const& tuple) const { throw cannot_use_partition_as_subspace(); }

	protected:
		Reference<DirectoryLayer> parentDirectoryLayer;

		virtual Reference<DirectoryLayer> getDirectoryLayerForPath(Path const& path) const {
			return path.empty() ? parentDirectoryLayer : directoryLayer;
		}
	};
}

#endif
@ -0,0 +1,109 @@
/*
 * DirectorySubspace.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DirectorySubspace.h"

namespace FDB {
	DirectorySubspace::DirectorySubspace(Path const& path, StringRef const& prefix, Reference<DirectoryLayer> directoryLayer, Standalone<StringRef> const& layer)
		: Subspace(prefix), directoryLayer(directoryLayer), path(path), layer(layer) { }

	Future<Reference<DirectorySubspace>> DirectorySubspace::create(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer,
																   Optional<Standalone<StringRef>> const& prefix)
	{
		return directoryLayer->create(tr, getPartitionSubpath(path), layer, prefix);
	}

	Future<Reference<DirectorySubspace>> DirectorySubspace::open(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer) {
		return directoryLayer->open(tr, getPartitionSubpath(path), layer);
	}

	Future<Reference<DirectorySubspace>> DirectorySubspace::createOrOpen(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer) {
		return directoryLayer->createOrOpen(tr, getPartitionSubpath(path), layer);
	}

	Future<bool> DirectorySubspace::exists(Reference<Transaction> const& tr, Path const& path) {
		Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
		return directoryLayer->exists(tr, getPartitionSubpath(path, directoryLayer));
	}

	Future<Standalone<VectorRef<StringRef>>> DirectorySubspace::list(Reference<Transaction> const& tr, Path const& path) {
		return directoryLayer->list(tr, getPartitionSubpath(path));
	}

	Future<Reference<DirectorySubspace>> DirectorySubspace::move(Reference<Transaction> const& tr, Path const& oldPath, Path const& newPath) {
		return directoryLayer->move(tr, getPartitionSubpath(oldPath), getPartitionSubpath(newPath));
	}

	Future<Reference<DirectorySubspace>> DirectorySubspace::moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath) {
		Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(Path());
		Path directoryLayerPath = directoryLayer->getPath();

		if(directoryLayerPath.size() > newAbsolutePath.size()) {
			return cannot_move_directory_between_partitions();
		}

		for(int i = 0; i < directoryLayerPath.size(); ++i) {
			if(directoryLayerPath[i] != newAbsolutePath[i]) {
				return cannot_move_directory_between_partitions();
			}
		}

		Path newRelativePath(newAbsolutePath.begin() + directoryLayerPath.size(), newAbsolutePath.end());
		return directoryLayer->move(tr, getPartitionSubpath(Path(), directoryLayer), newRelativePath);
	}

	Future<Void> DirectorySubspace::remove(Reference<Transaction> const& tr, Path const& path) {
		Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
		return directoryLayer->remove(tr, getPartitionSubpath(path, directoryLayer));
	}

	Future<bool> DirectorySubspace::removeIfExists(Reference<Transaction> const& tr, Path const& path) {
		Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
		return directoryLayer->removeIfExists(tr, getPartitionSubpath(path, directoryLayer));
	}

	Reference<DirectoryLayer> DirectorySubspace::getDirectoryLayer() {
		return directoryLayer;
	}

	const Standalone<StringRef> DirectorySubspace::getLayer() const {
		return layer;
	}

	const IDirectory::Path DirectorySubspace::getPath() const {
		return path;
	}

	IDirectory::Path DirectorySubspace::getPartitionSubpath(Path const& path, Reference<DirectoryLayer> directoryLayer) const {
		if(!directoryLayer) {
			directoryLayer = this->directoryLayer;
		}

		Path newPath(this->path.begin() + directoryLayer->getPath().size(), this->path.end());
		newPath.insert(newPath.end(), path.begin(), path.end());

		return newPath;
	}

	Reference<DirectoryLayer> DirectorySubspace::getDirectoryLayerForPath(Path const& path) const {
		return directoryLayer;
	}
}
@ -0,0 +1,66 @@
/*
 * DirectorySubspace.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_DIRECTORY_SUBSPACE_H
#define FDB_FLOW_DIRECTORY_SUBSPACE_H

#pragma once

#include "IDirectory.h"
#include "DirectoryLayer.h"
#include "Subspace.h"

namespace FDB {
	class DirectorySubspace : public IDirectory, public Subspace {

	public:
		DirectorySubspace(Path const& path, StringRef const& prefix, Reference<DirectoryLayer> directoryLayer, Standalone<StringRef> const& layer = Standalone<StringRef>());
		virtual ~DirectorySubspace() {}

		virtual Future<Reference<DirectorySubspace>> create(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>(),
															Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>());

		virtual Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>());
		virtual Future<Reference<DirectorySubspace>> createOrOpen(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>());

		virtual Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path());
		virtual Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr, Path const& path = Path());

		virtual Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr, Path const& oldPath, Path const& newPath);
		virtual Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath);

		virtual Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path());
		virtual Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path());

		virtual Reference<DirectoryLayer> getDirectoryLayer();
		virtual const Standalone<StringRef> getLayer() const;
		virtual const Path getPath() const;

	protected:
		Reference<DirectoryLayer> directoryLayer;
		Path path;
		Standalone<StringRef> layer;

		virtual Path getPartitionSubpath(Path const& path, Reference<DirectoryLayer> directoryLayer = Reference<DirectoryLayer>()) const;
		virtual Reference<DirectoryLayer> getDirectoryLayerForPath(Path const& path) const;
	};
}

#endif
@ -0,0 +1,315 @@
/*
 * FDBLoanerTypes.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_LOANER_TYPES_H
#define FDB_FLOW_LOANER_TYPES_H

namespace FDB {
	typedef StringRef KeyRef;
	typedef StringRef ValueRef;

	typedef int64_t Version;

	typedef Standalone<KeyRef> Key;
	typedef Standalone<ValueRef> Value;

	inline Key keyAfter( const KeyRef& key ) {
		if(key == LiteralStringRef("\xff\xff"))
			return key;

		Standalone<StringRef> r;
		uint8_t* s = new (r.arena()) uint8_t[ key.size() + 1 ];
		memcpy(s, key.begin(), key.size() );
		s[key.size()] = 0;
		((StringRef&) r) = StringRef( s, key.size() + 1 );
		return r;
	}

	inline KeyRef keyAfter( const KeyRef& key, Arena& arena ) {
		if(key == LiteralStringRef("\xff\xff"))
			return key;
		uint8_t* t = new ( arena ) uint8_t[ key.size()+1 ];
		memcpy(t, key.begin(), key.size() );
		t[key.size()] = 0;
		return KeyRef(t,key.size()+1);
	}
|
||||
struct KeySelectorRef {
|
||||
KeyRef key; // Find the last item less than key
|
||||
bool orEqual; // (or equal to key, if this is true)
|
||||
int offset; // and then move forward this many items (or backward if negative)
|
||||
KeySelectorRef() {}
|
||||
KeySelectorRef( const KeyRef& key, bool orEqual, int offset ) : key(key), orEqual(orEqual), offset(offset) {}
|
||||
|
||||
KeySelectorRef( Arena& arena, const KeySelectorRef& copyFrom ) : key(arena,copyFrom.key), orEqual(copyFrom.orEqual), offset(copyFrom.offset) {}
|
||||
int expectedSize() const { return key.expectedSize(); }
|
||||
|
||||
// std::string toString() const {
|
||||
// if (offset > 0) {
|
||||
// if (orEqual) return format("firstGreaterThan(%s)%+d", printable(key).c_str(), offset-1);
|
||||
// else return format("firstGreaterOrEqual(%s)%+d", printable(key).c_str(), offset-1);
|
||||
// } else {
|
||||
// if (orEqual) return format("lastLessOrEqual(%s)%+d", printable(key).c_str(), offset);
|
||||
// else return format("lastLessThan(%s)%+d", printable(key).c_str(), offset);
|
||||
// }
|
||||
// }
|
||||
|
||||
bool isBackward() const { return !orEqual && offset<=0; } // True if the resolution of the KeySelector depends only on keys less than key
|
||||
bool isFirstGreaterOrEqual() const { return !orEqual && offset==1; }
|
||||
bool isFirstGreaterThan() const { return orEqual && offset==1; }
|
||||
bool isLastLessOrEqual() const { return orEqual && offset==0; }
|
||||
|
||||
// True iff, regardless of the contents of the database, lhs must resolve to a key > rhs
|
||||
bool isDefinitelyGreater( KeyRef const& k ) {
|
||||
return offset >= 1 && ( isFirstGreaterOrEqual() ? key > k : key >= k );
|
||||
}
|
||||
// True iff, regardless of the contents of the database, lhs must resolve to a key < rhs
|
||||
bool isDefinitelyLess( KeyRef const& k ) {
|
||||
return offset <= 0 && ( isLastLessOrEqual() ? key < k : key <= k );
|
||||
}
|
||||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & key & orEqual & offset;
|
||||
}
|
||||
};
|
||||
inline bool operator == (const KeySelectorRef& lhs, const KeySelectorRef& rhs) { return lhs.key == rhs.key && lhs.orEqual==rhs.orEqual && lhs.offset==rhs.offset; }
|
||||
inline KeySelectorRef lastLessThan( const KeyRef& k ) {
|
||||
return KeySelectorRef( k, false, 0 );
|
||||
}
|
||||
inline KeySelectorRef lastLessOrEqual( const KeyRef& k ) {
|
||||
return KeySelectorRef( k, true, 0 );
|
||||
}
|
||||
inline KeySelectorRef firstGreaterThan( const KeyRef& k ) {
|
||||
return KeySelectorRef( k, true, +1 );
|
||||
}
|
||||
inline KeySelectorRef firstGreaterOrEqual( const KeyRef& k ) {
|
||||
return KeySelectorRef( k, false, +1 );
|
||||
}
|
||||
inline KeySelectorRef operator + (const KeySelectorRef& s, int off) {
|
||||
return KeySelectorRef(s.key, s.orEqual, s.offset+off);
|
||||
}
|
||||
inline KeySelectorRef operator - (const KeySelectorRef& s, int off) {
|
||||
return KeySelectorRef(s.key, s.orEqual, s.offset-off);
|
||||
}
|
||||
|
||||
typedef Standalone<KeySelectorRef> KeySelector;
|
||||
|
||||
struct KeyValueRef {
|
||||
KeyRef key;
|
||||
ValueRef value;
|
||||
KeyValueRef() {}
|
||||
KeyValueRef( const KeyRef& key, const ValueRef& value ) : key(key), value(value) {}
|
||||
KeyValueRef( Arena& a, const KeyValueRef& copyFrom ) : key(a, copyFrom.key), value(a, copyFrom.value) {}
|
||||
bool operator == ( const KeyValueRef& r ) const { return key == r.key && value == r.value; }
|
||||
|
||||
int expectedSize() const { return key.expectedSize() + value.expectedSize(); }
|
||||
|
||||
template <class Ar>
|
||||
force_inline void serialize(Ar& ar) { ar & key & value; }
|
||||
|
||||
struct OrderByKey {
|
||||
bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {
|
||||
return a.key < b.key;
|
||||
}
|
||||
template <class T>
|
||||
bool operator()(T const& a, KeyValueRef const& b) const {
|
||||
return a < b.key;
|
||||
}
|
||||
template <class T>
|
||||
bool operator()(KeyValueRef const& a, T const& b) const {
|
||||
return a.key < b;
|
||||
}
|
||||
};
|
||||
|
||||
struct OrderByKeyBack {
|
||||
bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {
|
||||
return a.key > b.key;
|
||||
}
|
||||
template <class T>
|
||||
bool operator()(T const& a, KeyValueRef const& b) const {
|
||||
return a > b.key;
|
||||
}
|
||||
template <class T>
|
||||
bool operator()(KeyValueRef const& a, T const& b) const {
|
||||
return a.key > b;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
typedef Standalone<KeyValueRef> KeyValue;
|
||||
|
||||
struct RangeResultRef : VectorRef<KeyValueRef> {
|
||||
bool more; // True if (but not necessarily only if) values remain in the *key* range requested (possibly beyond the limits requested)
|
||||
// False implies that no such values remain
|
||||
Optional<KeyRef> readThrough; // Only present when 'more' is true. When present, this value represent the end (or beginning if reverse) of the range
|
||||
// which was read to produce these results. This is guarenteed to be less than the requested range.
|
||||
bool readToBegin;
|
||||
bool readThroughEnd;
|
||||
|
||||
RangeResultRef() : more(false), readToBegin(false), readThroughEnd(false) {}
|
||||
RangeResultRef( Arena& p, const RangeResultRef& toCopy ) : more( toCopy.more ), readToBegin( toCopy.readToBegin ), readThroughEnd( toCopy.readThroughEnd ), readThrough( toCopy.readThrough.present() ? KeyRef( p, toCopy.readThrough.get() ) : Optional<KeyRef>() ), VectorRef<KeyValueRef>( p, toCopy ) {}
|
||||
RangeResultRef( const VectorRef<KeyValueRef>& value, bool more, Optional<KeyRef> readThrough = Optional<KeyRef>() ) : VectorRef<KeyValueRef>( value ), more( more ), readThrough( readThrough ), readToBegin( false ), readThroughEnd( false ) {}
|
||||
RangeResultRef( bool readToBegin, bool readThroughEnd ) : more(false), readToBegin(readToBegin), readThroughEnd(readThroughEnd) { }
|
||||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & ((VectorRef<KeyValueRef>&)*this) & more & readThrough & readToBegin & readThroughEnd;
|
||||
}
|
||||
};
|
||||
|
||||
struct GetRangeLimits {
|
||||
enum { ROW_LIMIT_UNLIMITED = -1, BYTE_LIMIT_UNLIMITED = -1 };
|
||||
|
||||
int rows;
|
||||
int minRows;
|
||||
int bytes;
|
||||
|
||||
GetRangeLimits() : rows( ROW_LIMIT_UNLIMITED ), minRows(1), bytes( BYTE_LIMIT_UNLIMITED ) {}
|
||||
explicit GetRangeLimits( int rowLimit ) : rows( rowLimit ), minRows(1), bytes( BYTE_LIMIT_UNLIMITED ) {}
|
||||
GetRangeLimits( int rowLimit, int byteLimit ) : rows( rowLimit ), minRows(1), bytes( byteLimit ) {}
|
||||
|
||||
void decrement( VectorRef<KeyValueRef> const& data );
|
||||
void decrement( KeyValueRef const& data );
|
||||
|
||||
// True if either the row or byte limit has been reached
|
||||
bool isReached();
|
||||
|
||||
// True if data would cause the row or byte limit to be reached
|
||||
bool reachedBy( VectorRef<KeyValueRef> const& data );
|
||||
|
||||
bool hasByteLimit();
|
||||
bool hasRowLimit();
|
||||
|
||||
bool hasSatisfiedMinRows();
|
||||
bool isValid() { return (rows >= 0 || rows == ROW_LIMIT_UNLIMITED)
|
||||
&& (bytes >= 0 || bytes == BYTE_LIMIT_UNLIMITED)
|
||||
&& minRows >= 0 && (minRows <= rows || rows == ROW_LIMIT_UNLIMITED); }
|
||||
};
|
||||
|
||||
struct KeyRangeRef {
|
||||
const KeyRef begin, end;
|
||||
KeyRangeRef() {}
|
||||
KeyRangeRef( const KeyRef& begin, const KeyRef& end ) : begin(begin), end(end) {
|
||||
if( begin > end ) {
|
||||
throw inverted_range();
|
||||
}
|
||||
}
|
||||
KeyRangeRef( Arena& a, const KeyRangeRef& copyFrom ) : begin(a, copyFrom.begin), end(a, copyFrom.end) {}
|
||||
bool operator == ( const KeyRangeRef& r ) const { return begin == r.begin && end == r.end; }
|
||||
bool operator != ( const KeyRangeRef& r ) const { return begin != r.begin || end != r.end; }
|
||||
bool contains( const KeyRef& key ) const { return begin <= key && key < end; }
|
||||
bool contains( const KeyRangeRef& keys ) const { return begin <= keys.begin && keys.end <= end; }
|
||||
bool intersects( const KeyRangeRef& keys ) const { return begin < keys.end && keys.begin < end; }
|
||||
bool empty() const { return begin == end; }
|
||||
|
||||
Standalone<KeyRangeRef> withPrefix( const StringRef& prefix ) const {
|
||||
return KeyRangeRef( begin.withPrefix(prefix), end.withPrefix(prefix) );
|
||||
}
|
||||
|
||||
const KeyRangeRef& operator = (const KeyRangeRef& rhs) {
|
||||
const_cast<KeyRef&>(begin) = rhs.begin;
|
||||
const_cast<KeyRef&>(end) = rhs.end;
|
||||
return *this;
|
||||
}
|
||||
|
||||
int expectedSize() const { return begin.expectedSize() + end.expectedSize(); }
|
||||
|
||||
template <class Ar>
|
||||
force_inline void serialize(Ar& ar) {
|
||||
ar & const_cast<KeyRef&>(begin) & const_cast<KeyRef&>(end);
|
||||
if( begin > end ) {
|
||||
throw inverted_range();
|
||||
};
|
||||
}
|
||||
|
||||
struct ArbitraryOrder {
|
||||
bool operator()(KeyRangeRef const& a, KeyRangeRef const& b) const {
|
||||
if (a.begin < b.begin) return true;
|
||||
if (a.begin > b.begin) return false;
|
||||
return a.end < b.end;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
inline KeyRangeRef operator & (const KeyRangeRef& lhs, const KeyRangeRef& rhs) {
|
||||
KeyRef b = std::max(lhs.begin, rhs.begin), e = std::min(lhs.end, rhs.end);
|
||||
if (e < b)
|
||||
return KeyRangeRef();
|
||||
return KeyRangeRef(b,e);
|
||||
}
|
||||
|
||||
typedef Standalone<KeyRangeRef> KeyRange;
|
||||
|
||||
std::string printable( const StringRef& val );
|
||||
|
||||
template <class T>
|
||||
static std::string describe(T const& item) {
|
||||
return item.toString();
|
||||
}
|
||||
template <class K, class V>
|
||||
static std::string describe(std::map<K, V> const& items, int max_items = -1) {
|
||||
if (!items.size())
|
||||
return "[no items]";
|
||||
|
||||
std::string s;
|
||||
int count = 0;
|
||||
for (auto it = items.begin(); it != items.end(); it++) {
|
||||
if (++count > max_items && max_items >= 0)
|
||||
break;
|
||||
if (count > 1) s += ",";
|
||||
s += describe(it->first) + "=>" + describe(it->second);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static std::string describeList(T const& items, int max_items) {
|
||||
if (!items.size())
|
||||
return "[no items]";
|
||||
|
||||
std::string s;
|
||||
int count = 0;
|
||||
for (auto const& item : items) {
|
||||
if (++count > max_items && max_items >= 0)
|
||||
break;
|
||||
if (count > 1) s += ",";
|
||||
s += describe(item);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static std::string describe(std::vector<T> const& items, int max_items = -1) {
|
||||
return describeList(items, max_items);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static std::string describe(std::set<T> const& items, int max_items = -1) {
|
||||
return describeList(items, max_items);
|
||||
}
|
||||
|
||||
template <class T1, class T2>
|
||||
static std::string describe(std::pair<T1, T2> const& pair) {
|
||||
return "first: " + describe(pair.first) + " second: " + describe(pair.second);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* FDB_LOANER_TYPES_H */
|
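Editor's note: the four selector constructors above differ only in their (orEqual, offset) pair, and a worked example makes the semantics concrete. This annotation is not part of the original source; it follows directly from the definitions above.

// Against a database containing keys "a", "b", "c":
//
//   lastLessThan("b")        = (key="b", orEqual=false, offset=0)  -> resolves to "a"
//   lastLessOrEqual("b")     = (key="b", orEqual=true,  offset=0)  -> resolves to "b"
//   firstGreaterThan("b")    = (key="b", orEqual=true,  offset=+1) -> resolves to "c"
//   firstGreaterOrEqual("b") = (key="b", orEqual=false, offset=+1) -> resolves to "b"
//
// keyAfter("b") returns "b\x00", the immediate successor in byte order, so
// [key, keyAfter(key)) is the single-key range used by addReadConflictKey and
// addWriteConflictKey in fdb_flow.actor.cpp below.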
|
@ -0,0 +1,110 @@
|
|||
/*
 * HighContentionAllocator.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "HighContentionAllocator.h"

namespace FDB {
	ACTOR Future<Standalone<StringRef>> _allocate(Reference<Transaction> tr, Subspace counters, Subspace recent) {
		state int64_t start = 0;
		state int64_t window = 0;

		loop {
			FDBStandalone<RangeResultRef> range = wait(tr->getRange(counters.range(), 1, true, true));

			if(range.size() > 0) {
				start = counters.unpack(range[0].key).getInt(0);
			}

			state bool windowAdvanced = false;
			loop {
				// if thread safety is needed, this should be locked {
				if(windowAdvanced) {
					tr->clear(KeyRangeRef(counters.key(), counters.get(start).key()));
					tr->setOption(FDBTransactionOption::FDB_TR_OPTION_NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
					tr->clear(KeyRangeRef(recent.key(), recent.get(start).key()));
				}

				int64_t inc = 1;
				tr->atomicOp(counters.get(start).key(), StringRef((uint8_t*)&inc, 8), FDB_MUTATION_TYPE_ADD);
				Future<Optional<FDBStandalone<ValueRef>>> countFuture = tr->get(counters.get(start).key(), true);
				// }

				Optional<FDBStandalone<ValueRef>> countValue = wait(countFuture);

				int64_t count = 0;
				if(countValue.present()) {
					if(countValue.get().size() != 8) {
						throw invalid_database_value();
					}
					count = *(int64_t*)countValue.get().begin();
				}

				window = HighContentionAllocator::windowSize(start);
				if(count * 2 < window) {
					break;
				}

				start += window;
				windowAdvanced = true;
			}

			loop {
				state int64_t candidate = g_random->randomInt(start, start + window);

				// if thread safety is needed, this should be locked {
				state Future<FDBStandalone<RangeResultRef>> latestCounter = tr->getRange(counters.range(), 1, true, true);
				state Future<Optional<FDBStandalone<ValueRef>>> candidateValue = tr->get(recent.get(candidate).key());
				tr->setOption(FDBTransactionOption::FDB_TR_OPTION_NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
				tr->set(recent.get(candidate).key(), ValueRef());
				// }

				Void _ = wait(success(latestCounter) && success(candidateValue));
				int64_t currentWindowStart = 0;
				if(latestCounter.get().size() > 0) {
					currentWindowStart = counters.unpack(latestCounter.get()[0].key).getInt(0);
				}

				if(currentWindowStart > start) {
					break;
				}

				if(!candidateValue.get().present()) {
					tr->addWriteConflictKey(recent.get(candidate).key());
					return Tuple().append(candidate).pack();
				}
			}
		}
	}

	Future<Standalone<StringRef>> HighContentionAllocator::allocate(Reference<Transaction> const& tr) const {
		return _allocate(tr, counters, recent);
	}

	int64_t HighContentionAllocator::windowSize(int64_t start) {
		if (start < 255) {
			return 64;
		}
		if (start < 65535) {
			return 1024;
		}

		return 8192;
	}
}
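Editor's note: the interaction between windowSize() and the window-advance loop above is easier to see with concrete numbers. This annotation is derived directly from the code and is not part of the commit.

// Worked example: windowSize() grows the candidate window as allocations
// accumulate, keeping the chance of two concurrent allocators picking the
// same candidate low:
//
//   start in [0, 255)     -> window of 64 candidates
//   start in [255, 65535) -> window of 1024 candidates
//   start >= 65535        -> window of 8192 candidates
//
// The first loop advances the window (start += window; old counters and
// recent entries are cleared) once the counter shows the current window is
// at least half full, i.e. count * 2 >= window. The second loop then probes
// random candidates inside the window until it finds one never seen in
// 'recent', returning it tuple-encoded so shorter integers yield shorter
// directory prefixes.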
|
@ -0,0 +1,41 @@
|
|||
/*
 * HighContentionAllocator.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_HIGH_CONTENTION_ALLOCATOR_H
#define FDB_FLOW_HIGH_CONTENTION_ALLOCATOR_H

#pragma once

#include "Subspace.h"

namespace FDB {
	class HighContentionAllocator {
	public:
		HighContentionAllocator(Subspace subspace) : counters(subspace.get(0)), recent(subspace.get(1)) {}
		Future<Standalone<StringRef>> allocate(Reference<Transaction> const& tr) const;

		static int64_t windowSize(int64_t start);
	private:
		Subspace counters;
		Subspace recent;
	};
}

#endif
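Editor's note: a minimal usage sketch, assuming the Transaction and Subspace types from this bindings layer and an ACTOR context; names are illustrative and not part of the commit:

ACTOR Future<Standalone<StringRef>> allocatePrefix(Reference<Transaction> tr, Subspace allocatorSpace) {
	// The allocator stores its counters under subspace.get(0) and its
	// recently-issued candidates under subspace.get(1), per the constructor.
	state HighContentionAllocator allocator(allocatorSpace);
	Standalone<StringRef> prefix = wait(allocator.allocate(tr));
	// prefix is a short tuple-encoded integer, unique across concurrent
	// allocators as long as they share the same allocatorSpace.
	Void _ = wait(tr->commit());
	return prefix;
}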
|
@ -0,0 +1,60 @@
|
|||
/*
 * IDirectory.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_IDIRECTORY_H
#define FDB_FLOW_IDIRECTORY_H

#pragma once

#include "flow/flow.h"
#include "bindings/flow/fdb_flow.h"

namespace FDB {
	class DirectoryLayer;
	class DirectorySubspace;

	class IDirectory : public ReferenceCounted<IDirectory> {
	public:
		typedef std::vector<Standalone<StringRef>> Path;

		virtual Future<Reference<DirectorySubspace>> create(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>(),
			Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>()) = 0;

		virtual Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>()) = 0;
		virtual Future<Reference<DirectorySubspace>> createOrOpen(Reference<Transaction> const& tr, Path const& path, Standalone<StringRef> const& layer = Standalone<StringRef>()) = 0;

		virtual Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path()) = 0;
		virtual Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr, Path const& path = Path()) = 0;

		virtual Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr, Path const& oldPath, Path const& newPath) = 0;
		virtual Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath) = 0;

		virtual Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path()) = 0;
		virtual Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path()) = 0;

		virtual Reference<DirectoryLayer> getDirectoryLayer() = 0;
		virtual const Standalone<StringRef> getLayer() const = 0;
		virtual const Path getPath() const = 0;

		virtual ~IDirectory() {}
	};
}

#endif
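Editor's note: both DirectoryLayer and DirectorySubspace implement this interface, so calling code can operate on either uniformly; this annotation is not part of the commit and the names are illustrative.

// Path is a vector of path elements, so { "a", "b" } addresses directory "b"
// inside directory "a", relative to the receiver. For example, a rename within
// any directory-like object:
//
//   Future<Reference<DirectorySubspace>> f =
//       dir->move(tr, { LiteralStringRef("old") }, { LiteralStringRef("new") });
//
// moveTo differs in taking an absolute path for the receiver's new location.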
|
@ -0,0 +1,70 @@
|
|||
/*
 * Node.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DirectoryLayer.h"

namespace FDB {
	DirectoryLayer::Node::Node(Reference<DirectoryLayer> const& directoryLayer, Optional<Subspace> const& subspace, IDirectory::Path const& path, IDirectory::Path const& targetPath)
		: directoryLayer(directoryLayer),
		subspace(subspace),
		path(path),
		targetPath(targetPath),
		loadedMetadata(false)
	{ }

	bool DirectoryLayer::Node::exists() const {
		return subspace.present();
	}

	ACTOR Future<DirectoryLayer::Node> loadMetadata(DirectoryLayer::Node *n, Reference<Transaction> tr) {
		if(!n->exists()){
			n->loadedMetadata = true;
			return *n;
		}

		Optional<FDBStandalone<ValueRef>> layer = wait(tr->get(n->subspace.get().pack(DirectoryLayer::LAYER_KEY)));

		n->layer = layer.present() ? layer.get() : Standalone<StringRef>();
		n->loadedMetadata = true;

		return *n;
	}

	// Calls to loadMetadata must keep the Node alive while the future is outstanding
	Future<DirectoryLayer::Node> DirectoryLayer::Node::loadMetadata(Reference<Transaction> tr) {
		return FDB::loadMetadata(this, tr);
	}

	bool DirectoryLayer::Node::isInPartition(bool includeEmptySubpath) const {
		ASSERT(loadedMetadata);
		return exists() && layer == DirectoryLayer::PARTITION_LAYER && (includeEmptySubpath || targetPath.size() > path.size());
	}

	IDirectory::Path DirectoryLayer::Node::getPartitionSubpath() const {
		return Path(targetPath.begin() + path.size(), targetPath.end());
	}

	Reference<DirectorySubspace> DirectoryLayer::Node::getContents() const {
		ASSERT(exists());
		ASSERT(loadedMetadata);

		return directoryLayer->contentsOfNode(subspace.get(), path, layer);
	}
}
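Editor's note: a short worked example of the partition logic above, derived from getPartitionSubpath(); this annotation is not part of the commit.

// A Node whose layer equals PARTITION_LAYER redirects further path resolution
// into the partition's own directory layer. For a node at path ["a"] reached
// while resolving target path ["a", "b", "c"]:
//
//   getPartitionSubpath() == ["b", "c"]   // remainder resolved inside the partition
//
// isInPartition(false) is false when targetPath == path, i.e. when the
// partition node itself (rather than something beneath it) was requested.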
|
@ -0,0 +1,91 @@
|
|||
/*
 * Subspace.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Subspace.h"

namespace FDB {
	Subspace::Subspace(Tuple const& tuple, StringRef const& rawPrefix) {
		StringRef packed = tuple.pack();

		this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + packed.size());
		this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
		this->rawPrefix.append(this->rawPrefix.arena(), packed.begin(), packed.size());
	}

	Subspace::Subspace(Tuple const& tuple, Standalone<VectorRef<uint8_t>> const& rawPrefix) {
		this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + tuple.pack().size());
		this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
		this->rawPrefix.append(this->rawPrefix.arena(), tuple.pack().begin(), tuple.pack().size());
	}

	Subspace::Subspace(StringRef const& rawPrefix) {
		this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
	}

	Subspace::~Subspace() { }

	Key Subspace::key() const {
		return StringRef(rawPrefix.begin(), rawPrefix.size());
	}

	Key Subspace::pack(const Tuple& tuple) const {
		return tuple.pack().withPrefix(StringRef(rawPrefix.begin(), rawPrefix.size()));
	}

	Tuple Subspace::unpack(StringRef const& key) const {
		if (!contains(key)) {
			throw key_not_in_subspace();
		}
		return Tuple::unpack(key.substr(rawPrefix.size()));
	}

	KeyRange Subspace::range(Tuple const& tuple) const {
		VectorRef<uint8_t> begin;
		VectorRef<uint8_t> end;

		KeyRange keyRange;

		begin.reserve(keyRange.arena(), rawPrefix.size() + tuple.pack().size() + 1);
		begin.append(keyRange.arena(), rawPrefix.begin(), rawPrefix.size());
		begin.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
		begin.push_back(keyRange.arena(), uint8_t('\x00'));

		end.reserve(keyRange.arena(), rawPrefix.size() + tuple.pack().size() + 1);
		end.append(keyRange.arena(), rawPrefix.begin(), rawPrefix.size());
		end.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
		end.push_back(keyRange.arena(), uint8_t('\xff'));

		// FIXME: test that this uses the keyRange arena and doesn't create another one
		keyRange.KeyRangeRef::operator=(KeyRangeRef(StringRef(begin.begin(), begin.size()), StringRef(end.begin(), end.size())));
		return keyRange;
	}

	bool Subspace::contains(KeyRef const& key) const {
		return key.startsWith(StringRef(rawPrefix.begin(), rawPrefix.size()));
	}

	Subspace Subspace::subspace(Tuple const& tuple) const {
		return Subspace(tuple, rawPrefix);
	}

	Subspace Subspace::get(Tuple const& tuple) const {
		return subspace(tuple);
	}
}
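Editor's note: a worked example of the range() bounds above; this annotation is derived from the code and is not part of the commit.

// For a subspace with raw prefix P and argument tuple T, range() returns
// [ P + pack(T) + '\x00', P + pack(T) + '\xff' ). Every element packed under
// the subspace starts with a tuple type code inside that interval, so a
// getRange over subspace.range() returns exactly the keys pack()ed under it,
// and unpack() strips P back off:
//
//   Subspace s(Tuple().append(LiteralStringRef("app")));
//   Key k = s.pack(Tuple().append((int64_t)42));   // P + encoding of 42
//   Tuple t = s.unpack(k);                         // t.getInt(0) == 42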
|
@ -0,0 +1,80 @@
|
|||
/*
 * Subspace.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_SUBSPACE_H
#define FDB_FLOW_SUBSPACE_H

#pragma once

#include "flow/flow.h"
#include "bindings/flow/fdb_flow.h"
#include "Tuple.h"

namespace FDB {
	class Subspace {
	public:
		Subspace(Tuple const& tuple = Tuple(), StringRef const& rawPrefix = StringRef());
		Subspace(StringRef const& rawPrefix);

		virtual ~Subspace();

		virtual Key key() const;
		virtual bool contains(KeyRef const& key) const;

		virtual Key pack(Tuple const& tuple = Tuple()) const;
		virtual Tuple unpack(KeyRef const& key) const;
		virtual KeyRange range(Tuple const& tuple = Tuple()) const;

		template <class T>
		Key pack(T const& item) const {
			Tuple t;
			t.append(item);
			return pack(t);
		}

		Key pack(StringRef const& item, bool utf8=false) const {
			Tuple t;
			t.append(item, utf8);
			return pack(t);
		}

		virtual Subspace subspace(Tuple const& tuple) const;
		virtual Subspace get(Tuple const& tuple) const;

		template <class T>
		Subspace get(T const& item) const {
			Tuple t;
			t.append(item);
			return get(t);
		}

		Subspace get(StringRef const& item, bool utf8=false) const {
			Tuple t;
			t.append(item, utf8);
			return get(t);
		}

	private:
		Subspace(Tuple const& tuple, Standalone<VectorRef<uint8_t>> const& rawPrefix);
		Standalone<VectorRef<uint8_t>> rawPrefix;
	};
}

#endif
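Editor's note: the templated get()/pack() overloads above allow key layouts to be composed fluently without touching the database. A hedged usage sketch (names illustrative, not part of the commit):

//   Subspace app(Tuple().append(LiteralStringRef("app")));
//   Subspace scores = app.get(LiteralStringRef("scores"));  // nested subspace
//   Key k = scores.pack((int64_t)1001);                     // key for ("app", "scores", 1001)
//   // scores.range() brackets every key packed under ("app", "scores")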
|
@ -0,0 +1,248 @@
|
|||
/*
 * Tuple.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Tuple.h"

namespace FDB {
	static size_t find_string_terminator(const StringRef data, size_t offset) {
		size_t i = offset;
		while (i < data.size() - 1 && !(data[i] == '\x00' && data[i+1] != (uint8_t)'\xff')) {
			i += (data[i] == '\x00' ? 2 : 1);
		}

		return i;
	}

	Tuple::Tuple(StringRef const& str) {
		data.append(data.arena(), str.begin(), str.size());

		size_t i = 0;
		while(i < data.size()) {
			offsets.push_back(i);

			if(data[i] == '\x01' || data[i] == '\x02') {
				i = find_string_terminator(str, i+1) + 1;
			}
			else if(data[i] >= '\x0c' && data[i] <= '\x1c') {
				i += abs(data[i] - '\x14') + 1;
			}
			else if(data[i] == '\x00') {
				i += 1;
			}
			else {
				throw invalid_tuple_data_type();
			}
		}
	}

	Tuple Tuple::unpack(StringRef const& str) {
		return Tuple(str);
	}

	Tuple& Tuple::append(Tuple const& tuple) {
		for(size_t offset : tuple.offsets) {
			offsets.push_back(offset + data.size());
		}

		data.append(data.arena(), tuple.data.begin(), tuple.data.size());

		return *this;
	}

	Tuple& Tuple::append(StringRef const& str, bool utf8) {
		offsets.push_back(data.size());

		const uint8_t utfChar = uint8_t(utf8 ? '\x02' : '\x01');
		data.append(data.arena(), &utfChar, 1);

		size_t lastPos = 0;
		for(size_t pos = 0; pos < str.size(); ++pos) {
			if(str[pos] == '\x00') {
				data.append(data.arena(), str.begin() + lastPos, pos - lastPos);
				data.push_back(data.arena(), (uint8_t)'\x00');
				data.push_back(data.arena(), (uint8_t)'\xff');
				lastPos = pos + 1;
			}
		}

		data.append(data.arena(), str.begin() + lastPos, str.size() - lastPos);
		data.push_back(data.arena(), (uint8_t)'\x00');

		return *this;
	}

	Tuple& Tuple::append( int64_t value ) {
		uint64_t swap = value;
		bool neg = false;

		offsets.push_back( data.size() );

		if ( value < 0 ) {
			value = ~(-value);
			neg = true;
		}

		swap = bigEndian64(value);

		for ( int i = 0; i < 8; i++ ) {
			if ( ((uint8_t*)&swap)[i] != (neg ? 255 : 0) ) {
				data.push_back( data.arena(), (uint8_t)(20 + (8-i) * (neg ? -1 : 1)) );
				data.append( data.arena(), ((const uint8_t *)&swap) + i, 8 - i );
				return *this;
			}
		}

		data.push_back( data.arena(), (uint8_t)'\x14' );
		return *this;
	}

	Tuple& Tuple::appendNull() {
		offsets.push_back(data.size());
		data.push_back(data.arena(), (uint8_t)'\x00');
		return *this;
	}

	Tuple::ElementType Tuple::getType(size_t index) const {
		if(index >= offsets.size()) {
			throw invalid_tuple_index();
		}

		uint8_t code = data[offsets[index]];

		if(code == '\x00') {
			return ElementType::NULL_TYPE;
		}
		else if(code == '\x01') {
			return ElementType::BYTES;
		}
		else if(code == '\x02') {
			return ElementType::UTF8;
		}
		else if(code >= '\x0c' && code <= '\x1c') {
			return ElementType::INT;
		}
		else {
			throw invalid_tuple_data_type();
		}
	}

	Standalone<StringRef> Tuple::getString(size_t index) const {
		if(index >= offsets.size()) {
			throw invalid_tuple_index();
		}

		uint8_t code = data[offsets[index]];
		if(code != '\x01' && code != '\x02') {
			throw invalid_tuple_data_type();
		}

		size_t b = offsets[index] + 1;
		size_t e;
		if (offsets.size() > index + 1) {
			e = offsets[index+1];
		} else {
			e = data.size();
		}

		Standalone<StringRef> result;
		VectorRef<uint8_t> staging;

		for (size_t i = b; i < e; ++i) {
			if(data[i] == '\x00') {
				staging.append(result.arena(), data.begin() + b, i - b);
				++i;
				b = i + 1;

				if(i < e) {
					staging.push_back(result.arena(), '\x00');
				}
			}
		}

		if(b < e) {
			staging.append(result.arena(), data.begin() + b, e - b);
		}

		result.StringRef::operator=(StringRef(staging.begin(), staging.size()));
		return result;
	}

	int64_t Tuple::getInt(size_t index) const {
		if(index >= offsets.size()) {
			throw invalid_tuple_index();
		}

		int64_t swap;
		bool neg = false;

		ASSERT(offsets[index] < data.size());
		uint8_t code = data[offsets[index]];
		if(code < '\x0c' || code > '\x1c') {
			throw invalid_tuple_data_type();
		}

		int8_t len = code - '\x14';

		if ( len < 0 ) {
			len = -len;
			neg = true;
		}

		memset( &swap, neg ? '\xff' : 0, 8 - len );
		memcpy( ((uint8_t*)&swap) + 8 - len, data.begin() + offsets[index] + 1, len );

		swap = bigEndian64( swap );

		if ( neg ) {
			swap = -(~swap);
		}

		return swap;
	}

	KeyRange Tuple::range(Tuple const& tuple) const {
		VectorRef<uint8_t> begin;
		VectorRef<uint8_t> end;

		KeyRange keyRange;

		begin.reserve(keyRange.arena(), data.size() + tuple.pack().size() + 1);
		begin.append(keyRange.arena(), data.begin(), data.size());
		begin.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
		begin.push_back(keyRange.arena(), uint8_t('\x00'));

		end.reserve(keyRange.arena(), data.size() + tuple.pack().size() + 1);
		end.append(keyRange.arena(), data.begin(), data.size());
		end.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
		end.push_back(keyRange.arena(), uint8_t('\xff'));

		keyRange.KeyRangeRef::operator=(KeyRangeRef(StringRef(begin.begin(), begin.size()), StringRef(end.begin(), end.size())));
		return keyRange;
	}

	Tuple Tuple::subTuple(size_t start, size_t end) const {
		if(start >= offsets.size() || end <= start) {
			return Tuple();
		}

		size_t endPos = end < offsets.size() ? offsets[end] : data.size();
		return Tuple(StringRef(data.begin() + offsets[start], endPos - offsets[start]));
	}
}
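Editor's note: the integer encoding in append(int64_t)/getInt() above is order-preserving and worth a worked example; this annotation follows directly from the code and is not part of the commit.

// Integers use a length-prefixed code centered on 0x14. append() writes
// 20 + n for a positive value needing n significant bytes, and 20 - n for a
// negative value (whose magnitude is stored as its one's complement), so
// byte-wise key order matches numeric order:
//
//   append(0)   -> 14
//   append(1)   -> 15 01
//   append(-1)  -> 13 fe
//   append(256) -> 16 01 00
//
// getInt() reverses this: it sign-extends with 0xff for codes below 0x14,
// byte-swaps from big-endian, and undoes the complement.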
|
@ -0,0 +1,66 @@
|
|||
/*
 * Tuple.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_TUPLE_H
#define FDB_FLOW_TUPLE_H

#pragma once

#include "bindings/flow/fdb_flow.h"

namespace FDB {
	struct Tuple {
		Tuple() {}

		static Tuple unpack(StringRef const& str);

		Tuple& append(Tuple const& tuple);
		Tuple& append(StringRef const& str, bool utf8=false);
		Tuple& append(int64_t);
		Tuple& appendNull();

		StringRef pack() const { return StringRef(data.begin(), data.size()); }

		template <typename T>
		Tuple& operator<<(T const& t) {
			return append(t);
		}

		enum ElementType { NULL_TYPE, INT, BYTES, UTF8 };

		// this is the number of elements, not the length of the data
		size_t size() const { return offsets.size(); }

		ElementType getType(size_t index) const;
		Standalone<StringRef> getString(size_t index) const;
		int64_t getInt(size_t index) const;

		KeyRange range(Tuple const& tuple = Tuple()) const;

		Tuple subTuple(size_t beginIndex, size_t endIndex = std::numeric_limits<size_t>::max()) const;

	private:
		Tuple(const StringRef& data);
		Standalone<VectorRef<uint8_t>> data;
		std::vector<size_t> offsets;
	};
}

#endif /* FDB_FLOW_TUPLE_H */
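Editor's note: a minimal round-trip sketch for the API above; this annotation is not part of the commit.

//   Tuple t;
//   t << LiteralStringRef("user") << (int64_t)7;
//   Standalone<StringRef> k(t.pack());   // pack() aliases t's arena; copy to outlive t
//   Tuple u = Tuple::unpack(k);
//   // u.getType(0) == Tuple::BYTES, u.getString(0) == "user", u.getInt(1) == 7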
|
@ -0,0 +1,359 @@
|
|||
/*
 * fdb_flow.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdb_flow.h"

#include "flow/DeterministicRandom.h"
#include "flow/SystemMonitor.h"

#include <stdio.h>

using namespace FDB;

THREAD_FUNC networkThread(void* fdb) {
	((FDB::API*)fdb)->runNetwork();
	THREAD_RETURN;
}

ACTOR Future<Void> _test() {
	API *fdb = FDB::API::selectAPIVersion(400);
	auto c = fdb->createCluster( std::string() );
	auto db = c->createDatabase( LiteralStringRef("DB") );
	state Reference<Transaction> tr( new Transaction(db) );

	// tr->setVersion(1);

	Version ver = wait( tr->getReadVersion() );
	printf("%lld\n", (long long)ver);

	state std::vector< Future<Version> > versions;

	state double starttime = timer_monotonic();
	state int i;
	// for (i = 0; i < 100000; i++) {
	// 	Version v = wait( tr->getReadVersion() );
	// }
	for ( i = 0; i < 100000; i++ ) {
		versions.push_back( tr->getReadVersion() );
	}
	for ( i = 0; i < 100000; i++ ) {
		Version v = wait( versions[i] );
	}
	// Void _ = wait( waitForAllReady( versions ) );
	printf("Elapsed: %lf\n", timer_monotonic() - starttime );

	tr->set( LiteralStringRef("foo"), LiteralStringRef("bar") );

	Optional< FDBStandalone<ValueRef> > v = wait( tr->get( LiteralStringRef("foo") ) );
	if ( v.present() ) {
		printf("%s\n", v.get().toString().c_str() );
	}

	FDBStandalone<RangeResultRef> r = wait( tr->getRange( KeyRangeRef( LiteralStringRef("a"), LiteralStringRef("z") ), 100 ) );

	for ( auto kv : r ) {
		printf("%s is %s\n", kv.key.toString().c_str(), kv.value.toString().c_str());
	}

	g_network->stop();
	return Void();
}

void fdb_flow_test() {
	API *fdb = FDB::API::selectAPIVersion(400);
	fdb->setupNetwork();
	startThread(networkThread, fdb);

	int randomSeed = platform::getRandomSeed();

	g_random = new DeterministicRandom(randomSeed);
	g_nondeterministic_random = new DeterministicRandom(platform::getRandomSeed());
	g_debug_random = new DeterministicRandom(platform::getRandomSeed());

	g_network = newNet2( NetworkAddress(), false );

	openTraceFile(NetworkAddress(), 1000000, 1000000, ".");
	systemMonitor();
	uncancellable(recurring(&systemMonitor, 5.0, TaskFlushTrace));

	Future<Void> t = _test();

	g_network->run();
}

namespace FDB {

	static inline void throw_on_error( fdb_error_t e ) {
		if (e)
			throw Error(e);
	}

	void CFuture::blockUntilReady() {
		throw_on_error( fdb_future_block_until_ready( f ) );
	}

	void backToFutureCallback( FDBFuture* f, void* data ) {
		g_network->onMainThread( Promise<Void>((SAV<Void>*)data), TaskDefaultOnMainThread ); // SOMEDAY: think about this priority
	}

	// backToFuture<Type>( FDBFuture*, (FDBFuture* -> Type) ) -> Future<Type>
	// Takes an FDBFuture (from the alien client world, with callbacks potentially firing on an alien thread)
	// and converts it into a Future<T> (with callbacks working on this thread, cancellation etc).
	// You must pass as the second parameter a function which takes a ready FDBFuture* and returns a value of Type
	ACTOR template<class T, class Function> static Future<T> backToFuture( FDBFuture* _f, Function convertValue ) {
		state Reference<CFuture> f( new CFuture(_f) );

		Promise<Void> ready;
		Future<Void> onReady = ready.getFuture();

		throw_on_error( fdb_future_set_callback( f->f, backToFutureCallback, ready.extractRawPointer() ) );
		Void _ = wait( onReady );

		return convertValue( f );
	}

	void API::setNetworkOption( FDBNetworkOption option, Optional<StringRef> value ) {
		if ( value.present() )
			throw_on_error( fdb_network_set_option( option, value.get().begin(), value.get().size() ) );
		else
			throw_on_error( fdb_network_set_option( option, NULL, 0 ) );
	}

	API* API::instance = NULL;
	API::API(int version) : version(version) {}

	API* API::selectAPIVersion(int apiVersion) {
		if(API::instance && apiVersion != API::instance->version) {
			throw api_version_already_set();
		}

		if(apiVersion < 200 || apiVersion > FDB_API_VERSION) {
			throw api_version_not_supported();
		}

		throw_on_error( fdb_select_api_version_impl(apiVersion, FDB_API_VERSION) );

		if(!API::instance) {
			API::instance = new API(apiVersion);
		}

		return API::instance;
	}

	void API::setupNetwork() {
		throw_on_error( fdb_setup_network() );
	}

	void API::runNetwork() {
		throw_on_error( fdb_run_network() );
	}

	void API::stopNetwork() {
		throw_on_error( fdb_stop_network() );
	}

	bool API::evaluatePredicate(FDBErrorPredicate pred, Error const& e) {
		return fdb_error_predicate( pred, e.code() );
	}

	Reference<Cluster> API::createCluster( std::string const& connFilename ) {
		CFuture f( fdb_create_cluster( connFilename.c_str() ) );
		f.blockUntilReady();

		FDBCluster* c;
		throw_on_error( fdb_future_get_cluster( f.f, &c ) );

		return Reference<Cluster>( new Cluster(c) );
	}

	Reference<DatabaseContext> Cluster::createDatabase( Standalone<StringRef> dbName ) {
		CFuture f( fdb_cluster_create_database( c, dbName.begin(), dbName.size() ) );
		f.blockUntilReady();

		FDBDatabase* db;
		throw_on_error( fdb_future_get_database( f.f, &db ) );

		return Reference<DatabaseContext>( new DatabaseContext(db) );
	}

	void DatabaseContext::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
		if (value.present())
			throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
		else
			throw_on_error(fdb_database_set_option(db, option, NULL, 0));
	}

	Transaction::Transaction( Reference<DatabaseContext> const& db ) {
		throw_on_error( fdb_database_create_transaction( db->db, &tr ) );
	}

	void Transaction::setVersion( Version v ) {
		fdb_transaction_set_read_version( tr, v );
	}

	Future<Version> Transaction::getReadVersion() {
		return backToFuture<Version>( fdb_transaction_get_read_version( tr ), [](Reference<CFuture> f){
			Version value;

			throw_on_error( fdb_future_get_version( f->f, &value ) );

			return value;
		} );
	}

	Future< Optional<FDBStandalone<ValueRef>> > Transaction::get( const Key& key, bool snapshot ) {
		return backToFuture< Optional<FDBStandalone<ValueRef>> >( fdb_transaction_get( tr, key.begin(), key.size(), snapshot ), [](Reference<CFuture> f) {
			fdb_bool_t present;
			uint8_t const* value;
			int value_length;

			throw_on_error( fdb_future_get_value( f->f, &present, &value, &value_length ) );

			if ( present ) {
				return Optional<FDBStandalone<ValueRef>>( FDBStandalone<ValueRef>( f, ValueRef( value, value_length ) ) );
			} else {
				return Optional<FDBStandalone<ValueRef>>();
			}
		} );
	}

	Future< Void > Transaction::watch( const Key& key ) {
		return backToFuture< Void >( fdb_transaction_watch( tr, key.begin(), key.size() ), [](Reference<CFuture> f) {
			throw_on_error( fdb_future_get_error( f->f ) );
			return Void();
		} );
	}

	Future< FDBStandalone<KeyRef> > Transaction::getKey( const KeySelector& key, bool snapshot ) {
		return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_key( tr, key.key.begin(), key.key.size(), key.orEqual, key.offset, snapshot ), [](Reference<CFuture> f) {
			uint8_t const* key;
			int key_length;

			throw_on_error( fdb_future_get_key( f->f, &key, &key_length ) );

			return FDBStandalone<KeyRef>( f, KeyRef( key, key_length ) );
		} );
	}

	Future< FDBStandalone<RangeResultRef> > Transaction::getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode ) {
		// FIXME: iteration
		return backToFuture< FDBStandalone<RangeResultRef> >( fdb_transaction_get_range( tr, begin.key.begin(), begin.key.size(), begin.orEqual, begin.offset, end.key.begin(), end.key.size(), end.orEqual, end.offset, limits.rows, limits.bytes, streamingMode, 1, snapshot, reverse ), [](Reference<CFuture> f) {
			FDBKeyValue const* kv;
			int count;
			fdb_bool_t more;

			throw_on_error( fdb_future_get_keyvalue_array( f->f, &kv, &count, &more ) );

			return FDBStandalone<RangeResultRef>( f, RangeResultRef( VectorRef<KeyValueRef>( (KeyValueRef*)kv, count ), more ) );
		} );
	}

	void Transaction::addReadConflictRange( KeyRangeRef const& keys ) {
		throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) );
	}

	void Transaction::addReadConflictKey( KeyRef const& key ) {
		return addReadConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
	}

	void Transaction::addWriteConflictRange( KeyRangeRef const& keys ) {
		throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE ) );
	}

	void Transaction::addWriteConflictKey( KeyRef const& key ) {
		return addWriteConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
	}

	void Transaction::atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType ) {
		fdb_transaction_atomic_op( tr, key.begin(), key.size(), operand.begin(), operand.size(), operationType );
	}

	void Transaction::set( const KeyRef& key, const ValueRef& value ) {
		fdb_transaction_set( tr, key.begin(), key.size(), value.begin(), value.size() );
	}

	void Transaction::clear( const KeyRangeRef& range ) {
		fdb_transaction_clear_range( tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size() );
	}

	void Transaction::clear( const KeyRef& key ) {
		fdb_transaction_clear( tr, key.begin(), key.size() );
	}

	Future<Void> Transaction::commit() {
		return backToFuture< Void >( fdb_transaction_commit( tr ), [](Reference<CFuture> f) {
			throw_on_error( fdb_future_get_error( f->f ) );
			return Void();
		} );
	}

	Version Transaction::getCommittedVersion() {
		Version v;

		throw_on_error( fdb_transaction_get_committed_version( tr, &v ) );
		return v;
	}

	Future<FDBStandalone<StringRef>> Transaction::getVersionstamp() {
		return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_versionstamp( tr ), [](Reference<CFuture> f) {
			uint8_t const* key;
			int key_length;

			throw_on_error( fdb_future_get_key( f->f, &key, &key_length ) );

			return FDBStandalone<StringRef>( f, StringRef( key, key_length ) );
		} );
	}

	void Transaction::setOption( FDBTransactionOption option, Optional<StringRef> value ) {
		if ( value.present() ) {
			throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) );
		} else {
			throw_on_error( fdb_transaction_set_option( tr, option, NULL, 0 ) );
		}
	}

	Future<Void> Transaction::onError( Error const& e ) {
		return backToFuture< Void >( fdb_transaction_on_error( tr, e.code() ), [](Reference<CFuture> f) {
			throw_on_error( fdb_future_get_error( f->f ) );
			return Void();
		} );
	}

	void Transaction::cancel() {
		fdb_transaction_cancel( tr );
	}

	void Transaction::reset() {
		fdb_transaction_reset( tr );
	}

	std::string printable( const StringRef& val ) {
		std::string s;
		for(int i=0; i<val.size(); i++) {
			uint8_t b = val[i];
			if (b >= 32 && b < 127 && b != '\\') s += (char)b;
			else if (b == '\\') s += "\\\\";
			else s += format("\\x%02x", b);
		}
		return s;
	}

}
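Editor's note: onError() above gives the standard retry semantics (backoff for retryable errors, rethrow otherwise). A hedged sketch of the canonical retry loop built from commit() and onError(); names are illustrative and this is not part of the commit:

ACTOR Future<Void> setWithRetry(Reference<DatabaseContext> db, Key key, Value value) {
	state Reference<Transaction> tr(new Transaction(db));
	loop {
		try {
			tr->set(key, value);
			Void _ = wait(tr->commit());
			return Void();
		} catch(Error& e) {
			// Resolves after a backoff if e is retryable (and resets tr);
			// rethrows e if it is not.
			Void _ = wait(tr->onError(e));
		}
	}
}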
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* fdb_flow.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_FLOW_FDB_FLOW_H
#define FDB_FLOW_FDB_FLOW_H

#include <flow/flow.h>

#define FDB_API_VERSION 500
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT

#include "FDBLoanerTypes.h"

namespace FDB {

	class DatabaseContext : public ReferenceCounted<DatabaseContext>, NonCopyable {
		friend class Cluster;
		friend class Transaction;
	public:
		~DatabaseContext() {
			fdb_database_destroy( db );
		}

		void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>());

	private:
		FDBDatabase* db;
		explicit DatabaseContext( FDBDatabase* db ) : db(db) {}
	};

	class Cluster : public ReferenceCounted<Cluster>, NonCopyable {
	public:
		~Cluster() {
			fdb_cluster_destroy( c );
		}

		Reference<DatabaseContext> createDatabase( Standalone<StringRef> dbName );

	private:
		explicit Cluster( FDBCluster* c ) : c(c) {}
		FDBCluster* c;

		friend class API;
	};

	class API {
	public:
		static API* selectAPIVersion(int apiVersion);

		void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());

		void setupNetwork();
		void runNetwork();
		void stopNetwork();

		Reference<Cluster> createCluster( std::string const& connFilename );

		bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);

	private:
		static API* instance;

		API(int version);
		int version;
	};

	struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
		CFuture() : f(NULL) {}
		explicit CFuture( FDBFuture* f ) : f(f) {}
		~CFuture() {
			if (f) {
				fdb_future_destroy(f);
			}
		}

		void blockUntilReady();

		FDBFuture* f;
	};

	template <class T>
	class FDBStandalone : public T {
	public:
		FDBStandalone() {}
		FDBStandalone( Reference<CFuture> f, T const& t ) : T(t), f(f) {}
		FDBStandalone( FDBStandalone const& o ) : T((T const&)o), f(o.f) {}
	private:
		Reference<CFuture> f;
	};

	class Transaction : public ReferenceCounted<Transaction>, private NonCopyable, public FastAllocated<Transaction> {
	public:
		explicit Transaction( Reference<DatabaseContext> const& db );
		~Transaction() {
			if (tr) {
				fdb_transaction_destroy(tr);
			}
		}

		void setVersion( Version v );
		Future<Version> getReadVersion();

		Future< Optional<FDBStandalone<ValueRef>> > get( const Key& key, bool snapshot = false );
		Future< Void > watch( const Key& key );
		Future< FDBStandalone<KeyRef> > getKey( const KeySelector& key, bool snapshot = false );
		Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL );
		Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
			return getRange( begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode );
		}
		Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
			return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
							 KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
							 limit, snapshot, reverse, streamingMode );
		}
		Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
			return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
							 KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
							 limits, snapshot, reverse, streamingMode );
		}

		// Future< Standalone<VectorRef<const char*>> > getAddressesForKey(const Key& key);

		void addReadConflictRange( KeyRangeRef const& keys );
		void addReadConflictKey( KeyRef const& key );
		void addWriteConflictRange( KeyRangeRef const& keys );
		void addWriteConflictKey( KeyRef const& key );
		// void makeSelfConflicting() { tr.makeSelfConflicting(); }

		void atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType );
		void set( const KeyRef& key, const ValueRef& value );
		void clear( const KeyRangeRef& range );
		void clear( const KeyRef& key );

		Future<Void> commit();
		Version getCommittedVersion();
		Future<FDBStandalone<StringRef>> getVersionstamp();

		void setOption( FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>() );

		Future<Void> onError( Error const& e );

		void cancel();
		void reset();
		// double getBackoff() { return tr.getBackoff(); }
		// void debugTransaction(UID dID) { tr.debugTransaction(dID); }

		Transaction() : tr(NULL) {}
		Transaction( Transaction&& r ) noexcept(true) {
			tr = r.tr;
			r.tr = NULL;
		}
		Transaction& operator=( Transaction&& r ) noexcept(true) {
			tr = r.tr;
			r.tr = NULL;
			return *this;
		}

	private:
		FDBTransaction* tr;
	};

}

#endif
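
For orientation, a minimal sketch of how a client drives this binding, following the pattern used by the fdb_flow tests: select the API version matching FDB_API_VERSION above, run the network loop on a dedicated thread, then issue transactions from an actor with the standard commit/onError retry loop. The actor name, key, value, and cluster-file path below are illustrative, not part of this commit.

#include "bindings/flow/fdb_flow.h"

// The network loop must run on its own thread for the lifetime of the process.
THREAD_FUNC networkThread(void* api) {
	((FDB::API*)api)->runNetwork();
	THREAD_RETURN;
}

// Illustrative actor: write one key, retrying on retryable errors.
ACTOR Future<Void> helloWorld(Reference<FDB::DatabaseContext> db) {
	state Reference<FDB::Transaction> tr(new FDB::Transaction(db));
	loop {
		try {
			tr->set(LiteralStringRef("hello"), LiteralStringRef("world"));
			Void _ = wait(tr->commit());
			return Void();
		} catch(Error& e) {
			Void _ = wait(tr->onError(e));   // backs off, then retries the loop
		}
	}
}

// Setup, before running the actor (the cluster file path is illustrative):
//   FDB::API* fdb = FDB::API::selectAPIVersion(500);   // must match FDB_API_VERSION
//   fdb->setupNetwork();
//   startThread(networkThread, fdb);
//   Reference<FDB::Cluster> cluster = fdb->createCluster("fdb.cluster");
//   Reference<FDB::DatabaseContext> db = cluster->createDatabase(LiteralStringRef("DB"));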

@@ -0,0 +1,150 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Condition="'$(Release)' != 'true' ">
  </PropertyGroup>
  <PropertyGroup Condition="'$(Release)' == 'true' ">
    <PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
  </PropertyGroup>
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|X64">
      <Configuration>Debug</Configuration>
      <Platform>X64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|X64">
      <Configuration>Release</Configuration>
      <Platform>X64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <ItemGroup>
    <ActorCompiler Include="fdb_flow.actor.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="DirectoryPartition.h" />
    <ClInclude Include="FDBLoanerTypes.h" />
    <ClInclude Include="fdb_flow.h" />
    <ClInclude Include="Tuple.h" />
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="Tuple.cpp" />
    <ClInclude Include="IDirectory.h" />
    <ClInclude Include="Subspace.h" />
    <ClCompile Include="Subspace.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="HighContentionAllocator.h" />
    <ActorCompiler Include="HighContentionAllocator.actor.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="DirectoryLayer.h" />
    <ActorCompiler Include="DirectoryLayer.actor.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="DirectorySubspace.h" />
    <ClCompile Include="DirectorySubspace.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ActorCompiler Include="Node.actor.cpp" />
  </ItemGroup>
  <ItemGroup>
    <None Include="no_intellisense.opt" />
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGUID>{2BA0A5E2-EB4C-4A32-948C-CBAABD77AF87}</ProjectGUID>
    <TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
    <Keyword>Win32Proj</Keyword>
    <RootNamespace>fdb_flow</RootNamespace>
  </PropertyGroup>
  <PropertyGroup>
    <OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
    <IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
    <BuildLogFile>$(IntDir)\$(MSBuildProjectName).log</BuildLogFile>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Label="PropertySheets">
    <Import Project="$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props')" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
  </PropertyGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <MinimalRebuild>false</MinimalRebuild>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <Optimization>Disabled</Optimization>
      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
    </Link>
    <Lib>
      <AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
    </Lib>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
    <ClCompile>
      <WarningLevel>Level3</WarningLevel>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <Optimization>Full</Optimization>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
      <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
      <EnablePREfast>false</EnablePREfast>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
      <MinimalRebuild>false</MinimalRebuild>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>false</EnableCOMDATFolding>
      <OptimizeReferences>false</OptimizeReferences>
      <LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
      <AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
      <AdditionalOptions>/LTCG %(AdditionalOptions)</AdditionalOptions>
    </Link>
    <Lib>
      <AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
    </Lib>
  </ItemDefinitionGroup>
  <ImportGroup Label="ExtensionTargets">
    <Import Project="..\..\flow\actorcompiler\ActorCompiler.targets" />
  </ImportGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <Target Name="MyPreCompileSteps" AfterTargets="CLCompile">
    <Exec Command="&quot;$(SolutionDir)bin\$(Configuration)\coveragetool.exe&quot; &quot;$(OutDir)coverage.$(TargetName).xml&quot; @(ActorCompiler -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLInclude -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLCompile -> '%(RelativeDir)%(Filename)%(Extension)', ' ')" />
  </Target>
</Project>

@@ -0,0 +1,26 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# -*- mode: makefile; -*-

fdb_flow_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_LDFLAGS := -Llib -lfdb_c $(fdbrpc_LDFLAGS)
fdb_flow_LIBS := lib/libfdbrpc.a

@@ -0,0 +1,561 @@
/*
 * DirectoryTester.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Tester.actor.h"

using namespace FDB;

ACTOR Future<std::vector<Tuple>> popTuples(Reference<FlowTesterData> data, int count = 1) {
	state std::vector<Tuple> tuples;

	while(tuples.size() < count) {
		Standalone<StringRef> sizeStr = wait(data->stack.pop()[0].value);
		int size = Tuple::unpack(sizeStr).getInt(0);

		state std::vector<StackItem> tupleItems = data->stack.pop(size);
		state Tuple tuple;

		state int index;
		for(index = 0; index < tupleItems.size(); ++index) {
			Standalone<StringRef> itemStr = wait(tupleItems[index].value);
			tuple.append(Tuple::unpack(itemStr));
		}

		tuples.push_back(tuple);
	}

	return tuples;
}

ACTOR Future<Tuple> popTuple(Reference<FlowTesterData> data) {
	std::vector<Tuple> tuples = wait(popTuples(data));
	return tuples[0];
}

ACTOR Future<std::vector<IDirectory::Path>> popPaths(Reference<FlowTesterData> data, int count = 1) {
	std::vector<Tuple> tuples = wait(popTuples(data, count));

	std::vector<IDirectory::Path> paths;
	for(auto &tuple : tuples) {
		IDirectory::Path path;
		for(int i = 0; i < tuple.size(); ++i) {
			path.push_back(tuple.getString(i));
		}

		paths.push_back(path);
	}

	return paths;
}

ACTOR Future<IDirectory::Path> popPath(Reference<FlowTesterData> data) {
	std::vector<IDirectory::Path> paths = wait(popPaths(data));
	return paths[0];
}

std::string pathToString(IDirectory::Path const& path) {
	std::string str;
	str += "[";
	for(int i = 0; i < path.size(); ++i) {
		str += path[i].toString();
		if(i < path.size() - 1) {
			str += ", ";
		}
	}

	return str + "]";
}

IDirectory::Path combinePaths(IDirectory::Path const& path1, IDirectory::Path const& path2) {
	IDirectory::Path outPath(path1.begin(), path1.end());
	for(auto p : path2) {
		outPath.push_back(p);
	}

	return outPath;
}

void logOp(std::string message, bool force=false) {
	if(LOG_OPS || force) {
		printf("%s\n", message.c_str());
		fflush(stdout);
	}
}

//DIRECTORY_CREATE_SUBSPACE
struct DirectoryCreateSubspaceFunc : InstructionFunc {
	static const char* name;

	ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
		state Tuple path = wait(popTuple(data));
		Tuple rawPrefix = wait(data->stack.waitAndPop());

		logOp(format("Created subspace at %s: %s", tupleToString(path).c_str(), printable(rawPrefix.getString(0)).c_str()));
		data->directoryData.push(new Subspace(path, rawPrefix.getString(0)));
		return Void();
	}
};
const char* DirectoryCreateSubspaceFunc::name = "DIRECTORY_CREATE_SUBSPACE";
REGISTER_INSTRUCTION_FUNC(DirectoryCreateSubspaceFunc);

//DIRECTORY_CREATE_LAYER
struct DirectoryCreateLayerFunc : InstructionFunc {
	static const char* name;

	ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
		std::vector<Tuple> args = wait(data->stack.waitAndPop(3));

		int index1 = args[0].getInt(0);
		int index2 = args[1].getInt(0);
		bool allowManualPrefixes = args[2].getInt(0) != 0;

		if(!data->directoryData.directoryList[index1].valid() || !data->directoryData.directoryList[index2].valid()) {
			logOp("Create directory layer: None");
			data->directoryData.push();
		}
		else {
			Subspace* nodeSubspace = data->directoryData.directoryList[index1].subspace.get();
			Subspace* contentSubspace = data->directoryData.directoryList[index2].subspace.get();
logOp(format("Create directory layer: node_subspace (%d) = %s, content_subspace (%d) = %s, allow_manual_prefixes = %d", index1, printable(nodeSubspace->key()).c_str(), index2, printable(nodeSubspace->key()).c_str(), allowManualPrefixes));
|
||||
data->directoryData.push(Reference<IDirectory>(new DirectoryLayer(*nodeSubspace, *contentSubspace, allowManualPrefixes)));
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryCreateLayerFunc::name = "DIRECTORY_CREATE_LAYER";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryCreateLayerFunc);
|
||||
|
||||
//DIRECTORY_CHANGE
|
||||
struct DirectoryChangeFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple index = wait(data->stack.waitAndPop());
|
||||
data->directoryData.directoryListIndex = index.getInt(0);
|
||||
ASSERT(data->directoryData.directoryListIndex < data->directoryData.directoryList.size());
|
||||
|
||||
if(!data->directoryData.directoryList[data->directoryData.directoryListIndex].valid()) {
|
||||
data->directoryData.directoryListIndex = data->directoryData.directoryErrorIndex;
|
||||
}
|
||||
|
||||
if(LOG_DIRS) {
|
||||
DirectoryOrSubspace d = data->directoryData.directoryList[data->directoryData.directoryListIndex];
|
||||
printf("Changed directory to %d (%s @\'%s\')\n", data->directoryData.directoryListIndex, d.typeString().c_str(), d.directory.present() ? pathToString(d.directory.get()->getPath()).c_str() : printable(d.subspace.get()->key()).c_str());
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryChangeFunc::name = "DIRECTORY_CHANGE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryChangeFunc);
|
||||
|
||||
//DIRECTORY_SET_ERROR_INDEX
|
||||
struct DirectorySetErrorIndexFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple index = wait(data->stack.waitAndPop());
|
||||
data->directoryData.directoryErrorIndex = index.getInt(0);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectorySetErrorIndexFunc::name = "DIRECTORY_SET_ERROR_INDEX";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectorySetErrorIndexFunc);
|
||||
|
||||
//DIRECTORY_CREATE_OR_OPEN
|
||||
struct DirectoryCreateOrOpenFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
state IDirectory::Path path = wait(popPath(data));
|
||||
Tuple layerTuple = wait(data->stack.waitAndPop());
|
||||
Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("create_or_open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer] () {
|
||||
return directory->createOrOpen(instruction->tr, path, layer);
|
||||
}));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryCreateOrOpenFunc::name = "DIRECTORY_CREATE_OR_OPEN";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryCreateOrOpenFunc);
|
||||
|
||||
//DIRECTORY_CREATE
|
||||
struct DirectoryCreateFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
state IDirectory::Path path = wait(popPath(data));
|
||||
std::vector<Tuple> args = wait(data->stack.waitAndPop(2));
|
||||
Standalone<StringRef> layer = args[0].getType(0) == Tuple::NULL_TYPE ? StringRef() : args[0].getString(0);
|
||||
Optional<Standalone<StringRef>> prefix = args[1].getType(0) == Tuple::NULL_TYPE ? Optional<Standalone<StringRef>>() : args[1].getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("create %s: layer=%s, prefix=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str(), prefix.present() ? printable(prefix.get()).c_str() : "<not present>"));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer, prefix] () {
|
||||
return directory->create(instruction->tr, path, layer, prefix);
|
||||
}));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryCreateFunc::name = "DIRECTORY_CREATE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryCreateFunc);
|
||||
|
||||
//DIRECTORY_OPEN
|
||||
struct DirectoryOpenFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
state IDirectory::Path path = wait(popPath(data));
|
||||
Tuple layerTuple = wait(data->stack.waitAndPop());
|
||||
Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str()));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(directory->open(instruction->tr, path, layer));
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryOpenFunc::name = "DIRECTORY_OPEN";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryOpenFunc);
|
||||
|
||||
//DIRECTORY_MOVE
|
||||
struct DirectoryMoveFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
std::vector<IDirectory::Path> paths = wait(popPaths(data, 2));
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("move %s to %s", pathToString(combinePaths(directory->getPath(), paths[0])).c_str(), pathToString(combinePaths(directory->getPath(), paths[1])).c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, paths] () {
|
||||
return directory->move(instruction->tr, paths[0], paths[1]);
|
||||
}));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryMoveFunc::name = "DIRECTORY_MOVE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryMoveFunc);
|
||||
|
||||
//DIRECTORY_MOVE_TO
|
||||
struct DirectoryMoveToFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("move %s to %s", pathToString(directory->getPath()).c_str(), pathToString(path).c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, path] () {
|
||||
return directory->moveTo(instruction->tr, path);
|
||||
}));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryMoveToFunc::name = "DIRECTORY_MOVE_TO";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryMoveToFunc);
|
||||
|
||||
//DIRECTORY_REMOVE
|
||||
struct DirectoryRemoveFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
if(count.getInt(0) == 0) {
|
||||
logOp(format("remove %s", pathToString(directory->getPath()).c_str()));
|
||||
|
||||
Void _ = wait(executeMutation(instruction, [this] () {
|
||||
return directory->remove(instruction->tr);
|
||||
}));
|
||||
}
|
||||
else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("remove %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
|
||||
Void _ = wait(executeMutation(instruction, [this, path] () {
|
||||
return directory->remove(instruction->tr, path);
|
||||
}));
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryRemoveFunc::name = "DIRECTORY_REMOVE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRemoveFunc);
|
||||
|
||||
//DIRECTORY_REMOVE_IF_EXISTS
|
||||
struct DirectoryRemoveIfExistsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
if(count.getInt(0) == 0) {
|
||||
logOp(format("remove_if_exists %s", pathToString(directory->getPath()).c_str()));
|
||||
|
||||
bool _ = wait(executeMutation(instruction, [this] () {
|
||||
return directory->removeIfExists(instruction->tr);
|
||||
}));
|
||||
}
|
||||
else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("remove_if_exists %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
|
||||
bool _ = wait(executeMutation(instruction, [this, path] () {
|
||||
return directory->removeIfExists(instruction->tr, path);
|
||||
}));
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryRemoveIfExistsFunc::name = "DIRECTORY_REMOVE_IF_EXISTS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRemoveIfExistsFunc);
|
||||
|
||||
//DIRECTORY_LIST
|
||||
struct DirectoryListFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
state Standalone<VectorRef<StringRef>> subdirs;
|
||||
if(count.getInt(0) == 0) {
|
||||
logOp(format("list %s", pathToString(directory->getPath()).c_str()));
|
||||
Standalone<VectorRef<StringRef>> _subdirs = wait(directory->list(instruction->tr));
|
||||
subdirs = _subdirs;
|
||||
}
|
||||
else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("list %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
Standalone<VectorRef<StringRef>> _subdirs = wait(directory->list(instruction->tr, path));
|
||||
subdirs = _subdirs;
|
||||
}
|
||||
|
||||
Tuple subdirTuple;
|
||||
for(auto &sd : subdirs) {
|
||||
subdirTuple.append(sd, true);
|
||||
}
|
||||
|
||||
data->stack.pushTuple(subdirTuple.pack());
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryListFunc::name = "DIRECTORY_LIST";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryListFunc);
|
||||
|
||||
//DIRECTORY_EXISTS
|
||||
struct DirectoryExistsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
state bool result;
|
||||
if(count.getInt(0) == 0) {
|
||||
bool _result = wait(directory->exists(instruction->tr));
|
||||
result = _result;
|
||||
logOp(format("exists %s: %d", pathToString(directory->getPath()).c_str(), result));
|
||||
}
|
||||
else {
|
||||
state IDirectory::Path path = wait(popPath(data));
|
||||
bool _result = wait(directory->exists(instruction->tr, path));
|
||||
result = _result;
|
||||
logOp(format("exists %s: %d", pathToString(combinePaths(directory->getPath(), path)).c_str(), result));
|
||||
}
|
||||
|
||||
data->stack.push(Tuple().append(result ? 1 : 0).pack());
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryExistsFunc::name = "DIRECTORY_EXISTS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryExistsFunc);
|
||||
|
||||
//DIRECTORY_PACK_KEY
|
||||
struct DirectoryPackKeyFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple tuple = wait(popTuple(data));
|
||||
data->stack.pushTuple(data->directoryData.subspace()->pack(tuple));
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryPackKeyFunc::name = "DIRECTORY_PACK_KEY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryPackKeyFunc);
|
||||
|
||||
//DIRECTORY_UNPACK_KEY
|
||||
struct DirectoryUnpackKeyFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple key = wait(data->stack.waitAndPop());
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
logOp(format("Unpack %s in subspace with prefix %s", printable(key.getString(0)).c_str(), printable(subspace->key()).c_str()));
|
||||
Tuple tuple = subspace->unpack(key.getString(0));
|
||||
for(int i = 0; i < tuple.size(); ++i) {
|
||||
data->stack.push(tuple.subTuple(i, i+1).pack());
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryUnpackKeyFunc::name = "DIRECTORY_UNPACK_KEY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryUnpackKeyFunc);
|
||||
|
||||
//DIRECTORY_RANGE
|
||||
struct DirectoryRangeFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple tuple = wait(popTuple(data));
|
||||
KeyRange range = data->directoryData.subspace()->range(tuple);
|
||||
data->stack.pushTuple(range.begin);
|
||||
data->stack.pushTuple(range.end);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryRangeFunc::name = "DIRECTORY_RANGE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRangeFunc);
|
||||
|
||||
//DIRECTORY_CONTAINS
|
||||
struct DirectoryContainsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple key = wait(data->stack.waitAndPop());
|
||||
bool result = data->directoryData.subspace()->contains(key.getString(0));
|
||||
data->stack.push(Tuple().append(result ? 1 : 0).pack());
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryContainsFunc::name = "DIRECTORY_CONTAINS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryContainsFunc);
|
||||
|
||||
//DIRECTORY_OPEN_SUBSPACE
|
||||
struct DirectoryOpenSubspaceFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple tuple = wait(popTuple(data));
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
logOp(format("open_subspace %s (at %s)", tupleToString(tuple).c_str(), printable(subspace->key()).c_str()));
|
||||
Subspace *child = new Subspace(subspace->subspace(tuple));
|
||||
data->directoryData.push(child);
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryOpenSubspaceFunc::name = "DIRECTORY_OPEN_SUBSPACE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryOpenSubspaceFunc);
|
||||
|
||||
//DIRECTORY_LOG_SUBSPACE
|
||||
struct DirectoryLogSubspaceFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple prefix = wait(data->stack.waitAndPop());
|
||||
Tuple tuple;
|
||||
tuple.append(data->directoryData.directoryListIndex);
|
||||
instruction->tr->set(Subspace(tuple, prefix.getString(0)).key(), data->directoryData.subspace()->key());
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryLogSubspaceFunc::name = "DIRECTORY_LOG_SUBSPACE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryLogSubspaceFunc);
|
||||
|
||||
//DIRECTORY_LOG_DIRECTORY
|
||||
struct DirectoryLogDirectoryFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
state Tuple prefix = wait(data->stack.waitAndPop());
|
||||
state bool exists = wait(directory->exists(instruction->tr));
|
||||
|
||||
state Tuple childrenTuple;
|
||||
if(exists) {
|
||||
Standalone<VectorRef<StringRef>> children = wait(directory->list(instruction->tr));
|
||||
for(auto &c : children) {
|
||||
childrenTuple.append(c, true);
|
||||
}
|
||||
}
|
||||
|
||||
Subspace logSubspace(Tuple().append(data->directoryData.directoryListIndex), prefix.getString(0));
|
||||
|
||||
Tuple pathTuple;
|
||||
for(auto &p : directory->getPath()) {
|
||||
pathTuple.append(p, true);
|
||||
}
|
||||
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("path"), true), pathTuple.pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true), Tuple().append(directory->getLayer()).pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("exists"), true), Tuple().append(exists ? 1 : 0).pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("children"), true), childrenTuple.pack());
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryLogDirectoryFunc::name = "DIRECTORY_LOG_DIRECTORY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryLogDirectoryFunc);
|
||||
|
||||
//DIRECTORY_STRIP_PREFIX
|
||||
struct DirectoryStripPrefixFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple str = wait(data->stack.waitAndPop());
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
ASSERT(str.getString(0).startsWith(subspace->key()));
|
||||
data->stack.pushTuple(str.getString(0).substr(subspace->key().size()));
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
const char* DirectoryStripPrefixFunc::name = "DIRECTORY_STRIP_PREFIX";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryStripPrefixFunc);
|
||||

File diff suppressed because it is too large

@@ -0,0 +1,242 @@
/*
 * Tester.actor.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source version.
#if defined(NO_INTELLISENSE) && !defined(FDB_FLOW_TESTER_TESTER_ACTOR_G_H)
#define FDB_FLOW_TESTER_TESTER_ACTOR_G_H
#include "Tester.actor.g.h"
#elif !defined(FDB_FLOW_TESTER_TESTER_ACTOR_H)
#define FDB_FLOW_TESTER_TESTER_ACTOR_H

#pragma once

#include "flow/IDispatched.h"
#include "bindings/flow/fdb_flow.h"
#include "bindings/flow/IDirectory.h"
#include "bindings/flow/Subspace.h"
#include "bindings/flow/DirectoryLayer.h"

#define LOG_ALL 0
#define LOG_INSTRUCTIONS (LOG_ALL || 0)
#define LOG_OPS (LOG_ALL || 0)
#define LOG_DIRS (LOG_ALL || 0)
#define LOG_ERRORS (LOG_ALL || 0)

struct FlowTesterData;

struct StackItem {
	StackItem() : index(-1) {}
	StackItem(uint32_t i, Future<Standalone<StringRef>> v) : index(i), value(v) {}
	StackItem(uint32_t i, Standalone<StringRef> v) : index(i), value(v) {}
	uint32_t index;
	Future<Standalone<StringRef>> value;
};

struct FlowTesterStack {
	uint32_t index;
	std::vector<StackItem> data;

	void push(Future<Standalone<StringRef>> value) {
		data.push_back(StackItem(index, value));
	}

	void push(Standalone<StringRef> value) {
		push(Future<Standalone<StringRef>>(value));
	}

	void push(const StackItem& item) {
		data.push_back(item);
	}

	void pushTuple(StringRef value, bool utf8=false) {
		FDB::Tuple t;
		t.append(value, utf8);
		data.push_back(StackItem(index, t.pack()));
	}

	void pushError(int errorCode) {
		FDB::Tuple t;
		t.append(LiteralStringRef("ERROR"));
		t.append(format("%d", errorCode));
		// pack above as error string into another tuple
		pushTuple(t.pack().toString());
	}

	std::vector<StackItem> pop(uint32_t count = 1) {
		std::vector<StackItem> items;
		while (!data.empty() && count > 0) {
			items.push_back(data.back());
			data.pop_back();
			count--;
		}
		return items;
	}

	Future<std::vector<FDB::Tuple>> waitAndPop(int count);
	Future<FDB::Tuple> waitAndPop();

	void dup() {
		if (data.empty())
			return;
		data.push_back(data.back());
	}

	void clear() {
		data.clear();
	}
};

struct InstructionData : public ReferenceCounted<InstructionData> {
	bool isDatabase;
	bool isSnapshot;
	StringRef instruction;
	Reference<FDB::Transaction> tr;

	InstructionData(bool _isDatabase, bool _isSnapshot, StringRef _instruction, Reference<FDB::Transaction> _tr)
		: isDatabase(_isDatabase)
		, isSnapshot(_isSnapshot)
		, instruction(_instruction)
		, tr(_tr) {}
};

struct FlowTesterData;

struct InstructionFunc : IDispatched<InstructionFunc, std::string, std::function<Future<Void>(Reference<FlowTesterData> data, Reference<InstructionData> instruction)>> {
	static Future<Void> call(std::string op, Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
		ASSERT(data);
		ASSERT(instruction);

		auto it = dispatches().find(op);
		if(it == dispatches().end()) {
			fprintf(stderr, "Unrecognized instruction: %s\n", op.c_str());
			ASSERT(false);
		}

		return dispatch(op)(data, instruction);
	}
};
#define REGISTER_INSTRUCTION_FUNC(Op) REGISTER_COMMAND(InstructionFunc, Op, name, call)

struct DirectoryOrSubspace {
	Optional<Reference<FDB::IDirectory>> directory;
	Optional<FDB::Subspace*> subspace;

	DirectoryOrSubspace() {}
	DirectoryOrSubspace(Reference<FDB::IDirectory> directory) : directory(directory) {}
	DirectoryOrSubspace(FDB::Subspace *subspace) : subspace(subspace) {}
	DirectoryOrSubspace(Reference<FDB::DirectorySubspace> dirSubspace) : directory(dirSubspace), subspace(dirSubspace.getPtr()) {}

	bool valid() {
		return directory.present() || subspace.present();
	}

	std::string typeString() {
		if(directory.present() && subspace.present()) {
			return "DirectorySubspace";
		}
		else if(directory.present()) {
			return "IDirectory";
		}
		else if(subspace.present()) {
			return "Subspace";
		}
		else {
			return "InvalidDirectory";
		}
	}
};

struct DirectoryTesterData {
	std::vector<DirectoryOrSubspace> directoryList;
	int directoryListIndex;
	int directoryErrorIndex;

	Reference<FDB::IDirectory> directory() {
		ASSERT(directoryListIndex < directoryList.size());
		ASSERT(directoryList[directoryListIndex].directory.present());
		return directoryList[directoryListIndex].directory.get();
	}

	FDB::Subspace* subspace() {
		ASSERT(directoryListIndex < directoryList.size());
		ASSERT(directoryList[directoryListIndex].subspace.present());
		return directoryList[directoryListIndex].subspace.get();
	}

	DirectoryTesterData() : directoryListIndex(0), directoryErrorIndex(0) {
		directoryList.push_back(Reference<FDB::IDirectory>(new FDB::DirectoryLayer()));
	}

	template <class T>
	void push(T item) {
		directoryList.push_back(DirectoryOrSubspace(item));
		if(LOG_DIRS) {
			printf("Pushed %s at %lu\n", directoryList.back().typeString().c_str(), directoryList.size()-1);
			fflush(stdout);
		}
	}

	void push() { push(DirectoryOrSubspace()); }
};

struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
	FDB::API *api;
	Reference<FDB::DatabaseContext> db;
	Standalone<FDB::RangeResultRef> instructions;
	Standalone<StringRef> trName;
	FlowTesterStack stack;
	FDB::Version lastVersion;
	DirectoryTesterData directoryData;

	std::vector<Future<Void>> subThreads;

	Future<Void> processInstruction(Reference<InstructionData> instruction) {
		return InstructionFunc::call(instruction->instruction.toString(), Reference<FlowTesterData>::addRef(this), instruction);
	}

	FlowTesterData(FDB::API *api) {
		this->api = api;
	}
};

std::string tupleToString(FDB::Tuple const& tuple);

ACTOR template <class F>
Future<decltype(fake<F>()().getValue())> executeMutation(Reference<InstructionData> instruction, F func) {
	loop {
		try {
			state decltype(fake<F>()().getValue()) result = wait(func());
			if(instruction->isDatabase) {
				Void _ = wait(instruction->tr->commit());
			}
			return result;
		}
		catch(Error &e) {
			if(instruction->isDatabase) {
				Void _ = wait(instruction->tr->onError(e));
			}
			else {
				throw;
			}
		}
	}
}

#endif
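
The registry above dispatches instructions by op name. For illustration only, a hypothetical no-op instruction (the NoOpFunc struct and the "NO_OP" name are invented here, not part of this commit) would plug in exactly like the real handlers in DirectoryTester.actor.cpp:

// Hypothetical instruction: does nothing and completes immediately.
// REGISTER_INSTRUCTION_FUNC adds it to InstructionFunc's dispatch map
// under the op name "NO_OP".
struct NoOpFunc : InstructionFunc {
	static const char* name;

	static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
		return Void();
	}
};
const char* NoOpFunc::name = "NO_OP";
REGISTER_INSTRUCTION_FUNC(NoOpFunc);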

@@ -0,0 +1,128 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <Import Project="$(SolutionDir)versions.target" />
  <PropertyGroup Condition="'$(Release)' != 'true' ">
    <PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Release)' == 'true' ">
    <PreReleaseDecoration>
    </PreReleaseDecoration>
  </PropertyGroup>
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|X64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|X64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <ItemGroup>
    <ActorCompiler Include="Tester.actor.h" />
  </ItemGroup>
  <ItemGroup>
    <ActorCompiler Include="DirectoryTester.actor.cpp" />
    <ActorCompiler Include="Tester.actor.cpp" />
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{086EB89C-CDBD-4ABE-8296-5CA224244C80}</ProjectGuid>
    <Keyword>Win32Proj</Keyword>
    <RootNamespace>fdb_flow_tester</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>false</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
    <LinkIncremental>true</LinkIncremental>
    <OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
    <IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
    <IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_52_0</IncludePath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
    <LinkIncremental>false</LinkIncremental>
    <OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
    <IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
    <IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <MinimalRebuild>false</MinimalRebuild>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
    </Link>
    <PreBuildEvent>
      <Command>
      </Command>
    </PreBuildEvent>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
    <ClCompile>
      <WarningLevel>Level3</WarningLevel>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <Optimization>Full</Optimization>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <BufferSecurityCheck>false</BufferSecurityCheck>
      <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
      <AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>false</EnableCOMDATFolding>
      <OptimizeReferences>false</OptimizeReferences>
      <AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
      <LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
    </Link>
    <PreBuildEvent>
      <Command>
      </Command>
    </PreBuildEvent>
  </ItemDefinitionGroup>
  <ImportGroup Label="ExtensionTargets">
    <Import Project="..\..\..\flow\actorcompiler\ActorCompiler.targets" />
  </ImportGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup>
    <ActorCompiler Include="DirectoryTester.actor.cpp" />
    <ActorCompiler Include="Tester.actor.cpp" />
    <ActorCompiler Include="Tester.actor.h" />
  </ItemGroup>
</Project>

@@ -0,0 +1,42 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# -*- mode: makefile; -*-

fdb_flow_tester_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_tester_LDFLAGS := -Llib $(fdbrpc_LDFLAGS) -lfdb_c
fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libfdbrpc.a lib/libflow.a lib/libfdb_c.$(DLEXT)

fdb_flow_tester: lib/libfdb_c.$(DLEXT)
	@mkdir -p bindings/flow/bin
	@rm -f bindings/flow/bin/fdb_flow_tester
	@cp bin/fdb_flow_tester bindings/flow/bin/fdb_flow_tester

fdb_flow_tester_clean: _fdb_flow_tester_clean

_fdb_flow_tester_clean:
	@rm -rf bindings/flow/bin

ifeq ($(PLATFORM),linux)
  fdb_flow_tester_LIBS += -ldl -lpthread -lrt
  fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc
else ifeq ($(PLATFORM),osx)
  fdb_flow_tester_LDFLAGS += -lc++
endif

@@ -0,0 +1,21 @@
fdb-go
======

[Go language](http://golang.org) bindings for [FoundationDB](http://foundationdb.org/documentation/), a distributed key-value store with ACID transactions.

This package requires:

- Go 1.1+ with CGO enabled
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://files.foundationdb.org/fdb-c/))

Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-500.

To install this package, in the top level of this repository run:

    make fdb_go

Documentation
-------------

* [API documentation](https://foundationdb.org/documentation/godoc/fdb.html)
* [Tutorial](https://foundationdb.org/documentation/class-scheduling-go.html)

@@ -0,0 +1,567 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/* A little code to ease navigation of these documents.
|
||||
*
|
||||
* On window load we:
|
||||
* + Bind search box hint placeholder show/hide events (bindSearchEvents)
|
||||
* + Generate a table of contents (generateTOC)
|
||||
* + Bind foldable sections (bindToggles)
|
||||
* + Bind links to foldable sections (bindToggleLinks)
|
||||
*/
|
||||
|
||||
(function() {
|
||||
'use strict';
|
||||
|
||||
// Mobile-friendly topbar menu
|
||||
$(function() {
|
||||
var menu = $('#menu');
|
||||
var menuButton = $('#menu-button');
|
||||
var menuButtonArrow = $('#menu-button-arrow');
|
||||
menuButton.click(function(event) {
|
||||
menu.toggleClass('menu-visible');
|
||||
menuButtonArrow.toggleClass('vertical-flip');
|
||||
event.preventDefault();
|
||||
return false;
|
||||
});
|
||||
});
|
||||
|
||||
function bindSearchEvents() {
|
||||
|
||||
var search = $('#search');
|
||||
if (search.length === 0) {
|
||||
return; // no search box
|
||||
}
|
||||
|
||||
function clearInactive() {
|
||||
if (search.is('.inactive')) {
|
||||
search.val('');
|
||||
search.removeClass('inactive');
|
||||
}
|
||||
}
|
||||
|
||||
function restoreInactive() {
|
||||
if (search.val() !== '') {
|
||||
return;
|
||||
}
|
||||
search.val(search.attr('placeholder'));
|
||||
search.addClass('inactive');
|
||||
}
|
||||
|
||||
search.on('focus', clearInactive);
|
||||
search.on('blur', restoreInactive);
|
||||
|
||||
restoreInactive();
|
||||
}
|
||||
|
||||
/* Generates a table of contents: looks for h2 and h3 elements and generates
|
||||
* links. "Decorates" the element with id=="nav" with this table of contents.
|
||||
*/
|
||||
function generateTOC() {
|
||||
if ($('#manual-nav').length > 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
var nav = $('#nav');
|
||||
if (nav.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
var toc_items = [];
|
||||
$(nav).nextAll('h2, h3').each(function() {
|
||||
var node = this;
|
||||
if (node.id == '')
|
||||
node.id = 'tmp_' + toc_items.length;
|
||||
var link = $('<a/>').attr('href', '#' + node.id).text($(node).text());
|
||||
var item;
|
||||
if ($(node).is('h2')) {
|
||||
item = $('<dt/>');
|
||||
} else { // h3
|
||||
item = $('<dd class="indent"/>');
|
||||
}
|
||||
item.append(link);
|
||||
toc_items.push(item);
|
||||
});
|
||||
if (toc_items.length <= 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
var dl1 = $('<dl/>');
|
||||
var dl2 = $('<dl/>');
|
||||
|
||||
var split_index = (toc_items.length / 2) + 1;
|
||||
if (split_index < 8) {
|
||||
split_index = toc_items.length;
|
||||
}
|
||||
for (var i = 0; i < split_index; i++) {
|
||||
dl1.append(toc_items[i]);
|
||||
}
|
||||
for (/* keep using i */; i < toc_items.length; i++) {
|
||||
dl2.append(toc_items[i]);
|
||||
}
|
||||
|
||||
var tocTable = $('<table class="unruled"/>').appendTo(nav);
|
||||
var tocBody = $('<tbody/>').appendTo(tocTable);
|
||||
var tocRow = $('<tr/>').appendTo(tocBody);
|
||||
|
||||
// 1st column
|
||||
$('<td class="first"/>').appendTo(tocRow).append(dl1);
|
||||
// 2nd column
|
||||
$('<td/>').appendTo(tocRow).append(dl2);
|
||||
}
|
||||
|
||||
function bindToggle(el) {
|
||||
$('.toggleButton', el).click(function() {
|
||||
if ($(el).is('.toggle')) {
|
||||
$(el).addClass('toggleVisible').removeClass('toggle');
|
||||
} else {
|
||||
$(el).addClass('toggle').removeClass('toggleVisible');
|
||||
}
|
||||
});
|
||||
}
|
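// Cross-reference within this commit (annotation, not original commentary):
// the style.css added below defines ".toggle .expanded { display: none; }"
// and ".toggleVisible .expanded { display: block; }", so swapping these two
// classes on the container is what actually shows or hides foldable content.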
||||
function bindToggles(selector) {
|
||||
$(selector).each(function(i, el) {
|
||||
bindToggle(el);
|
||||
});
|
||||
}
|
||||
|
||||
function bindToggleLink(el, prefix) {
|
||||
$(el).click(function() {
|
||||
var href = $(el).attr('href');
|
||||
var i = href.indexOf('#'+prefix);
|
||||
if (i < 0) {
|
||||
return;
|
||||
}
|
||||
var id = '#' + prefix + href.slice(i+1+prefix.length);
|
||||
if ($(id).is('.toggle')) {
|
||||
$(id).find('.toggleButton').first().click();
|
||||
}
|
||||
});
|
||||
}
|
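// Illustrative example (not in the original source; the filename is made up):
// with prefix "example_", clicking <a class="exampleLink"
// href="pkg.html#example_Foo"> derives the id "#example_Foo" and, if that
// element is still collapsed (class "toggle"), clicks its toggleButton to
// expand it before the browser jumps there.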
||||
function bindToggleLinks(selector, prefix) {
|
||||
$(selector).each(function(i, el) {
|
||||
bindToggleLink(el, prefix);
|
||||
});
|
||||
}
|
||||
|
||||
function setupDropdownPlayground() {
|
||||
if (!$('#page').is('.wide')) {
|
||||
return; // don't show on front page
|
||||
}
|
||||
var button = $('#playgroundButton');
|
||||
var div = $('#playground');
|
||||
var setup = false;
|
||||
button.toggle(function() {
|
||||
button.addClass('active');
|
||||
div.show();
|
||||
if (setup) {
|
||||
return;
|
||||
}
|
||||
setup = true;
|
||||
playground({
|
||||
'codeEl': $('.code', div),
|
||||
'outputEl': $('.output', div),
|
||||
'runEl': $('.run', div),
|
||||
'fmtEl': $('.fmt', div),
|
||||
'shareEl': $('.share', div),
|
||||
'shareRedirect': '//play.golang.org/p/'
|
||||
});
|
||||
},
|
||||
function() {
|
||||
button.removeClass('active');
|
||||
div.hide();
|
||||
});
|
||||
button.show();
|
||||
$('#menu').css('min-width', '+=60');
|
||||
}
|
||||
|
||||
function setupInlinePlayground() {
|
||||
'use strict';
|
||||
// Set up playground when each element is toggled.
|
||||
$('div.play').each(function (i, el) {
|
||||
// Set up playground for this example.
|
||||
var setup = function() {
|
||||
var code = $('.code', el);
|
||||
playground({
|
||||
'codeEl': code,
|
||||
'outputEl': $('.output', el),
|
||||
'runEl': $('.run', el),
|
||||
'fmtEl': $('.fmt', el),
|
||||
'shareEl': $('.share', el),
|
||||
'shareRedirect': '//play.golang.org/p/'
|
||||
});
|
||||
|
||||
// Make the code textarea resize to fit content.
|
||||
var resize = function() {
|
||||
code.height(0);
|
||||
var h = code[0].scrollHeight;
|
||||
code.height(h+20); // minimize bouncing.
|
||||
code.closest('.input').height(h);
|
||||
};
|
||||
code.on('keydown', resize);
|
||||
code.on('keyup', resize);
|
||||
code.keyup(); // resize now.
|
||||
};
|
||||
|
||||
// If example already visible, set up playground now.
|
||||
if ($(el).is(':visible')) {
|
||||
setup();
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise, set up playground when example is expanded.
|
||||
var built = false;
|
||||
$(el).closest('.toggle').click(function() {
|
||||
// Only set up once.
|
||||
if (!built) {
|
||||
setup();
|
||||
built = true;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// fixFocus tries to put focus to div#page so that keyboard navigation works.
|
||||
function fixFocus() {
|
||||
var page = $('div#page');
|
||||
var topbar = $('div#topbar');
|
||||
page.css('outline', 0); // disable outline when focused
|
||||
page.attr('tabindex', -1); // and set tabindex so that it is focusable
|
||||
$(window).resize(function (evt) {
|
||||
// Only focus the page when the topbar is at a fixed position (that is, in
|
||||
// front of the page, where keyboard events would otherwise go to the topbar).
|
||||
// Focusing the page means up/down arrows, space, etc. work as expected.
|
||||
if (topbar.css('position') == "fixed")
|
||||
page.focus();
|
||||
}).resize();
|
||||
}
|
||||
|
||||
function toggleHash() {
|
||||
var hash = $(window.location.hash);
|
||||
if (hash.is('.toggle')) {
|
||||
hash.find('.toggleButton').first().click();
|
||||
}
|
||||
}
|
||||
|
||||
function personalizeInstallInstructions() {
|
||||
var prefix = '?download=';
|
||||
var s = window.location.search;
|
||||
if (s.indexOf(prefix) != 0) {
|
||||
// No 'download' query string; bail.
|
||||
return;
|
||||
}
|
||||
|
||||
var filename = s.substr(prefix.length);
|
||||
var filenameRE = /^go1\.\d+(\.\d+)?([a-z0-9]+)?\.([a-z0-9]+)(-[a-z0-9]+)?(-osx10\.[68])?\.([a-z.]+)$/;
|
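// Worked example (illustrative, not in the original): for the filename
// "go1.9.2.linux-amd64.tar.gz" the regexp above yields m[3] == "linux" (the
// OS) and m[6] == "tar.gz" (the extension), which drive the hide/show logic below.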
||||
$('.downloadFilename').text(filename);
|
||||
$('.hideFromDownload').hide();
|
||||
var m = filenameRE.exec(filename);
|
||||
if (!m) {
|
||||
// Can't interpret file name; bail.
|
||||
return;
|
||||
}
|
||||
|
||||
var os = m[3];
|
||||
var ext = m[6];
|
||||
if (ext != 'tar.gz') {
|
||||
$('#tarballInstructions').hide();
|
||||
}
|
||||
if (os != 'darwin' || ext != 'pkg') {
|
||||
$('#darwinPackageInstructions').hide();
|
||||
}
|
||||
if (os != 'windows') {
|
||||
$('#windowsInstructions').hide();
|
||||
} else {
|
||||
if (ext != 'msi') {
|
||||
$('#windowsInstallerInstructions').hide();
|
||||
}
|
||||
if (ext != 'zip') {
|
||||
$('#windowsZipInstructions').hide();
|
||||
}
|
||||
}
|
||||
|
||||
var download = "https://storage.googleapis.com/golang/" + filename;
|
||||
|
||||
var message = $('<p class="downloading">'+
|
||||
'Your download should begin shortly. '+
|
||||
'If it does not, click <a>this link</a>.</p>');
|
||||
message.find('a').attr('href', download);
|
||||
message.insertAfter('#nav');
|
||||
|
||||
window.location = download;
|
||||
}
|
||||
|
||||
$(document).ready(function() {
|
||||
bindSearchEvents();
|
||||
generateTOC();
|
||||
bindToggles(".toggle");
|
||||
bindToggles(".toggleVisible");
|
||||
bindToggleLinks(".exampleLink", "example_");
|
||||
bindToggleLinks(".overviewLink", "");
|
||||
bindToggleLinks(".examplesLink", "");
|
||||
bindToggleLinks(".indexLink", "");
|
||||
setupDropdownPlayground();
|
||||
setupInlinePlayground();
|
||||
fixFocus();
|
||||
setupTypeInfo();
|
||||
setupCallgraphs();
|
||||
toggleHash();
|
||||
personalizeInstallInstructions();
|
||||
|
||||
// godoc.html defines window.initFuncs in the <head> tag, and root.html and
|
||||
// codewalk.js push their on-page-ready functions to the list.
|
||||
// We execute those functions here, to avoid loading jQuery until the page
|
||||
// content is loaded.
|
||||
for (var i = 0; i < window.initFuncs.length; i++) window.initFuncs[i]();
|
||||
});
|
||||
|
||||
// -- analysis ---------------------------------------------------------
|
||||
|
||||
// escapeHTML returns HTML for s, with metacharacters quoted.
|
||||
// It is safe for use in both elements and attributes
|
||||
// (unlike the "set innerText, read innerHTML" trick).
|
||||
function escapeHTML(s) {
|
||||
return s.replace(/&/g, '&amp;').
|
||||
replace(/\"/g, '&quot;').
|
||||
replace(/\'/g, '&#39;').
|
||||
replace(/</g, '&lt;').
|
||||
replace(/>/g, '&gt;');
|
||||
}
|
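// Usage sketch (annotation, not in the original source):
//   escapeHTML('a < b & "c"')   // -> 'a &lt; b &amp; &quot;c&quot;'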
||||
|
||||
// makeAnchor returns HTML for an <a> element, given an anchorJSON object.
|
||||
function makeAnchor(json) {
|
||||
var html = escapeHTML(json.Text);
|
||||
if (json.Href != "") {
|
||||
html = "<a href='" + escapeHTML(json.Href) + "'>" + html + "</a>";
|
||||
}
|
||||
return html;
|
||||
}
|
||||
|
||||
function showLowFrame(html) {
|
||||
var lowframe = document.getElementById('lowframe');
|
||||
lowframe.style.height = "200px";
|
||||
lowframe.innerHTML = "<p style='text-align: left;'>" + html + "</p>\n" +
|
||||
"<div onclick='hideLowFrame()' style='position: absolute; top: 0; right: 0; cursor: pointer;'>✘</div>"
|
||||
};
|
||||
|
||||
document.hideLowFrame = function() {
|
||||
var lowframe = document.getElementById('lowframe');
|
||||
lowframe.style.height = "0px";
|
||||
}
|
||||
|
||||
// onClickCallers is the onclick action for the 'func' tokens of a
|
||||
// function declaration.
|
||||
document.onClickCallers = function(index) {
|
||||
var data = document.ANALYSIS_DATA[index]
|
||||
if (data.Callers.length == 1 && data.Callers[0].Sites.length == 1) {
|
||||
document.location = data.Callers[0].Sites[0].Href; // jump to sole caller
|
||||
return;
|
||||
}
|
||||
|
||||
var html = "Callers of <code>" + escapeHTML(data.Callee) + "</code>:<br/>\n";
|
||||
for (var i = 0; i < data.Callers.length; i++) {
|
||||
var caller = data.Callers[i];
|
||||
html += "<code>" + escapeHTML(caller.Func) + "</code>";
|
||||
var sites = caller.Sites;
|
||||
if (sites != null && sites.length > 0) {
|
||||
html += " at line ";
|
||||
for (var j = 0; j < sites.length; j++) {
|
||||
if (j > 0) {
|
||||
html += ", ";
|
||||
}
|
||||
html += "<code>" + makeAnchor(sites[j]) + "</code>";
|
||||
}
|
||||
}
|
||||
html += "<br/>\n";
|
||||
}
|
||||
showLowFrame(html);
|
||||
};
|
||||
|
||||
// onClickCallees is the onclick action for the '(' token of a function call.
|
||||
document.onClickCallees = function(index) {
|
||||
var data = document.ANALYSIS_DATA[index]
|
||||
if (data.Callees.length == 1) {
|
||||
document.location = data.Callees[0].Href; // jump to sole callee
|
||||
return;
|
||||
}
|
||||
|
||||
var html = "Callees of this " + escapeHTML(data.Descr) + ":<br/>\n";
|
||||
for (var i = 0; i < data.Callees.length; i++) {
|
||||
html += "<code>" + makeAnchor(data.Callees[i]) + "</code><br/>\n";
|
||||
}
|
||||
showLowFrame(html);
|
||||
};
|
||||
|
||||
// onClickTypeInfo is the onclick action for identifiers declaring a named type.
|
||||
document.onClickTypeInfo = function(index) {
|
||||
var data = document.ANALYSIS_DATA[index];
|
||||
var html = "Type <code>" + data.Name + "</code>: " +
|
||||
" <small>(size=" + data.Size + ", align=" + data.Align + ")</small><br/>\n";
|
||||
html += implementsHTML(data);
|
||||
html += methodsetHTML(data);
|
||||
showLowFrame(html);
|
||||
};
|
||||
|
||||
// implementsHTML returns HTML for the implements relation of the
|
||||
// specified TypeInfoJSON value.
|
||||
function implementsHTML(info) {
|
||||
var html = "";
|
||||
if (info.ImplGroups != null) {
|
||||
for (var i = 0; i < info.ImplGroups.length; i++) {
|
||||
var group = info.ImplGroups[i];
|
||||
var x = "<code>" + escapeHTML(group.Descr) + "</code> ";
|
||||
for (var j = 0; j < group.Facts.length; j++) {
|
||||
var fact = group.Facts[j];
|
||||
var y = "<code>" + makeAnchor(fact.Other) + "</code>";
|
||||
if (fact.ByKind != null) {
|
||||
html += escapeHTML(fact.ByKind) + " type " + y + " implements " + x;
|
||||
} else {
|
||||
html += x + " implements " + y;
|
||||
}
|
||||
html += "<br/>\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
return html;
|
||||
}
|
||||
|
||||
|
||||
// methodsetHTML returns HTML for the methodset of the specified
|
||||
// TypeInfoJSON value.
|
||||
function methodsetHTML(info) {
|
||||
var html = "";
|
||||
if (info.Methods != null) {
|
||||
for (var i = 0; i < info.Methods.length; i++) {
|
||||
html += "<code>" + makeAnchor(info.Methods[i]) + "</code><br/>\n";
|
||||
}
|
||||
}
|
||||
return html;
|
||||
}
|
||||
|
||||
// onClickComm is the onclick action for channel "make" and "<-"
|
||||
// send/receive tokens.
|
||||
document.onClickComm = function(index) {
|
||||
var ops = document.ANALYSIS_DATA[index].Ops
|
||||
if (ops.length == 1) {
|
||||
document.location = ops[0].Op.Href; // jump to sole element
|
||||
return;
|
||||
}
|
||||
|
||||
var html = "Operations on this channel:<br/>\n";
|
||||
for (var i = 0; i < ops.length; i++) {
|
||||
html += makeAnchor(ops[i].Op) + " by <code>" + escapeHTML(ops[i].Fn) + "</code><br/>\n";
|
||||
}
|
||||
if (ops.length == 0) {
|
||||
html += "(none)<br/>\n";
|
||||
}
|
||||
showLowFrame(html);
|
||||
};
|
||||
|
||||
$(window).load(function() {
|
||||
// Scroll window so that first selection is visible.
|
||||
// (This means we don't need to emit id='L%d' spans for each line.)
|
||||
// TODO(adonovan): ideally, scroll it so that it's under the pointer,
|
||||
// but I don't know how to get the pointer y coordinate.
|
||||
var elts = document.getElementsByClassName("selection");
|
||||
if (elts.length > 0) {
|
||||
elts[0].scrollIntoView()
|
||||
}
|
||||
});
|
||||
|
||||
// setupTypeInfo populates the "Implements" and "Method set" toggle for
|
||||
// each type in the package doc.
|
||||
function setupTypeInfo() {
|
||||
for (var i in document.ANALYSIS_DATA) {
|
||||
var data = document.ANALYSIS_DATA[i];
|
||||
|
||||
var el = document.getElementById("implements-" + i);
|
||||
if (el != null) {
|
||||
// el != null => data is TypeInfoJSON.
|
||||
if (data.ImplGroups != null) {
|
||||
el.innerHTML = implementsHTML(data);
|
||||
el.parentNode.parentNode.style.display = "block";
|
||||
}
|
||||
}
|
||||
|
||||
var el = document.getElementById("methodset-" + i);
|
||||
if (el != null) {
|
||||
// el != null => data is TypeInfoJSON.
|
||||
if (data.Methods != null) {
|
||||
el.innerHTML = methodsetHTML(data);
|
||||
el.parentNode.parentNode.style.display = "block";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function setupCallgraphs() {
|
||||
if (document.CALLGRAPH == null) {
|
||||
return
|
||||
}
|
||||
document.getElementById("pkg-callgraph").style.display = "block";
|
||||
|
||||
var treeviews = document.getElementsByClassName("treeview");
|
||||
for (var i = 0; i < treeviews.length; i++) {
|
||||
var tree = treeviews[i];
|
||||
if (tree.id == null || tree.id.indexOf("callgraph-") != 0) {
|
||||
continue;
|
||||
}
|
||||
var id = tree.id.substring("callgraph-".length);
|
||||
$(tree).treeview({collapsed: true, animated: "fast"});
|
||||
document.cgAddChildren(tree, tree, [id]);
|
||||
tree.parentNode.parentNode.style.display = "block";
|
||||
}
|
||||
}
|
||||
|
||||
document.cgAddChildren = function(tree, ul, indices) {
|
||||
if (indices != null) {
|
||||
for (var i = 0; i < indices.length; i++) {
|
||||
var li = cgAddChild(tree, ul, document.CALLGRAPH[indices[i]]);
|
||||
if (i == indices.length - 1) {
|
||||
$(li).addClass("last");
|
||||
}
|
||||
}
|
||||
}
|
||||
$(tree).treeview({animated: "fast", add: ul});
|
||||
}
|
||||
|
||||
// cgAddChild adds an <li> element for document.CALLGRAPH node cgn to
|
||||
// the parent <ul> element ul. tree is the tree's root <ul> element.
|
||||
function cgAddChild(tree, ul, cgn) {
|
||||
var li = document.createElement("li");
|
||||
ul.appendChild(li);
|
||||
li.className = "closed";
|
||||
|
||||
var code = document.createElement("code");
|
||||
|
||||
if (cgn.Callees != null) {
|
||||
$(li).addClass("expandable");
|
||||
|
||||
// Event handlers and innerHTML updates don't play nicely together,
|
||||
// hence all this explicit DOM manipulation.
|
||||
var hitarea = document.createElement("div");
|
||||
hitarea.className = "hitarea expandable-hitarea";
|
||||
li.appendChild(hitarea);
|
||||
|
||||
li.appendChild(code);
|
||||
|
||||
var childUL = document.createElement("ul");
|
||||
li.appendChild(childUL);
|
||||
childUL.setAttribute('style', "display: none;");
|
||||
|
||||
var onClick = function() {
|
||||
document.cgAddChildren(tree, childUL, cgn.Callees);
|
||||
hitarea.removeEventListener('click', onClick)
|
||||
};
|
||||
hitarea.addEventListener('click', onClick);
|
||||
|
||||
} else {
|
||||
li.appendChild(code);
|
||||
}
|
||||
code.innerHTML += " " + makeAnchor(cgn.Func);
|
||||
return li
|
||||
}
|
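// Flow summary (restating the code above, not new behavior): setupCallgraphs
// seeds each "callgraph-<id>" treeview with the matching document.CALLGRAPH
// node, and cgAddChild defers building a node's children until its hitarea is
// first clicked, after which the one-shot click handler removes itself.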
||||
|
||||
})();
|
@ -0,0 +1,76 @@
|
|||
/* https://github.com/jzaefferer/jquery-treeview/blob/master/jquery.treeview.css */
|
||||
/* License: MIT. */
|
||||
.treeview, .treeview ul {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.treeview ul {
|
||||
background-color: white;
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
.treeview .hitarea {
|
||||
background: url(images/treeview-default.gif) -64px -25px no-repeat;
|
||||
height: 16px;
|
||||
width: 16px;
|
||||
margin-left: -16px;
|
||||
float: left;
|
||||
cursor: pointer;
|
||||
}
|
||||
/* fix for IE6 */
|
||||
* html .hitarea {
|
||||
display: inline;
|
||||
float:none;
|
||||
}
|
||||
|
||||
.treeview li {
|
||||
margin: 0;
|
||||
padding: 3px 0pt 3px 16px;
|
||||
}
|
||||
|
||||
.treeview a.selected {
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
#treecontrol { margin: 1em 0; display: none; }
|
||||
|
||||
.treeview .hover { color: red; cursor: pointer; }
|
||||
|
||||
.treeview li { background: url(images/treeview-default-line.gif) 0 0 no-repeat; }
|
||||
.treeview li.collapsable, .treeview li.expandable { background-position: 0 -176px; }
|
||||
|
||||
.treeview .expandable-hitarea { background-position: -80px -3px; }
|
||||
|
||||
.treeview li.last { background-position: 0 -1766px }
|
||||
.treeview li.lastCollapsable, .treeview li.lastExpandable { background-image: url(images/treeview-default.gif); }
|
||||
.treeview li.lastCollapsable { background-position: 0 -111px }
|
||||
.treeview li.lastExpandable { background-position: -32px -67px }
|
||||
|
||||
.treeview div.lastCollapsable-hitarea, .treeview div.lastExpandable-hitarea { background-position: 0; }
|
||||
|
||||
.treeview-red li { background-image: url(images/treeview-red-line.gif); }
|
||||
.treeview-red .hitarea, .treeview-red li.lastCollapsable, .treeview-red li.lastExpandable { background-image: url(images/treeview-red.gif); }
|
||||
|
||||
.treeview-black li { background-image: url(images/treeview-black-line.gif); }
|
||||
.treeview-black .hitarea, .treeview-black li.lastCollapsable, .treeview-black li.lastExpandable { background-image: url(images/treeview-black.gif); }
|
||||
|
||||
.treeview-gray li { background-image: url(images/treeview-gray-line.gif); }
|
||||
.treeview-gray .hitarea, .treeview-gray li.lastCollapsable, .treeview-gray li.lastExpandable { background-image: url(images/treeview-gray.gif); }
|
||||
|
||||
.treeview-famfamfam li { background-image: url(images/treeview-famfamfam-line.gif); }
|
||||
.treeview-famfamfam .hitarea, .treeview-famfamfam li.lastCollapsable, .treeview-famfamfam li.lastExpandable { background-image: url(images/treeview-famfamfam.gif); }
|
||||
|
||||
.treeview .placeholder {
|
||||
background: url(images/ajax-loader.gif) 0 0 no-repeat;
|
||||
height: 16px;
|
||||
width: 16px;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.filetree li { padding: 3px 0 2px 16px; }
|
||||
.filetree span.folder, .filetree span.file { padding: 1px 0 1px 16px; display: block; }
|
||||
.filetree span.folder { background: url(images/folder.gif) 0 0 no-repeat; }
|
||||
.filetree li.expandable span.folder { background: url(images/folder-closed.gif) 0 0 no-repeat; }
|
||||
.filetree span.file { background: url(images/file.gif) 0 0 no-repeat; }
|
|
@ -0,0 +1,39 @@
|
|||
/* https://github.com/jzaefferer/jquery-treeview/blob/master/jquery.treeview.edit.js */
|
||||
/* License: MIT. */
|
||||
(function($) {
|
||||
var CLASSES = $.treeview.classes;
|
||||
var proxied = $.fn.treeview;
|
||||
$.fn.treeview = function(settings) {
|
||||
settings = $.extend({}, settings);
|
||||
if (settings.add) {
|
||||
return this.trigger("add", [settings.add]);
|
||||
}
|
||||
if (settings.remove) {
|
||||
return this.trigger("remove", [settings.remove]);
|
||||
}
|
||||
return proxied.apply(this, arguments).bind("add", function(event, branches) {
|
||||
$(branches).prev()
|
||||
.removeClass(CLASSES.last)
|
||||
.removeClass(CLASSES.lastCollapsable)
|
||||
.removeClass(CLASSES.lastExpandable)
|
||||
.find(">.hitarea")
|
||||
.removeClass(CLASSES.lastCollapsableHitarea)
|
||||
.removeClass(CLASSES.lastExpandableHitarea);
|
||||
$(branches).find("li").andSelf().prepareBranches(settings).applyClasses(settings, $(this).data("toggler"));
|
||||
}).bind("remove", function(event, branches) {
|
||||
var prev = $(branches).prev();
|
||||
var parent = $(branches).parent();
|
||||
$(branches).remove();
|
||||
prev.filter(":last-child").addClass(CLASSES.last)
|
||||
.filter("." + CLASSES.expandable).replaceClass(CLASSES.last, CLASSES.lastExpandable).end()
|
||||
.find(">.hitarea").replaceClass(CLASSES.expandableHitarea, CLASSES.lastExpandableHitarea).end()
|
||||
.filter("." + CLASSES.collapsable).replaceClass(CLASSES.last, CLASSES.lastCollapsable).end()
|
||||
.find(">.hitarea").replaceClass(CLASSES.collapsableHitarea, CLASSES.lastCollapsableHitarea);
|
||||
if (parent.is(":not(:has(>))") && parent[0] != this) {
|
||||
parent.parent().removeClass(CLASSES.collapsable).removeClass(CLASSES.expandable)
|
||||
parent.siblings(".hitarea").andSelf().remove();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
})(jQuery);
|
|
@ -0,0 +1,256 @@
|
|||
/*
|
||||
* Treeview 1.4.1 - jQuery plugin to hide and show branches of a tree
|
||||
*
|
||||
* http://bassistance.de/jquery-plugins/jquery-plugin-treeview/
|
||||
* http://docs.jquery.com/Plugins/Treeview
|
||||
*
|
||||
* Copyright (c) 2007 Jörn Zaefferer
|
||||
*
|
||||
* Dual licensed under the MIT and GPL licenses:
|
||||
* http://www.opensource.org/licenses/mit-license.php
|
||||
* http://www.gnu.org/licenses/gpl.html
|
||||
*
|
||||
* Revision: $Id: jquery.treeview.js 5759 2008-07-01 07:50:28Z joern.zaefferer $
|
||||
*
|
||||
*/
|
||||
|
||||
;(function($) {
|
||||
|
||||
// TODO rewrite as a widget, removing all the extra plugins
|
||||
$.extend($.fn, {
|
||||
swapClass: function(c1, c2) {
|
||||
var c1Elements = this.filter('.' + c1);
|
||||
this.filter('.' + c2).removeClass(c2).addClass(c1);
|
||||
c1Elements.removeClass(c1).addClass(c2);
|
||||
return this;
|
||||
},
|
||||
replaceClass: function(c1, c2) {
|
||||
return this.filter('.' + c1).removeClass(c1).addClass(c2).end();
|
||||
},
|
||||
hoverClass: function(className) {
|
||||
className = className || "hover";
|
||||
return this.hover(function() {
|
||||
$(this).addClass(className);
|
||||
}, function() {
|
||||
$(this).removeClass(className);
|
||||
});
|
||||
},
|
||||
heightToggle: function(animated, callback) {
|
||||
animated ?
|
||||
this.animate({ height: "toggle" }, animated, callback) :
|
||||
this.each(function(){
|
||||
jQuery(this)[ jQuery(this).is(":hidden") ? "show" : "hide" ]();
|
||||
if(callback)
|
||||
callback.apply(this, arguments);
|
||||
});
|
||||
},
|
||||
heightHide: function(animated, callback) {
|
||||
if (animated) {
|
||||
this.animate({ height: "hide" }, animated, callback);
|
||||
} else {
|
||||
this.hide();
|
||||
if (callback)
|
||||
this.each(callback);
|
||||
}
|
||||
},
|
||||
prepareBranches: function(settings) {
|
||||
if (!settings.prerendered) {
|
||||
// mark last tree items
|
||||
this.filter(":last-child:not(ul)").addClass(CLASSES.last);
|
||||
// collapse the whole tree, or only branches marked as closed; branches marked as open always stay expanded
|
||||
this.filter((settings.collapsed ? "" : "." + CLASSES.closed) + ":not(." + CLASSES.open + ")").find(">ul").hide();
|
||||
}
|
||||
// return all items with sublists
|
||||
return this.filter(":has(>ul)");
|
||||
},
|
||||
applyClasses: function(settings, toggler) {
|
||||
// TODO use event delegation
|
||||
this.filter(":has(>ul):not(:has(>a))").find(">span").unbind("click.treeview").bind("click.treeview", function(event) {
|
||||
// don't handle click events on children, e.g. checkboxes
|
||||
if ( this == event.target )
|
||||
toggler.apply($(this).next());
|
||||
}).add( $("a", this) ).hoverClass();
|
||||
|
||||
if (!settings.prerendered) {
|
||||
// handle closed ones first
|
||||
this.filter(":has(>ul:hidden)")
|
||||
.addClass(CLASSES.expandable)
|
||||
.replaceClass(CLASSES.last, CLASSES.lastExpandable);
|
||||
|
||||
// handle open ones
|
||||
this.not(":has(>ul:hidden)")
|
||||
.addClass(CLASSES.collapsable)
|
||||
.replaceClass(CLASSES.last, CLASSES.lastCollapsable);
|
||||
|
||||
// create hitarea if not present
|
||||
var hitarea = this.find("div." + CLASSES.hitarea);
|
||||
if (!hitarea.length)
|
||||
hitarea = this.prepend("<div class=\"" + CLASSES.hitarea + "\"/>").find("div." + CLASSES.hitarea);
|
||||
hitarea.removeClass().addClass(CLASSES.hitarea).each(function() {
|
||||
var classes = "";
|
||||
$.each($(this).parent().attr("class").split(" "), function() {
|
||||
classes += this + "-hitarea ";
|
||||
});
|
||||
$(this).addClass( classes );
|
||||
})
|
||||
}
|
||||
|
||||
// apply event to hitarea
|
||||
this.find("div." + CLASSES.hitarea).click( toggler );
|
||||
},
|
||||
treeview: function(settings) {
|
||||
|
||||
settings = $.extend({
|
||||
cookieId: "treeview"
|
||||
}, settings);
|
||||
|
||||
if ( settings.toggle ) {
|
||||
var callback = settings.toggle;
|
||||
settings.toggle = function() {
|
||||
return callback.apply($(this).parent()[0], arguments);
|
||||
};
|
||||
}
|
||||
|
||||
// factory for treecontroller
|
||||
function treeController(tree, control) {
|
||||
// factory for click handlers
|
||||
function handler(filter) {
|
||||
return function() {
|
||||
// reuse toggle event handler, applying the elements to toggle
|
||||
// start searching for all hitareas
|
||||
toggler.apply( $("div." + CLASSES.hitarea, tree).filter(function() {
|
||||
// for plain toggle, no filter is provided, otherwise we need to check the parent element
|
||||
return filter ? $(this).parent("." + filter).length : true;
|
||||
}) );
|
||||
return false;
|
||||
};
|
||||
}
|
||||
// click on first element to collapse tree
|
||||
$("a:eq(0)", control).click( handler(CLASSES.collapsable) );
|
||||
// click on second to expand tree
|
||||
$("a:eq(1)", control).click( handler(CLASSES.expandable) );
|
||||
// click on third to toggle tree
|
||||
$("a:eq(2)", control).click( handler() );
|
||||
}
|
||||
|
||||
// handle toggle event
|
||||
function toggler() {
|
||||
$(this)
|
||||
.parent()
|
||||
// swap classes for hitarea
|
||||
.find(">.hitarea")
|
||||
.swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
|
||||
.swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea )
|
||||
.end()
|
||||
// swap classes for parent li
|
||||
.swapClass( CLASSES.collapsable, CLASSES.expandable )
|
||||
.swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
|
||||
// find child lists
|
||||
.find( ">ul" )
|
||||
// toggle them
|
||||
.heightToggle( settings.animated, settings.toggle );
|
||||
if ( settings.unique ) {
|
||||
$(this).parent()
|
||||
.siblings()
|
||||
// swap classes for hitarea
|
||||
.find(">.hitarea")
|
||||
.replaceClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
|
||||
.replaceClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea )
|
||||
.end()
|
||||
.replaceClass( CLASSES.collapsable, CLASSES.expandable )
|
||||
.replaceClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
|
||||
.find( ">ul" )
|
||||
.heightHide( settings.animated, settings.toggle );
|
||||
}
|
||||
}
|
||||
this.data("toggler", toggler);
|
||||
|
||||
function serialize() {
|
||||
function binary(arg) {
|
||||
return arg ? 1 : 0;
|
||||
}
|
||||
var data = [];
|
||||
branches.each(function(i, e) {
|
||||
data[i] = $(e).is(":has(>ul:visible)") ? 1 : 0;
|
||||
});
|
||||
$.cookie(settings.cookieId, data.join(""), settings.cookieOptions );
|
||||
}
|
||||
|
||||
function deserialize() {
|
||||
var stored = $.cookie(settings.cookieId);
|
||||
if ( stored ) {
|
||||
var data = stored.split("");
|
||||
branches.each(function(i, e) {
|
||||
$(e).find(">ul")[ parseInt(data[i]) ? "show" : "hide" ]();
|
||||
});
|
||||
}
|
||||
}
|
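// Illustrative example (not part of the original plugin): with three branches
// of which only the second is expanded, serialize() stores the cookie value
// "010", and deserialize() later re-applies it by showing or hiding each
// branch's child <ul> according to the corresponding digit.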
||||
|
||||
// add treeview class to activate styles
|
||||
this.addClass("treeview");
|
||||
|
||||
// prepare branches and find all tree items with child lists
|
||||
var branches = this.find("li").prepareBranches(settings);
|
||||
|
||||
switch(settings.persist) {
|
||||
case "cookie":
|
||||
var toggleCallback = settings.toggle;
|
||||
settings.toggle = function() {
|
||||
serialize();
|
||||
if (toggleCallback) {
|
||||
toggleCallback.apply(this, arguments);
|
||||
}
|
||||
};
|
||||
deserialize();
|
||||
break;
|
||||
case "location":
|
||||
var current = this.find("a").filter(function() {
|
||||
return this.href.toLowerCase() == location.href.toLowerCase();
|
||||
});
|
||||
if ( current.length ) {
|
||||
// TODO update the open/closed classes
|
||||
var items = current.addClass("selected").parents("ul, li").add( current.next() ).show();
|
||||
if (settings.prerendered) {
|
||||
// if prerendered is on, replicate the basic class swapping
|
||||
items.filter("li")
|
||||
.swapClass( CLASSES.collapsable, CLASSES.expandable )
|
||||
.swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
|
||||
.find(">.hitarea")
|
||||
.swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
|
||||
.swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea );
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
branches.applyClasses(settings, toggler);
|
||||
|
||||
// if control option is set, create the treecontroller and show it
|
||||
if ( settings.control ) {
|
||||
treeController(this, settings.control);
|
||||
$(settings.control).show();
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
});
|
||||
|
||||
// classes used by the plugin
|
||||
// need to be styled via external stylesheet, see first example
|
||||
$.treeview = {};
|
||||
var CLASSES = ($.treeview.classes = {
|
||||
open: "open",
|
||||
closed: "closed",
|
||||
expandable: "expandable",
|
||||
expandableHitarea: "expandable-hitarea",
|
||||
lastExpandableHitarea: "lastExpandable-hitarea",
|
||||
collapsable: "collapsable",
|
||||
collapsableHitarea: "collapsable-hitarea",
|
||||
lastCollapsableHitarea: "lastCollapsable-hitarea",
|
||||
lastCollapsable: "lastCollapsable",
|
||||
lastExpandable: "lastExpandable",
|
||||
last: "last",
|
||||
hitarea: "hitarea"
|
||||
});
|
||||
|
||||
})(jQuery);
|
|
@ -0,0 +1,776 @@
|
|||
body {
|
||||
margin: 0;
|
||||
font-family: Arial, sans-serif;
|
||||
font-size: 16px;
|
||||
background-color: #fff;
|
||||
line-height: 1.3em;
|
||||
}
|
||||
pre,
|
||||
code {
|
||||
font-family: Menlo, monospace;
|
||||
font-size: 14px;
|
||||
}
|
||||
pre {
|
||||
line-height: 1.4em;
|
||||
overflow-x: auto;
|
||||
}
|
||||
pre .comment {
|
||||
color: #006600;
|
||||
}
|
||||
pre .highlight,
|
||||
pre .highlight-comment,
|
||||
pre .selection-highlight,
|
||||
pre .selection-highlight-comment {
|
||||
background: #FFFF00;
|
||||
}
|
||||
pre .selection,
|
||||
pre .selection-comment {
|
||||
background: #FF9632;
|
||||
}
|
||||
pre .ln {
|
||||
color: #999;
|
||||
}
|
||||
body {
|
||||
color: #222;
|
||||
}
|
||||
a,
|
||||
.exampleHeading .text {
|
||||
color: #375EAB;
|
||||
text-decoration: none;
|
||||
}
|
||||
a:hover,
|
||||
.exampleHeading .text:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
p {
|
||||
max-width: 800px;
|
||||
word-wrap: break-word;
|
||||
}
|
||||
p,
|
||||
pre,
|
||||
ul,
|
||||
ol {
|
||||
margin: 20px;
|
||||
}
|
||||
pre {
|
||||
background: #EFEFEF;
|
||||
padding: 10px;
|
||||
|
||||
-webkit-border-radius: 5px;
|
||||
-moz-border-radius: 5px;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
.rootHeading {
|
||||
margin: 20px 0 20px;
|
||||
padding: 0;
|
||||
color: #375EAB;
|
||||
font-weight: bold;
|
||||
}
|
||||
h1 {
|
||||
font-size: 28px;
|
||||
line-height: 1;
|
||||
}
|
||||
h2 {
|
||||
font-size: 20px;
|
||||
background: #E0EBF5;
|
||||
padding: 8px;
|
||||
line-height: 1.25;
|
||||
font-weight: normal;
|
||||
}
|
||||
h2 a {
|
||||
font-weight: bold;
|
||||
}
|
||||
h3 {
|
||||
font-size: 20px;
|
||||
}
|
||||
h3,
|
||||
h4 {
|
||||
margin: 20px 5px;
|
||||
}
|
||||
h4 {
|
||||
font-size: 16px;
|
||||
}
|
||||
.rootHeading {
|
||||
font-size: 20px;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
dl {
|
||||
margin: 20px;
|
||||
}
|
||||
dd {
|
||||
margin: 0;
|
||||
}
|
||||
dd.indent {
|
||||
margin: 0 20px;
|
||||
}
|
||||
dl,
|
||||
dd {
|
||||
font-size: 14px;
|
||||
}
|
||||
div#nav table td {
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
|
||||
.pkg-dir {
|
||||
padding: 0 10px;
|
||||
}
|
||||
.pkg-dir table {
|
||||
border-collapse: collapse;
|
||||
border-spacing: 0;
|
||||
}
|
||||
.pkg-name {
|
||||
padding-right: 10px;
|
||||
}
|
||||
.alert {
|
||||
color: #AA0000;
|
||||
}
|
||||
|
||||
.top-heading {
|
||||
float: left;
|
||||
padding: 21px 0;
|
||||
font-size: 20px;
|
||||
font-weight: normal;
|
||||
}
|
||||
.top-heading a {
|
||||
color: #222;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
div#topbar {
|
||||
background: #E0EBF5;
|
||||
height: 64px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
body {
|
||||
text-align: center;
|
||||
}
|
||||
div#page {
|
||||
width: 100%;
|
||||
}
|
||||
div#page > .container,
|
||||
div#topbar > .container {
|
||||
text-align: left;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
padding: 0 20px;
|
||||
}
|
||||
div#topbar > .container,
|
||||
div#page > .container {
|
||||
max-width: 950px;
|
||||
}
|
||||
div#page.wide > .container,
|
||||
div#topbar.wide > .container {
|
||||
max-width: none;
|
||||
}
|
||||
div#plusone {
|
||||
float: right;
|
||||
clear: right;
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
div#footer {
|
||||
text-align: center;
|
||||
color: #666;
|
||||
font-size: 14px;
|
||||
margin: 40px 0;
|
||||
}
|
||||
|
||||
div#menu > a,
|
||||
div#menu > input,
|
||||
div#learn .buttons a,
|
||||
div.play .buttons a,
|
||||
div#blog .read a,
|
||||
#menu-button {
|
||||
padding: 10px;
|
||||
|
||||
text-decoration: none;
|
||||
font-size: 16px;
|
||||
|
||||
-webkit-border-radius: 5px;
|
||||
-moz-border-radius: 5px;
|
||||
border-radius: 5px;
|
||||
}
|
||||
div#playground .buttons a,
|
||||
div#menu > a,
|
||||
div#menu > input,
|
||||
#menu-button {
|
||||
border: 1px solid #375EAB;
|
||||
}
|
||||
div#playground .buttons a,
|
||||
div#menu > a,
|
||||
#menu-button {
|
||||
color: white;
|
||||
background: #375EAB;
|
||||
}
|
||||
#playgroundButton.active {
|
||||
background: white;
|
||||
color: #375EAB;
|
||||
}
|
||||
a#start,
|
||||
div#learn .buttons a,
|
||||
div.play .buttons a,
|
||||
div#blog .read a {
|
||||
color: #222;
|
||||
border: 1px solid #375EAB;
|
||||
background: #E0EBF5;
|
||||
}
|
||||
.download {
|
||||
width: 150px;
|
||||
}
|
||||
|
||||
div#menu {
|
||||
text-align: right;
|
||||
padding: 10px;
|
||||
white-space: nowrap;
|
||||
max-height: 0;
|
||||
-moz-transition: max-height .25s linear;
|
||||
transition: max-height .25s linear;
|
||||
width: 100%;
|
||||
}
|
||||
div#menu.menu-visible {
|
||||
max-height: 500px;
|
||||
}
|
||||
div#menu > a,
|
||||
#menu-button {
|
||||
margin: 10px 2px;
|
||||
padding: 10px;
|
||||
}
|
||||
div#menu > input {
|
||||
position: relative;
|
||||
top: 1px;
|
||||
width: 140px;
|
||||
background: white;
|
||||
color: #222;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
div#menu > input.inactive {
|
||||
color: #999;
|
||||
}
|
||||
|
||||
#menu-button {
|
||||
display: none;
|
||||
position: absolute;
|
||||
right: 5px;
|
||||
top: 0;
|
||||
margin-right: 5px;
|
||||
}
|
||||
#menu-button-arrow {
|
||||
display: inline-block;
|
||||
}
|
||||
.vertical-flip {
|
||||
transform: rotate(-180deg);
|
||||
}
|
||||
|
||||
div.left {
|
||||
float: left;
|
||||
clear: left;
|
||||
margin-right: 2.5%;
|
||||
}
|
||||
div.right {
|
||||
float: right;
|
||||
clear: right;
|
||||
margin-left: 2.5%;
|
||||
}
|
||||
div.left,
|
||||
div.right {
|
||||
width: 45%;
|
||||
}
|
||||
|
||||
div#learn,
|
||||
div#about {
|
||||
padding-top: 20px;
|
||||
}
|
||||
div#learn h2,
|
||||
div#about {
|
||||
margin: 0;
|
||||
}
|
||||
div#about {
|
||||
font-size: 20px;
|
||||
margin: 0 auto 30px;
|
||||
}
|
||||
div#gopher {
|
||||
background: url(/doc/gopher/frontpage.png) no-repeat;
|
||||
background-position: center top;
|
||||
height: 155px;
|
||||
}
|
||||
a#start {
|
||||
display: block;
|
||||
padding: 10px;
|
||||
|
||||
text-align: center;
|
||||
text-decoration: none;
|
||||
|
||||
-webkit-border-radius: 5px;
|
||||
-moz-border-radius: 5px;
|
||||
border-radius: 5px;
|
||||
}
|
||||
a#start .big {
|
||||
display: block;
|
||||
font-weight: bold;
|
||||
font-size: 20px;
|
||||
}
|
||||
a#start .desc {
|
||||
display: block;
|
||||
font-size: 14px;
|
||||
font-weight: normal;
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
div#learn .popout {
|
||||
float: right;
|
||||
display: block;
|
||||
cursor: pointer;
|
||||
font-size: 12px;
|
||||
background: url(/doc/share.png) no-repeat;
|
||||
background-position: right top;
|
||||
padding: 5px 27px;
|
||||
}
|
||||
div#learn pre,
|
||||
div#learn textarea {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
font-family: Menlo, monospace;
|
||||
font-size: 14px;
|
||||
}
|
||||
div#learn .input {
|
||||
padding: 10px;
|
||||
margin-top: 10px;
|
||||
height: 150px;
|
||||
|
||||
-webkit-border-top-left-radius: 5px;
|
||||
-webkit-border-top-right-radius: 5px;
|
||||
-moz-border-radius-topleft: 5px;
|
||||
-moz-border-radius-topright: 5px;
|
||||
border-top-left-radius: 5px;
|
||||
border-top-right-radius: 5px;
|
||||
}
|
||||
div#learn .input textarea {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
border: none;
|
||||
outline: none;
|
||||
resize: none;
|
||||
}
|
||||
div#learn .output {
|
||||
border-top: none !important;
|
||||
|
||||
padding: 10px;
|
||||
height: 59px;
|
||||
overflow: auto;
|
||||
|
||||
-webkit-border-bottom-right-radius: 5px;
|
||||
-webkit-border-bottom-left-radius: 5px;
|
||||
-moz-border-radius-bottomright: 5px;
|
||||
-moz-border-radius-bottomleft: 5px;
|
||||
border-bottom-right-radius: 5px;
|
||||
border-bottom-left-radius: 5px;
|
||||
}
|
||||
div#learn .output pre {
|
||||
padding: 0;
|
||||
|
||||
-webkit-border-radius: 0;
|
||||
-moz-border-radius: 0;
|
||||
border-radius: 0;
|
||||
}
|
||||
div#learn .input,
|
||||
div#learn .input textarea,
|
||||
div#learn .output,
|
||||
div#learn .output pre {
|
||||
background: #FFFFD8;
|
||||
}
|
||||
div#learn .input,
|
||||
div#learn .output {
|
||||
border: 1px solid #375EAB;
|
||||
}
|
||||
div#learn .buttons {
|
||||
float: right;
|
||||
padding: 20px 0 10px 0;
|
||||
text-align: right;
|
||||
}
|
||||
div#learn .buttons a {
|
||||
height: 16px;
|
||||
margin-left: 5px;
|
||||
padding: 10px;
|
||||
}
|
||||
div#learn .toys {
|
||||
margin-top: 8px;
|
||||
}
|
||||
div#learn .toys select {
|
||||
border: 1px solid #375EAB;
|
||||
margin: 0;
|
||||
}
|
||||
div#learn .output .exit {
|
||||
display: none;
|
||||
}
|
||||
|
||||
div#video {
|
||||
max-width: 100%;
|
||||
}
|
||||
div#blog,
|
||||
div#video {
|
||||
margin-top: 40px;
|
||||
}
|
||||
div#blog > a,
|
||||
div#blog > div,
|
||||
div#blog > h2,
|
||||
div#video > a,
|
||||
div#video > div,
|
||||
div#video > h2 {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
div#blog .title,
|
||||
div#video .title {
|
||||
display: block;
|
||||
font-size: 20px;
|
||||
}
|
||||
div#blog .when {
|
||||
color: #666;
|
||||
font-size: 14px;
|
||||
}
|
||||
div#blog .read {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
.toggleButton { cursor: pointer; }
|
||||
.toggle .collapsed { display: block; }
|
||||
.toggle .expanded { display: none; }
|
||||
.toggleVisible .collapsed { display: none; }
|
||||
.toggleVisible .expanded { display: block; }
|
||||
|
||||
table.codetable { margin-left: auto; margin-right: auto; border-style: none; }
|
||||
table.codetable td { padding-right: 10px; }
|
||||
hr { border-style: none; border-top: 1px solid black; }
|
||||
|
||||
img.gopher {
|
||||
float: right;
|
||||
margin-left: 10px;
|
||||
margin-bottom: 10px;
|
||||
z-index: -1;
|
||||
}
|
||||
h2 { clear: right; }
|
||||
|
||||
/* example and drop-down playground */
|
||||
div.play {
|
||||
padding: 0 20px 40px 20px;
|
||||
}
|
||||
div.play pre,
|
||||
div.play textarea,
|
||||
div.play .lines {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
font-family: Menlo, monospace;
|
||||
font-size: 14px;
|
||||
}
|
||||
div.play .input {
|
||||
padding: 10px;
|
||||
margin-top: 10px;
|
||||
|
||||
-webkit-border-top-left-radius: 5px;
|
||||
-webkit-border-top-right-radius: 5px;
|
||||
-moz-border-radius-topleft: 5px;
|
||||
-moz-border-radius-topright: 5px;
|
||||
border-top-left-radius: 5px;
|
||||
border-top-right-radius: 5px;
|
||||
|
||||
overflow: hidden;
|
||||
}
|
||||
div.play .input textarea {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
border: none;
|
||||
outline: none;
|
||||
resize: none;
|
||||
|
||||
overflow: hidden;
|
||||
}
|
||||
div#playground .input textarea {
|
||||
overflow: auto;
|
||||
resize: auto;
|
||||
}
|
||||
div.play .output {
|
||||
border-top: none !important;
|
||||
|
||||
padding: 10px;
|
||||
max-height: 200px;
|
||||
overflow: auto;
|
||||
|
||||
-webkit-border-bottom-right-radius: 5px;
|
||||
-webkit-border-bottom-left-radius: 5px;
|
||||
-moz-border-radius-bottomright: 5px;
|
||||
-moz-border-radius-bottomleft: 5px;
|
||||
border-bottom-right-radius: 5px;
|
||||
border-bottom-left-radius: 5px;
|
||||
}
|
||||
div.play .output pre {
|
||||
padding: 0;
|
||||
|
||||
-webkit-border-radius: 0;
|
||||
-moz-border-radius: 0;
|
||||
border-radius: 0;
|
||||
}
|
||||
div.play .input,
|
||||
div.play .input textarea,
|
||||
div.play .output,
|
||||
div.play .output pre {
|
||||
background: #FFFFD8;
|
||||
}
|
||||
div.play .input,
|
||||
div.play .output {
|
||||
border: 1px solid #375EAB;
|
||||
}
|
||||
div.play .buttons {
|
||||
float: right;
|
||||
padding: 20px 0 10px 0;
|
||||
text-align: right;
|
||||
}
|
||||
div.play .buttons a {
|
||||
height: 16px;
|
||||
margin-left: 5px;
|
||||
padding: 10px;
|
||||
cursor: pointer;
|
||||
}
|
||||
.output .stderr {
|
||||
color: #933;
|
||||
}
|
||||
.output .system {
|
||||
color: #999;
|
||||
}
|
||||
|
||||
/* drop-down playground */
|
||||
#playgroundButton,
|
||||
div#playground {
|
||||
/* start hidden; revealed by javascript */
|
||||
display: none;
|
||||
}
|
||||
div#playground {
|
||||
position: absolute;
|
||||
top: 63px;
|
||||
right: 20px;
|
||||
padding: 0 10px 10px 10px;
|
||||
z-index: 1;
|
||||
text-align: left;
|
||||
background: #E0EBF5;
|
||||
|
||||
border: 1px solid #B0BBC5;
|
||||
border-top: none;
|
||||
|
||||
-webkit-border-bottom-left-radius: 5px;
|
||||
-webkit-border-bottom-right-radius: 5px;
|
||||
-moz-border-radius-bottomleft: 5px;
|
||||
-moz-border-radius-bottomright: 5px;
|
||||
border-bottom-left-radius: 5px;
|
||||
border-bottom-right-radius: 5px;
|
||||
}
|
||||
div#playground .code {
|
||||
width: 520px;
|
||||
height: 200px;
|
||||
}
|
||||
div#playground .output {
|
||||
height: 100px;
|
||||
}
|
||||
|
||||
/* Inline runnable snippets (play.js/initPlayground) */
|
||||
#content .code pre, #content .playground pre, #content .output pre {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background: none;
|
||||
border: none;
|
||||
outline: 0px solid transparent;
|
||||
overflow: auto;
|
||||
}
|
||||
#content .playground .number, #content .code .number {
|
||||
color: #999;
|
||||
}
|
||||
#content .code, #content .playground, #content .output {
|
||||
width: auto;
|
||||
margin: 20px;
|
||||
padding: 10px;
|
||||
-webkit-border-radius: 5px;
|
||||
-moz-border-radius: 5px;
|
||||
border-radius: 5px;
|
||||
}
|
||||
#content .code, #content .playground {
|
||||
background: #e9e9e9;
|
||||
}
|
||||
#content .output {
|
||||
background: #202020;
|
||||
}
|
||||
#content .output .stdout, #content .output pre {
|
||||
color: #e6e6e6;
|
||||
}
|
||||
#content .output .stderr, #content .output .error {
|
||||
color: rgb(244, 74, 63);
|
||||
}
|
||||
#content .output .system, #content .output .exit {
|
||||
color: rgb(255, 209, 77)
|
||||
}
|
||||
#content .buttons {
|
||||
position: relative;
|
||||
float: right;
|
||||
top: -50px;
|
||||
right: 30px;
|
||||
}
|
||||
#content .output .buttons {
|
||||
top: -60px;
|
||||
right: 0;
|
||||
height: 0;
|
||||
}
|
||||
#content .buttons .kill {
|
||||
display: none;
|
||||
visibility: hidden;
|
||||
}
|
||||
a.error {
|
||||
font-weight: bold;
|
||||
color: white;
|
||||
background-color: darkred;
|
||||
border-bottom-left-radius: 4px;
|
||||
border-bottom-right-radius: 4px;
|
||||
border-top-left-radius: 4px;
|
||||
border-top-right-radius: 4px;
|
||||
padding: 2px 4px 2px 4px; /* TRBL */
|
||||
}
|
||||
|
||||
|
||||
#heading-narrow {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.downloading {
|
||||
background: #F9F9BE;
|
||||
padding: 10px;
|
||||
text-align: center;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
@media (max-width: 930px) {
|
||||
#heading-wide {
|
||||
display: none;
|
||||
}
|
||||
#heading-narrow {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@media (max-width: 760px) {
|
||||
.container .left,
|
||||
.container .right {
|
||||
width: auto;
|
||||
float: none;
|
||||
}
|
||||
|
||||
div#about {
|
||||
max-width: 500px;
|
||||
text-align: center;
|
||||
}
|
||||
}
|
||||
|
||||
@media (min-width: 700px) and (max-width: 1000px) {
|
||||
div#menu > a {
|
||||
margin: 5px 0;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
div#menu > input {
|
||||
font-size: 14px;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 700px) {
|
||||
body {
|
||||
font-size: 15px;
|
||||
}
|
||||
|
||||
pre,
|
||||
code {
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
div#page > .container {
|
||||
padding: 0 10px;
|
||||
}
|
||||
|
||||
div#topbar {
|
||||
height: auto;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
div#topbar > .container {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
#heading-wide {
|
||||
display: block;
|
||||
}
|
||||
#heading-narrow {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.top-heading {
|
||||
float: none;
|
||||
display: inline-block;
|
||||
padding: 12px;
|
||||
}
|
||||
|
||||
div#menu {
|
||||
padding: 0;
|
||||
min-width: 0;
|
||||
text-align: left;
|
||||
float: left;
|
||||
}
|
||||
|
||||
div#menu > a,
|
||||
div#menu > input {
|
||||
display: block;
|
||||
margin-left: 0;
|
||||
margin-right: 0;
|
||||
}
|
||||
|
||||
div#menu > input {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#menu-button {
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
p,
|
||||
pre,
|
||||
ul,
|
||||
ol {
|
||||
margin: 10px;
|
||||
}
|
||||
|
||||
.pkg-synopsis {
|
||||
display: none;
|
||||
}
|
||||
|
||||
img.gopher {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 480px) {
|
||||
#heading-wide {
|
||||
display: none;
|
||||
}
|
||||
#heading-narrow {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
@media print {
|
||||
pre {
|
||||
background: #FFF;
|
||||
border: 1px solid #BBB;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
#
|
||||
# include.mk
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
TARGETS += fdb_go fdb_go_tester
|
||||
CLEAN_TARGETS += fdb_go_clean fdb_go_tester_clean
|
||||
|
||||
GOPATH := $(CURDIR)/bindings/go
|
||||
|
||||
# We only override if the environment didn't set it (this is used by
|
||||
# the fdbwebsite documentation build process)
|
||||
GODOC_DIR ?= bindings/go
|
||||
|
||||
CGO_CFLAGS := -I$(CURDIR)/bindings/c
|
||||
CGO_LDFLAGS := -L$(CURDIR)/lib
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
GOPLATFORM := linux_amd64
|
||||
else ifeq ($(PLATFORM),osx)
|
||||
GOPLATFORM := darwin_amd64
|
||||
else
|
||||
$(error Not prepared to compile on platform $(PLATFORM))
|
||||
endif
|
||||
|
||||
GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)
|
||||
|
||||
GO_PACKAGES := fdb fdb/tuple fdb/subspace fdb/directory
|
||||
GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
|
||||
|
||||
GO_SRC := $(shell find $(GOPATH)/src -name '*.go')
|
||||
|
||||
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC)
|
||||
|
||||
fdb_go_clean:
|
||||
@echo "Cleaning fdb_go"
|
||||
@rm -rf $(GO_PACKAGE_OUTDIR)
|
||||
|
||||
fdb_go_tester: $(GOPATH)/bin/_stacktester
|
||||
|
||||
fdb_go_tester_clean:
|
||||
@echo "Cleaning fdb_go_tester"
|
||||
@rm -rf $(GOPATH)/bin
|
||||
|
||||
$(GOPATH)/bin/_stacktester: $(GO_SRC) $(GO_PACKAGE_OBJECTS) bindings/go/src/fdb/generated.go
|
||||
@echo "Compiling $(basename $(notdir $@))"
|
||||
@go install _stacktester
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a bindings/go/src/fdb/generated.go
|
||||
@echo "Compiling fdb/tuple"
|
||||
@go install fdb/tuple
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a bindings/go/src/fdb/generated.go
|
||||
@echo "Compiling fdb/subspace"
|
||||
@go install fdb/subspace
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a bindings/go/src/fdb/generated.go
|
||||
@echo "Compiling fdb/directory"
|
||||
@go install fdb/directory
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_SRC) bindings/go/src/fdb/generated.go
|
||||
@echo "Compiling fdb"
|
||||
@go install fdb
|
||||
|
||||
bindings/go/src/fdb/generated.go: lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
|
||||
@echo "Building $@"
|
||||
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@
|
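# Illustrative note (annotation, not part of the original makefile): the
# translator reads the options spec on stdin and writes Go source on stdout,
# so the same generation step can be run by hand:
#   go run bindings/go/src/_util/translate_fdb_options.go \
#     < fdbclient/vexillographer/fdb.options > bindings/go/src/fdb/generated.go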
||||
|
||||
godoc: $(GO_SRC)
|
||||
@echo "Generating Go Documentation"
|
||||
@rm -rf $(GODOC_DIR)/godoc
|
||||
@mkdir -p $(GODOC_DIR)/godoc
|
||||
@mkdir -p $(GODOC_DIR)/godoc/lib/godoc
|
||||
@godoc -url "http://localhost:6060/pkg/fdb" > $(GODOC_DIR)/godoc/fdb.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/tuple" > $(GODOC_DIR)/godoc/fdb.tuple.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/subspace" > $(GODOC_DIR)/godoc/fdb.subspace.html
|
||||
@godoc -url "http://localhost:6060/pkg/fdb/directory" > $(GODOC_DIR)/godoc/fdb.directory.html
|
||||
@cp $(GOPATH)/godoc-resources/* $(GODOC_DIR)/godoc/lib/godoc
|
||||
@echo "Mangling paths in Go Documentation"
|
||||
@(find $(GODOC_DIR)/godoc/ -name '*.html' -exec sed -i '' -e 's_/lib_lib_' {} \;)
|
||||
@(sed -i -e 's_a href="tuple/"_a href="fdb.tuple.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
@(sed -i -e 's_a href="subspace/"_a href="fdb.subspace.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
@(sed -i -e 's_a href="directory/"_a href="fdb.directory.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
|
||||
godoc_clean:
|
||||
@echo "Cleaning Go Documentation"
|
||||
@rm -rf $(GODOC_DIR)/godoc
|
||||
|
|
@ -0,0 +1,298 @@
|
|||
/*
|
||||
* directory.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/tuple"
|
||||
"fdb/subspace"
|
||||
"fdb/directory"
|
||||
"strings"
|
||||
"bytes"
|
||||
)
|
||||
|
||||
func (sm *StackMachine) popTuples(count int) []tuple.Tuple {
|
||||
tuples := make([]tuple.Tuple, count)
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
c := sm.waitAndPop().item.(int64)
|
||||
tuples[i] = make(tuple.Tuple, c)
|
||||
for j := int64(0); j < c; j++ {
|
||||
tuples[i][j] = sm.waitAndPop().item
|
||||
}
|
||||
}
|
||||
|
||||
return tuples
|
||||
}
|
||||
|
||||
func tupleToPath(t tuple.Tuple) []string {
|
||||
ret := make([]string, len(t))
|
||||
for i, el := range t {
|
||||
ret[i] = el.(string)
|
||||
}
|
||||
return ret
|
||||
}
|
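// Worked example (illustrative, not in the original source): if the stack
// machine's next items are the count 2 followed by "a" and "b", then
// popTuples(1) returns []tuple.Tuple{{"a", "b"}}, and tupleToPath converts
// that tuple into the directory path []string{"a", "b"}.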
||||
|
||||
func tuplePackStrings(s []string) []byte {
|
||||
t := make(tuple.Tuple, len(s))
|
||||
for i, el := range s {
|
||||
t[i] = el
|
||||
}
|
||||
return t.Pack()
|
||||
}
|
||||
|
||||
type DirectoryExtension struct {
|
||||
list []interface{}
|
||||
index int64
|
||||
errorIndex int64
|
||||
}
|
||||
|
||||
func newDirectoryExtension() *DirectoryExtension {
|
||||
de := DirectoryExtension{}
|
||||
de.store(directory.Root())
|
||||
return &de
|
||||
}
|
||||
|
||||
func (de *DirectoryExtension) store(el interface{}) {
|
||||
de.list = append(de.list, el)
|
||||
}
|
||||
|
||||
func (de *DirectoryExtension) cwd() directory.Directory {
|
||||
return de.list[de.index].(directory.Directory)
|
||||
}
|
||||
|
||||
func (de *DirectoryExtension) css() subspace.Subspace {
|
||||
return de.list[de.index].(subspace.Subspace)
|
||||
}
|
||||
|
||||
func (sm *StackMachine) maybePath() []string {
|
||||
count := sm.waitAndPop().item.(int64)
|
||||
var path []string
|
||||
if count > 0 {
|
||||
tuples := sm.popTuples(1)
|
||||
path = tupleToPath(tuples[0])
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
var createOps = map[string]bool {
|
||||
"CREATE_SUBSPACE": true,
|
||||
"CREATE_LAYER": true,
|
||||
"CREATE_OR_OPEN": true,
|
||||
"CREATE": true,
|
||||
"OPEN": true,
|
||||
"MOVE": true,
|
||||
"MOVE_TO": true,
|
||||
"OPEN_SUBSPACE": true,
|
||||
}
|
||||
|
||||
func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool, idx int, t fdb.Transactor, rt fdb.ReadTransactor) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
sm.store(idx, []byte("DIRECTORY_ERROR"))
|
||||
if createOps[op] {
|
||||
de.store(nil)
|
||||
}
|
||||
}
|
||||
}()
|
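// Behavior note (restating the recover above, not new logic): if any
// directory operation panics (e.g. Open on a missing path), the harness
// still records a "DIRECTORY_ERROR" result, and create-style ops push a nil
// placeholder so a later CHANGE can fall back to errorIndex.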
||||
|
||||
var e error
|
||||
|
||||
switch {
|
||||
case op == "CREATE_SUBSPACE":
|
||||
tuples := sm.popTuples(1)
|
||||
rp := sm.waitAndPop().item.([]byte)
|
||||
s := subspace.FromBytes(rp).Sub(tuples[0]...)
|
||||
de.store(s)
|
||||
case op == "CREATE_LAYER":
|
||||
idx1 := sm.waitAndPop().item.(int64)
|
||||
idx2 := sm.waitAndPop().item.(int64)
|
||||
amp := sm.waitAndPop().item.(int64)
|
||||
nodeSS := de.list[idx1]
|
||||
contentSS := de.list[idx2]
|
||||
|
||||
if nodeSS == nil || contentSS == nil {
|
||||
de.store(nil)
|
||||
} else {
|
||||
de.store(directory.NewDirectoryLayer(nodeSS.(subspace.Subspace), contentSS.(subspace.Subspace), (amp == int64(1))))
|
||||
}
|
||||
case op == "CREATE_OR_OPEN":
|
||||
tuples := sm.popTuples(1)
|
||||
l := sm.waitAndPop().item
|
||||
var layer []byte
|
||||
if l != nil {
|
||||
layer = l.([]byte)
|
||||
}
|
||||
d, e := de.cwd().CreateOrOpen(t, tupleToPath(tuples[0]), layer)
|
||||
if e != nil { panic(e) }
|
||||
de.store(d)
|
||||
case op == "CREATE":
|
||||
tuples := sm.popTuples(1)
|
||||
l := sm.waitAndPop().item
|
||||
var layer []byte
|
||||
if l != nil {
|
||||
layer = l.([]byte)
|
||||
}
|
||||
p := sm.waitAndPop().item
|
||||
var d directory.Directory
|
||||
if p == nil {
|
||||
d, e = de.cwd().Create(t, tupleToPath(tuples[0]), layer)
|
||||
} else {
|
||||
// p.([]byte) itself may be nil, but CreatePrefix handles that appropriately
|
||||
d, e = de.cwd().CreatePrefix(t, tupleToPath(tuples[0]), layer, p.([]byte))
|
||||
}
|
||||
if e != nil { panic(e) }
|
||||
de.store(d)
|
||||
case op == "OPEN":
|
||||
tuples := sm.popTuples(1)
|
||||
l := sm.waitAndPop().item
|
||||
var layer []byte
|
||||
if l != nil {
|
||||
layer = l.([]byte)
|
||||
}
|
||||
d, e := de.cwd().Open(rt, tupleToPath(tuples[0]), layer)
|
||||
if e != nil { panic(e) }
|
||||
de.store(d)
|
||||
case op == "CHANGE":
|
||||
i := sm.waitAndPop().item.(int64)
|
||||
if de.list[i] == nil {
|
||||
i = de.errorIndex
|
||||
}
|
||||
de.index = i
|
||||
case op == "SET_ERROR_INDEX":
|
||||
de.errorIndex = sm.waitAndPop().item.(int64)
|
||||
case op == "MOVE":
|
||||
tuples := sm.popTuples(2)
|
||||
d, e := de.cwd().Move(t, tupleToPath(tuples[0]), tupleToPath(tuples[1]))
|
||||
if e != nil { panic(e) }
|
||||
de.store(d)
|
||||
case op == "MOVE_TO":
|
||||
tuples := sm.popTuples(1)
|
||||
d, e := de.cwd().MoveTo(t, tupleToPath(tuples[0]))
|
||||
if e != nil { panic(e) }
|
||||
de.store(d)
|
||||
case strings.HasPrefix(op, "REMOVE"):
|
||||
path := sm.maybePath()
|
||||
// This ***HAS*** to call Transact to ensure that any directory version
|
||||
// key set in the process of trying to remove this potentially
|
||||
// non-existent directory, in the REMOVE but not REMOVE_IF_EXISTS case,
|
||||
// doesn't end up committing the version key. (Other languages have
|
||||
// separate remove() and remove_if_exists(), so they don't have this tricky
|
||||
// issue).
|
||||
_, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
ok, e := de.cwd().Remove(tr, path)
|
||||
if e != nil { panic(e) }
|
||||
switch op[6:] {
|
||||
case "":
|
||||
if !ok {
|
||||
panic("directory does not exist")
|
||||
}
|
||||
case "_IF_EXISTS":
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
if e != nil { panic(e) }
|
||||
case op == "LIST":
|
||||
subs, e := de.cwd().List(rt, sm.maybePath())
|
||||
if e != nil { panic(e) }
|
||||
t := make(tuple.Tuple, len(subs))
|
||||
for i, s := range subs { t[i] = s }
|
||||
sm.store(idx, t.Pack())
|
||||
case op == "EXISTS":
|
||||
b, e := de.cwd().Exists(rt, sm.maybePath())
|
||||
if e != nil { panic(e) }
|
||||
if b {
|
||||
sm.store(idx, int64(1))
|
||||
} else {
|
||||
sm.store(idx, int64(0))
|
||||
}
|
||||
case op == "PACK_KEY":
|
||||
tuples := sm.popTuples(1)
|
||||
sm.store(idx, de.css().Pack(tuples[0]))
|
||||
case op == "UNPACK_KEY":
|
||||
t, e := de.css().Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
|
||||
if e != nil { panic(e) }
|
||||
for _, el := range(t) {
|
||||
sm.store(idx, el)
|
||||
}
|
||||
case op == "RANGE":
|
||||
ss := de.css().Sub(sm.popTuples(1)[0]...)
|
||||
bk, ek := ss.FDBRangeKeys()
|
||||
sm.store(idx, bk)
|
||||
sm.store(idx, ek)
|
||||
case op == "CONTAINS":
|
||||
k := sm.waitAndPop().item.([]byte)
|
||||
b := de.css().Contains(fdb.Key(k))
|
||||
if b {
|
||||
sm.store(idx, int64(1))
|
||||
} else {
|
||||
sm.store(idx, int64(0))
|
||||
}
|
||||
case op == "OPEN_SUBSPACE":
|
||||
de.store(de.css().Sub(sm.popTuples(1)[0]...))
|
||||
case op == "LOG_SUBSPACE":
|
||||
k := sm.waitAndPop().item.([]byte)
|
||||
k = append(k, tuple.Tuple{de.index}.Pack()...)
|
||||
v := de.css().Bytes()
|
||||
t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Set(fdb.Key(k), v)
|
||||
return nil, nil
|
||||
})
|
||||
case op == "LOG_DIRECTORY":
|
||||
rp := sm.waitAndPop().item.([]byte)
|
||||
ss := subspace.FromBytes(rp).Sub(de.index)
|
||||
k1 := ss.Pack(tuple.Tuple{"path"})
|
||||
v1 := tuplePackStrings(de.cwd().GetPath())
|
||||
k2 := ss.Pack(tuple.Tuple{"layer"})
|
||||
v2 := tuple.Tuple{de.cwd().GetLayer()}.Pack()
|
||||
k3 := ss.Pack(tuple.Tuple{"exists"})
|
||||
var v3 []byte
|
||||
exists, e := de.cwd().Exists(rt, nil)
|
||||
if e != nil { panic(e) }
|
||||
if exists {
|
||||
v3 = tuple.Tuple{1}.Pack()
|
||||
} else {
|
||||
v3 = tuple.Tuple{0}.Pack()
|
||||
}
|
||||
k4 := ss.Pack(tuple.Tuple{"children"})
|
||||
var subs []string
|
||||
if exists {
|
||||
subs, e = de.cwd().List(rt, nil)
|
||||
if e != nil { panic(e) }
|
||||
}
|
||||
v4 := tuplePackStrings(subs)
|
||||
t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Set(k1, v1)
|
||||
tr.Set(k2, v2)
|
||||
tr.Set(k3, v3)
|
||||
tr.Set(k4, v4)
|
||||
return nil, nil
|
||||
})
|
||||
case op == "STRIP_PREFIX":
|
||||
ba := sm.waitAndPop().item.([]byte)
|
||||
ssb := de.css().Bytes()
|
||||
if !bytes.HasPrefix(ba, ssb) {
|
||||
panic("prefix mismatch")
|
||||
}
|
||||
ba = ba[len(ssb):]
|
||||
sm.store(idx, ba)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,726 @@
/*
 * stacktester.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"bytes"
	"fdb"
	"fdb/tuple"
	"fmt"
	"log"
	"os"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
)

const verbose bool = false

var trMap = map[string]fdb.Transaction{}
var trMapLock = sync.RWMutex{}

func int64ToBool(i int64) bool {
	switch i {
	case 0:
		return false
	default:
		return true
	}
}

type stackEntry struct {
	item interface{}
	idx  int
}

type StackMachine struct {
	prefix      []byte
	trName      string
	stack       []stackEntry
	lastVersion int64
	threads     sync.WaitGroup
	verbose     bool
	de          *DirectoryExtension
}

func newStackMachine(prefix []byte, verbose bool) *StackMachine {
	sm := StackMachine{verbose: verbose, prefix: prefix, de: newDirectoryExtension(), trName: string(prefix[:])}
	return &sm
}

func (sm *StackMachine) waitAndPop() (ret stackEntry) {
	defer func() {
		if r := recover(); r != nil {
			switch r := r.(type) {
			case fdb.Error:
				ret.item = []byte(tuple.Tuple{[]byte("ERROR"), []byte(fmt.Sprintf("%d", r.Code))}.Pack())
			default:
				panic(r)
			}
		}
	}()

	ret, sm.stack = sm.stack[len(sm.stack)-1], sm.stack[:len(sm.stack)-1]
	switch el := ret.item.(type) {
	case int64, []byte, string:
	case fdb.Key:
		ret.item = []byte(el)
	case fdb.FutureNil:
		el.MustGet()
		ret.item = []byte("RESULT_NOT_PRESENT")
	case fdb.FutureByteSlice:
		v := el.MustGet()
		if v != nil {
			ret.item = v
		} else {
			ret.item = []byte("RESULT_NOT_PRESENT")
		}
	case fdb.FutureKey:
		ret.item = []byte(el.MustGet())
	case nil:
	default:
		log.Fatalf("Don't know how to pop stack element %v %T\n", el, el)
	}
	return
}

func (sm *StackMachine) popSelector() fdb.KeySelector {
	sel := fdb.KeySelector{fdb.Key(sm.waitAndPop().item.([]byte)), int64ToBool(sm.waitAndPop().item.(int64)), int(sm.waitAndPop().item.(int64))}
	return sel
}

func (sm *StackMachine) popKeyRange() fdb.KeyRange {
	kr := fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))}
	return kr
}

func (sm *StackMachine) popRangeOptions() fdb.RangeOptions {
	ro := fdb.RangeOptions{Limit: int(sm.waitAndPop().item.(int64)), Reverse: int64ToBool(sm.waitAndPop().item.(int64)), Mode: fdb.StreamingMode(sm.waitAndPop().item.(int64) + 1)}
	return ro
}
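
// Editorial note on the +1 above: the binding tester supplies the C-level
// streaming mode code, while the Go StreamingMode enum generated by
// translate_fdb_options.go is shifted by one so that the default value (0)
// is Iterator; adding 1 converts between the two numbering schemes.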

func (sm *StackMachine) popPrefixRange() fdb.ExactRange {
	er, e := fdb.PrefixRange(sm.waitAndPop().item.([]byte))
	if e != nil {
		panic(e)
	}
	return er
}

func (sm *StackMachine) pushRange(idx int, sl []fdb.KeyValue, prefixFilter []byte) {
	var t tuple.Tuple = make(tuple.Tuple, 0, len(sl)*2)

	for _, kv := range sl {
		if prefixFilter == nil || bytes.HasPrefix(kv.Key, prefixFilter) {
			t = append(t, kv.Key)
			t = append(t, kv.Value)
		}
	}

	sm.store(idx, []byte(t.Pack()))
}

func (sm *StackMachine) store(idx int, item interface{}) {
	sm.stack = append(sm.stack, stackEntry{item, idx})
}

func (sm *StackMachine) dumpStack() {
	for i := len(sm.stack) - 1; i >= 0; i-- {
		fmt.Printf(" %d.", sm.stack[i].idx)
		el := sm.stack[i].item
		switch el := el.(type) {
		case int64:
			fmt.Printf(" %d", el)
		case fdb.FutureNil:
			fmt.Printf(" FutureNil")
		case fdb.FutureByteSlice:
			fmt.Printf(" FutureByteSlice")
		case fdb.FutureKey:
			fmt.Printf(" FutureKey")
		case []byte:
			fmt.Printf(" %+q", string(el))
		case fdb.Key:
			fmt.Printf(" %+q", string(el))
		case string:
			fmt.Printf(" %+q", el)
		case nil:
			fmt.Printf(" nil")
		default:
			log.Fatalf("Don't know how to dump stack element %v %T\n", el, el)
		}
		if i != 0 {
			fmt.Printf(",")
		}
	}
}

func (sm *StackMachine) executeMutation(t fdb.Transactor, f func(fdb.Transaction) (interface{}, error), isDB bool, idx int) {
	_, e := t.Transact(f)
	if e != nil {
		panic(e)
	}
	if isDB {
		sm.store(idx, []byte("RESULT_NOT_PRESENT"))
	}
}

func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) bool {
	for _, watch := range watches {
		if watch.IsReady() || expected {
			e := watch.Get()
			if e != nil {
				switch e := e.(type) {
				case fdb.Error:
					tr, tr_error := db.CreateTransaction()
					if tr_error != nil {
						panic(tr_error)
					}
					tr.OnError(e).MustGet()
				default:
					panic(e)
				}
			}
			if !expected {
				return false
			}
		}
	}

	return true
}
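
// Editorial sketch of the intent above: a watch that fails with an fdb.Error
// is fed to OnError on a scratch transaction, so a retryable error delays
// exactly as a transaction retry would, while MustGet panics if the error
// turns out to be non-retryable; the caller then re-runs the watch scenario.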

func (sm *StackMachine) testWatches() {
	for {
		_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(fdb.Key("w0"), []byte("0"))
			tr.Set(fdb.Key("w2"), []byte("2"))
			tr.Set(fdb.Key("w3"), []byte("3"))
			return nil, nil
		})
		if e != nil { panic(e) }

		var watches [4]fdb.FutureNil

		_, e = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			watches[0] = tr.Watch(fdb.Key("w0"))
			watches[1] = tr.Watch(fdb.Key("w1"))
			watches[2] = tr.Watch(fdb.Key("w2"))
			watches[3] = tr.Watch(fdb.Key("w3"))

			tr.Set(fdb.Key("w0"), []byte("0"))
			tr.Clear(fdb.Key("w1"))
			return nil, nil
		})
		if e != nil { panic(e) }

		time.Sleep(5 * time.Second)

		if !sm.checkWatches(watches, false) {
			continue
		}

		_, e = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(fdb.Key("w0"), []byte("a"))
			tr.Set(fdb.Key("w1"), []byte("b"))
			tr.Clear(fdb.Key("w2"))
			tr.BitXor(fdb.Key("w3"), []byte("\xff\xff"))
			return nil, nil
		})
		if e != nil { panic(e) }

		if sm.checkWatches(watches, true) {
			return
		}
	}
}

func (sm *StackMachine) testLocality() {
	_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		tr.Options().SetTimeout(60 * 1000)
		tr.Options().SetReadSystemKeys()
		boundaryKeys, e := db.LocalityGetBoundaryKeys(fdb.KeyRange{fdb.Key(""), fdb.Key("\xff\xff")}, 0, 0)
		if e != nil { panic(e) }

		for i := 0; i < len(boundaryKeys)-1; i++ {
			start := boundaryKeys[i]
			end := tr.GetKey(fdb.LastLessThan(boundaryKeys[i+1])).MustGet()

			startAddresses := tr.LocalityGetAddressesForKey(start).MustGet()
			endAddresses := tr.LocalityGetAddressesForKey(end).MustGet()

			for _, address1 := range startAddresses {
				found := false
				for _, address2 := range endAddresses {
					if address1 == address2 {
						found = true
						break
					}
				}
				if !found {
					panic("Locality not internally consistent.")
				}
			}
		}

		return nil, nil
	})

	if e != nil { panic(e) }
}

func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {
	_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		for index, el := range entries {
			var keyt tuple.Tuple
			keyt = append(keyt, int64(index))
			keyt = append(keyt, int64(el.idx))
			pk := append(prefix, keyt.Pack()...)

			var valt tuple.Tuple
			valt = append(valt, el.item)
			pv := valt.Pack()

			vl := 40000
			if len(pv) < vl {
				vl = len(pv)
			}

			tr.Set(fdb.Key(pk), pv[:vl])
		}

		return nil, nil
	})

	if e != nil { panic(e) }
	return
}

func (sm *StackMachine) currentTransaction() fdb.Transaction {
	trMapLock.RLock()
	tr := trMap[sm.trName]
	trMapLock.RUnlock()

	return tr
}

func (sm *StackMachine) newTransactionWithLockHeld() {
	tr, e := db.CreateTransaction()

	if e != nil {
		panic(e)
	}

	trMap[sm.trName] = tr
}

func (sm *StackMachine) newTransaction() {
	trMapLock.Lock()
	sm.newTransactionWithLockHeld()
	trMapLock.Unlock()
}

func (sm *StackMachine) switchTransaction(name []byte) {
	sm.trName = string(name[:])
	trMapLock.RLock()
	_, present := trMap[sm.trName]
	trMapLock.RUnlock()
	if !present {
		trMapLock.Lock()

		_, present = trMap[sm.trName]
		if !present {
			sm.newTransactionWithLockHeld()
		}

		trMapLock.Unlock()
	}
}
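
// switchTransaction above is a standard check/lock/re-check pattern: the
// cheap RLock probe is re-verified under the write lock, so two stack
// machines racing on the same name create the named transaction only once.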

func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
	defer func() {
		if r := recover(); r != nil {
			switch r := r.(type) {
			case fdb.Error:
				sm.store(idx, []byte(tuple.Tuple{[]byte("ERROR"), []byte(fmt.Sprintf("%d", r.Code))}.Pack()))
			default:
				panic(r)
			}
		}
	}()

	var e error

	op := inst[0].(string)
	if sm.verbose {
		fmt.Printf("%d. Instruction is %s (%v)\n", idx, op, sm.prefix)
		fmt.Printf("Stack from [")
		sm.dumpStack()
		fmt.Printf(" ] (%d)\n", len(sm.stack))
	}

	var t fdb.Transactor
	var rt fdb.ReadTransactor

	var isDB bool

	switch {
	case strings.HasSuffix(op, "_SNAPSHOT"):
		rt = sm.currentTransaction().Snapshot()
		op = op[:len(op)-9]
	case strings.HasSuffix(op, "_DATABASE"):
		t = db
		rt = db
		op = op[:len(op)-9]
		isDB = true
	default:
		t = sm.currentTransaction()
		rt = sm.currentTransaction()
	}

	switch {
	case op == "PUSH":
		sm.store(idx, inst[1])
	case op == "DUP":
		entry := sm.stack[len(sm.stack)-1]
		sm.store(entry.idx, entry.item)
case op == "EMPTY_STACK":
|
||||
sm.stack = []stackEntry{}
|
||||
sm.stack = make([]stackEntry, 0)
|
||||
case op == "SWAP":
|
||||
idx := sm.waitAndPop().item.(int64)
|
||||
sm.stack[len(sm.stack) - 1], sm.stack[len(sm.stack) - 1 - int(idx)] = sm.stack[len(sm.stack) - 1 - int(idx)], sm.stack[len(sm.stack) - 1]
|
||||
case op == "POP":
|
||||
sm.stack = sm.stack[:len(sm.stack) - 1]
|
||||
case op == "SUB":
|
||||
sm.store(idx, sm.waitAndPop().item.(int64) - sm.waitAndPop().item.(int64))
|
||||
case op == "CONCAT":
|
||||
str1 := sm.waitAndPop().item
|
||||
str2 := sm.waitAndPop().item
|
||||
switch str1.(type) {
|
||||
case string:
|
||||
sm.store(idx, str1.(string) + str2.(string))
|
||||
case []byte:
|
||||
sm.store(idx, append(str1.([]byte), str2.([]byte)...))
|
||||
default:
|
||||
panic("Invalid CONCAT parameter")
|
||||
}
|
||||
case op == "NEW_TRANSACTION":
|
||||
sm.newTransaction()
|
||||
case op == "USE_TRANSACTION":
|
||||
sm.switchTransaction(sm.waitAndPop().item.([]byte))
|
||||
case op == "ON_ERROR":
|
||||
sm.store(idx, sm.currentTransaction().OnError(fdb.Error{int(sm.waitAndPop().item.(int64))}))
|
||||
case op == "GET_READ_VERSION":
|
||||
_, e = rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
sm.lastVersion = rtr.GetReadVersion().MustGet()
|
||||
sm.store(idx, []byte("GOT_READ_VERSION"))
|
||||
return nil, nil
|
||||
})
|
||||
if e != nil { panic(e) }
|
||||
case op == "SET":
|
||||
key := fdb.Key(sm.waitAndPop().item.([]byte))
|
||||
value := sm.waitAndPop().item.([]byte)
|
||||
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Set(key, value)
|
||||
return nil, nil
|
||||
}, isDB, idx)
|
||||
case op == "LOG_STACK":
|
||||
prefix := sm.waitAndPop().item.([]byte)
|
||||
|
||||
entries := make(map[int]stackEntry)
|
||||
for len(sm.stack) > 0 {
|
||||
entries[len(sm.stack)-1] = sm.waitAndPop()
|
||||
if len(entries) == 100 {
|
||||
sm.logStack(entries, prefix)
|
||||
entries = make(map[int]stackEntry)
|
||||
}
|
||||
}
|
||||
|
||||
sm.logStack(entries, prefix)
|
||||
case op == "GET":
|
||||
key := fdb.Key(sm.waitAndPop().item.([]byte))
|
||||
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
return rtr.Get(key), nil
|
||||
})
|
||||
if e != nil { panic(e) }
|
||||
|
||||
sm.store(idx, res.(fdb.FutureByteSlice))
|
||||
case op == "COMMIT":
|
||||
sm.store(idx, sm.currentTransaction().Commit())
|
||||
case op == "RESET":
|
||||
sm.currentTransaction().Reset()
|
||||
case op == "CLEAR":
|
||||
key := fdb.Key(sm.waitAndPop().item.([]byte))
|
||||
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Clear(key)
|
||||
return nil, nil
|
||||
}, isDB, idx)
|
||||
case op == "SET_READ_VERSION":
|
||||
sm.currentTransaction().SetReadVersion(sm.lastVersion)
|
||||
case op == "WAIT_FUTURE":
|
||||
entry := sm.waitAndPop()
|
||||
sm.store(entry.idx, entry.item)
|
||||
case op == "GET_COMMITTED_VERSION":
|
||||
sm.lastVersion, e = sm.currentTransaction().GetCommittedVersion()
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
sm.store(idx, []byte("GOT_COMMITTED_VERSION"))
|
||||
case op == "GET_VERSIONSTAMP":
|
||||
sm.store(idx, sm.currentTransaction().GetVersionstamp())
|
||||
case op == "GET_KEY":
|
||||
sel := sm.popSelector()
|
||||
prefix := sm.waitAndPop().item.([]byte)
|
||||
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
return rtr.GetKey(sel).MustGet(), nil
|
||||
})
|
||||
if e != nil { panic(e) }
|
||||
|
||||
key := res.(fdb.Key)
|
||||
|
||||
if bytes.HasPrefix(key, prefix) {
|
||||
sm.store(idx, key)
|
||||
} else if bytes.Compare(key, prefix) < 0 {
|
||||
sm.store(idx, prefix)
|
||||
} else {
|
||||
s, e := fdb.Strinc(prefix)
|
||||
if e != nil { panic(e) }
|
||||
sm.store(idx, s)
|
||||
}
|
||||
case strings.HasPrefix(op, "GET_RANGE"):
|
||||
var r fdb.Range
|
||||
|
||||
switch op[9:] {
|
||||
case "_STARTS_WITH":
|
||||
r = sm.popPrefixRange()
|
||||
case "_SELECTOR":
|
||||
r = fdb.SelectorRange{sm.popSelector(), sm.popSelector()}
|
||||
case "":
|
||||
r = sm.popKeyRange()
|
||||
}
|
||||
|
||||
ro := sm.popRangeOptions()
|
||||
var prefix []byte = nil
|
||||
if op[9:] == "_SELECTOR" {
|
||||
prefix = sm.waitAndPop().item.([]byte)
|
||||
}
|
||||
|
||||
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
return rtr.GetRange(r, ro).GetSliceOrPanic(), nil
|
||||
})
|
||||
if e != nil { panic(e) }
|
||||
|
||||
sm.pushRange(idx, res.([]fdb.KeyValue), prefix)
|
||||
case strings.HasPrefix(op, "CLEAR_RANGE"):
|
||||
var er fdb.ExactRange
|
||||
|
||||
switch op[11:] {
|
||||
case "_STARTS_WITH":
|
||||
er = sm.popPrefixRange()
|
||||
case "":
|
||||
er = sm.popKeyRange()
|
||||
}
|
||||
|
||||
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.ClearRange(er)
|
||||
return nil, nil
|
||||
}, isDB, idx)
|
||||
case op == "TUPLE_PACK":
|
||||
var t tuple.Tuple
|
||||
count := sm.waitAndPop().item.(int64)
|
||||
for i := 0; i < int(count); i++ {
|
||||
t = append(t, sm.waitAndPop().item)
|
||||
}
|
||||
sm.store(idx, []byte(t.Pack()))
|
||||
case op == "TUPLE_UNPACK":
|
||||
t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
for _, el := range(t) {
|
||||
sm.store(idx, []byte(tuple.Tuple{el}.Pack()))
|
||||
}
|
||||
case op == "TUPLE_RANGE":
|
||||
var t tuple.Tuple
|
||||
count := sm.waitAndPop().item.(int64)
|
||||
for i := 0; i < int(count); i++ {
|
||||
t = append(t, sm.waitAndPop().item)
|
||||
}
|
||||
bk, ek := t.FDBRangeKeys()
|
||||
sm.store(idx, []byte(bk.FDBKey()))
|
||||
sm.store(idx, []byte(ek.FDBKey()))
|
||||
case op == "START_THREAD":
|
||||
newsm := newStackMachine(sm.waitAndPop().item.([]byte), verbose)
|
||||
sm.threads.Add(1)
|
||||
go func() {
|
||||
newsm.Run()
|
||||
sm.threads.Done()
|
||||
}()
|
||||
case op == "WAIT_EMPTY":
|
||||
prefix := sm.waitAndPop().item.([]byte)
|
||||
er, e := fdb.PrefixRange(prefix)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
db.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
v := tr.GetRange(er, fdb.RangeOptions{}).GetSliceOrPanic()
|
||||
if len(v) != 0 {
|
||||
panic(fdb.Error{1020})
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
sm.store(idx, []byte("WAITED_FOR_EMPTY"))
|
||||
case op == "READ_CONFLICT_RANGE":
|
||||
e = sm.currentTransaction().AddReadConflictRange(fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))})
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
sm.store(idx, []byte("SET_CONFLICT_RANGE"))
|
||||
case op == "WRITE_CONFLICT_RANGE":
|
||||
e = sm.currentTransaction().AddWriteConflictRange(fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))})
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
sm.store(idx, []byte("SET_CONFLICT_RANGE"))
|
||||
case op == "READ_CONFLICT_KEY":
|
||||
e = sm.currentTransaction().AddReadConflictKey(fdb.Key(sm.waitAndPop().item.([]byte)))
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
sm.store(idx, []byte("SET_CONFLICT_KEY"))
|
||||
case op == "WRITE_CONFLICT_KEY":
|
||||
e = sm.currentTransaction().AddWriteConflictKey(fdb.Key(sm.waitAndPop().item.([]byte)))
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
sm.store(idx, []byte("SET_CONFLICT_KEY"))
|
||||
case op == "ATOMIC_OP":
|
||||
opname := strings.Replace(strings.Title(strings.Replace(strings.ToLower(sm.waitAndPop().item.(string)), "_", " ", -1)), " ", "", -1)
|
||||
key := fdb.Key(sm.waitAndPop().item.([]byte))
|
||||
value := sm.waitAndPop().item.([]byte)
|
||||
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
|
||||
reflect.ValueOf(tr).MethodByName(opname).Call([]reflect.Value{reflect.ValueOf(key), reflect.ValueOf(value)})
|
||||
return nil, nil
|
||||
}, isDB, idx)
|
||||
case op == "DISABLE_WRITE_CONFLICT":
|
||||
sm.currentTransaction().Options().SetNextWriteNoWriteConflictRange()
|
||||
case op == "CANCEL":
|
||||
sm.currentTransaction().Cancel()
|
||||
case op == "UNIT_TESTS":
|
||||
db.Options().SetLocationCacheSize(100001)
|
||||
db.Options().SetMaxWatches(10001)
|
||||
|
||||
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Options().SetPrioritySystemImmediate()
|
||||
tr.Options().SetPriorityBatch()
|
||||
tr.Options().SetCausalReadRisky()
|
||||
tr.Options().SetCausalWriteRisky()
|
||||
tr.Options().SetReadYourWritesDisable()
|
||||
tr.Options().SetReadAheadDisable()
|
||||
tr.Options().SetReadSystemKeys()
|
||||
tr.Options().SetAccessSystemKeys()
|
||||
tr.Options().SetDurabilityDevNullIsWebScale()
|
||||
tr.Options().SetTimeout(60*1000)
|
||||
tr.Options().SetRetryLimit(50)
|
||||
tr.Options().SetMaxRetryDelay(100)
|
||||
tr.Options().SetUsedDuringCommitProtectionDisable()
|
||||
tr.Options().SetTransactionLoggingEnable("my_transaction")
|
||||
|
||||
return tr.Get(fdb.Key("\xff")).MustGet(), nil
|
||||
})
|
||||
|
||||
if e != nil { panic(e) }
|
||||
|
||||
sm.testWatches()
|
||||
sm.testLocality()
|
||||
|
||||
case strings.HasPrefix(op, "DIRECTORY_"):
|
||||
sm.de.processOp(sm, op[10:], isDB, idx, t, rt)
|
||||
default:
|
||||
log.Fatalf("Unhandled operation %s\n", string(inst[0].([]byte)))
|
||||
	}

	if sm.verbose {
		fmt.Printf(" to [")
		sm.dumpStack()
		fmt.Printf(" ] (%d)\n\n", len(sm.stack))
	}

	runtime.Gosched()
}

func (sm *StackMachine) Run() {
	r, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		return tr.GetRange(tuple.Tuple{sm.prefix}, fdb.RangeOptions{}).GetSliceOrPanic(), nil
	})
	if e != nil {
		panic(e)
	}

	instructions := r.([]fdb.KeyValue)

	for i, kv := range instructions {
		inst, _ := tuple.Unpack(fdb.Key(kv.Value))

		if sm.verbose {
			fmt.Printf("Instruction %d\n", i)
		}
		sm.processInst(i, inst)
	}

	sm.threads.Wait()
}

var db fdb.Database

func main() {
	var clusterFile string

	prefix := []byte(os.Args[1])
	if len(os.Args) > 3 {
		clusterFile = os.Args[3]
	}

	var e error
	var apiVersion int

	apiVersion, e = strconv.Atoi(os.Args[2])
	if e != nil {
		log.Fatal(e)
	}

	e = fdb.APIVersion(apiVersion)
	if e != nil {
		log.Fatal(e)
	}

	db, e = fdb.Open(clusterFile, []byte("DB"))
	if e != nil {
		log.Fatal(e)
	}

	sm := newStackMachine(prefix, verbose)

	sm.Run()
}
@ -0,0 +1,228 @@
/*
 * translate_fdb_options.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go options translator

package main

import (
	"encoding/xml"
	"fmt"
	"go/doc"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"unicode"
	"unicode/utf8"
)

type Option struct {
	Name        string `xml:"name,attr"`
	Code        int    `xml:"code,attr"`
	ParamType   string `xml:"paramType,attr"`
	ParamDesc   string `xml:"paramDescription,attr"`
	Description string `xml:"description,attr"`
	Hidden      bool   `xml:"hidden,attr"`
}
type Scope struct {
	Name   string `xml:"name,attr"`
	Option []Option
}
type Options struct {
	Scope []Scope
}
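
// Editorial note: judging from the xml struct tags above, the fdb.options
// input these structs unmarshal is shaped roughly like this (abridged,
// attribute values hypothetical):
//
//	<Options>
//	  <Scope name="TransactionOption">
//	    <Option name="timeout" code="500" paramType="Int"
//	            paramDescription="value in milliseconds of timeout" />
//	  </Scope>
//	</Options>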

func writeOptString(receiver string, function string, opt Option) {
	fmt.Printf(`func (o %s) %s(param string) error {
	return o.setOpt(%d, []byte(param))
}
`, receiver, function, opt.Code)
}

func writeOptBytes(receiver string, function string, opt Option) {
	fmt.Printf(`func (o %s) %s(param []byte) error {
	return o.setOpt(%d, param)
}
`, receiver, function, opt.Code)
}

func writeOptInt(receiver string, function string, opt Option) {
	fmt.Printf(`func (o %s) %s(param int64) error {
	b, e := int64ToBytes(param)
	if e != nil {
		return e
	}
	return o.setOpt(%d, b)
}
`, receiver, function, opt.Code)
}

func writeOptNone(receiver string, function string, opt Option) {
	fmt.Printf(`func (o %s) %s() error {
	return o.setOpt(%d, nil)
}
`, receiver, function, opt.Code)
}

func writeOpt(receiver string, opt Option) {
	function := "Set" + translateName(opt.Name)

	fmt.Println()

	if opt.Description != "" {
		fmt.Printf("// %s\n", opt.Description)
		if opt.ParamDesc != "" {
			fmt.Printf("//\n// Parameter: %s\n", opt.ParamDesc)
		}
	} else {
		fmt.Printf("// Not yet implemented.\n")
	}

	switch opt.ParamType {
	case "String":
		writeOptString(receiver, function, opt)
	case "Bytes":
		writeOptBytes(receiver, function, opt)
	case "Int":
		writeOptInt(receiver, function, opt)
	case "":
		writeOptNone(receiver, function, opt)
	default:
		log.Fatalf("Totally unexpected ParamType %s", opt.ParamType)
	}
}

func translateName(old string) string {
	return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
}
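
// For example, translateName("read_your_writes_disable") yields
// "ReadYourWritesDisable", which writeOpt then prefixes with "Set".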

func lowerFirst(s string) string {
	if s == "" {
		return ""
	}
	r, n := utf8.DecodeRuneInString(s)
	return string(unicode.ToLower(r)) + s[n:]
}

func writeMutation(opt Option) {
	desc := lowerFirst(opt.Description)
	tname := translateName(opt.Name)
	fmt.Printf(`
// %s %s
func (t Transaction) %s(key KeyConvertible, param []byte) {
	t.atomicOp(key.FDBKey(), param, %d)
}
`, tname, desc, tname, opt.Code)
}
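
// As an illustration (the name, code, and description here are hypothetical
// stand-ins for whatever fdb.options specifies), a MutationType entry named
// "add" with code 2 would come out of the template above as:
//
//	// Add performs an addition of little-endian integers...
//	func (t Transaction) Add(key KeyConvertible, param []byte) {
//		t.atomicOp(key.FDBKey(), param, 2)
//	}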

func writeEnum(scope Scope, opt Option, delta int) {
	fmt.Println()
	if opt.Description != "" {
		doc.ToText(os.Stdout, opt.Description, " // ", "", 73)
		// fmt.Printf(" // %s\n", opt.Description)
	}
	fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
}

func main() {
	var err error

	v := Options{}

	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}

	err = xml.Unmarshal(data, &v)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Print(`// DO NOT EDIT THIS FILE BY HAND. This file was generated using
// translate_fdb_options.go, part of the fdb-go repository, and a copy of the
// fdb.options file (installed as part of the FoundationDB client, typically
// found as /usr/include/foundationdb/fdb.options).

// To regenerate this file, from the top level of an fdb-go repository checkout,
// run:
// $ go run _util/translate_fdb_options.go < /usr/include/foundationdb/fdb.options > fdb/generated.go

package fdb

import (
	"bytes"
	"encoding/binary"
)

func int64ToBytes(i int64) ([]byte, error) {
	buf := new(bytes.Buffer)
	if e := binary.Write(buf, binary.LittleEndian, i); e != nil {
		return nil, e
	}
	return buf.Bytes(), nil
}
`)

	for _, scope := range v.Scope {
		if strings.HasSuffix(scope.Name, "Option") {
			receiver := scope.Name + "s"

			for _, opt := range scope.Option {
				if opt.Description != "Deprecated" && !opt.Hidden { // Eww
					writeOpt(receiver, opt)
				}
			}
			continue
		}

		if scope.Name == "MutationType" {
			for _, opt := range scope.Option {
				if opt.Description != "Deprecated" && !opt.Hidden { // Eww
					writeMutation(opt)
				}
			}
			continue
		}

		// We really need the default StreamingMode (0) to be ITERATOR
		var d int
		if scope.Name == "StreamingMode" {
			d = 1
		}

		// ConflictRangeType shouldn't be exported
		if scope.Name == "ConflictRangeType" {
			scope.Name = "conflictRangeType"
		}

		fmt.Printf(`
type %s int
const (
`, scope.Name)
		for _, opt := range scope.Option {
			if !opt.Hidden {
				writeEnum(scope, opt, d)
			}
		}
		fmt.Println(")")
	}
}
@ -0,0 +1,74 @@
/*
 * cluster.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

/*
#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
*/
import "C"

import (
	"runtime"
)

// Cluster is a handle to a FoundationDB cluster. Cluster is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
//
// It is generally preferable to use Open or OpenDefault to obtain a database
// handle directly.
type Cluster struct {
	*cluster
}

type cluster struct {
	ptr *C.FDBCluster
}

func (c *cluster) destroy() {
	C.fdb_cluster_destroy(c.ptr)
}

// OpenDatabase returns a database handle from the FoundationDB cluster. It is
// generally preferable to use Open or OpenDefault to obtain a database handle
// directly.
//
// In the current release, the database name must be []byte("DB").
func (c Cluster) OpenDatabase(dbName []byte) (Database, error) {
	f := C.fdb_cluster_create_database(c.ptr, byteSliceToPtr(dbName), C.int(len(dbName)))
	fdb_future_block_until_ready(f)

	var outd *C.FDBDatabase

	if err := C.fdb_future_get_database(f, &outd); err != 0 {
		return Database{}, Error{int(err)}
	}

	C.fdb_future_destroy(f)

	d := &database{outd}
	runtime.SetFinalizer(d, (*database).destroy)

	return Database{d}, nil
}
@ -0,0 +1,238 @@
/*
 * database.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

/*
#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
*/
import "C"

import (
	"runtime"
)

// Database is a handle to a FoundationDB database. Database is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
//
// Although Database provides convenience methods for reading and writing data,
// modifications to a database are usually made via transactions, which are
// usually created and committed automatically by the (Database).Transact
// method.
type Database struct {
	*database
}

type database struct {
	ptr *C.FDBDatabase
}

// DatabaseOptions is a handle with which to set options that affect a Database
// object. A DatabaseOptions instance should be obtained with the
// (Database).Options method.
type DatabaseOptions struct {
	d *database
}

func (opt DatabaseOptions) setOpt(code int, param []byte) error {
	return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
		return C.fdb_database_set_option(opt.d.ptr, C.FDBDatabaseOption(code), p, pl)
	}, param)
}

func (d *database) destroy() {
	C.fdb_database_destroy(d.ptr)
}

// CreateTransaction returns a new FoundationDB transaction. It is generally
// preferable to use the (Database).Transact method, which handles
// automatically creating and committing a transaction with appropriate retry
// behavior.
func (d Database) CreateTransaction() (Transaction, error) {
	var outt *C.FDBTransaction

	if err := C.fdb_database_create_transaction(d.ptr, &outt); err != 0 {
		return Transaction{}, Error{int(err)}
	}

	t := &transaction{outt, d}
	runtime.SetFinalizer(t, (*transaction).destroy)

	return Transaction{t}, nil
}

func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNil) (ret interface{}, e error) {
	for {
		ret, e = wrapped()

		/* No error means success! */
		if e == nil {
			return
		}

		ep, ok := e.(Error)
		if ok {
			e = onError(ep).Get()
		}

		/* If OnError returns an error, then it's not retryable;
		   otherwise take another pass at things */
		if e != nil {
			return
		}
	}
}

// Transact runs a caller-provided function inside a retry loop, providing it
// with a newly created Transaction. After the function returns, the Transaction
// will be committed automatically. Any error during execution of the function
// (by panic or return) or the commit will cause the function and commit to be
// retried or, if fatal, return the error to the caller.
//
// When working with Future objects in a transactional function, you may either
// explicitly check and return error values using Get, or call MustGet. Transact
// will recover a panicked Error and either retry the transaction or return the
// error.
//
// Do not return Future objects from the function provided to Transact. The
// Transaction created by Transact may be finalized at any point after Transact
// returns, resulting in the cancellation of any outstanding
// reads. Additionally, any errors returned or panicked by the Future will no
// longer be able to trigger a retry of the caller-provided function.
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{}, error) {
	tr, e := d.CreateTransaction()
	/* Any error here is non-retryable */
	if e != nil {
		return nil, e
	}

	wrapped := func() (ret interface{}, e error) {
		defer panicToError(&e)

		ret, e = f(tr)

		if e == nil {
			e = tr.Commit().Get()
		}

		return
	}

	return retryable(wrapped, tr.OnError)
}
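
// exampleTransact is an editorial sketch, not part of the original file: it
// shows the intended shape of a Transact call. The key and value used here
// are hypothetical.
func exampleTransact(d Database) (interface{}, error) {
	return d.Transact(func(tr Transaction) (interface{}, error) {
		tr.Set(Key("hello"), []byte("world"))
		// MustGet panics on error; Transact recovers the panic and either
		// retries the transaction or surfaces the error, as described above.
		return tr.Get(Key("hello")).MustGet(), nil
	})
}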

// ReadTransact runs a caller-provided function inside a retry loop, providing
// it with a newly created Transaction (as a ReadTransaction). Any error during
// execution of the function (by panic or return) will cause the function to be
// retried or, if fatal, return the error to the caller.
//
// When working with Future objects in a read-only transactional function, you
// may either explicitly check and return error values using Get, or call
// MustGet. ReadTransact will recover a panicked Error and either retry the
// transaction or return the error.
//
// Do not return Future objects from the function provided to ReadTransact. The
// Transaction created by ReadTransact may be finalized at any point after
// ReadTransact returns, resulting in the cancellation of any outstanding
// reads. Additionally, any errors returned or panicked by the Future will no
// longer be able to trigger a retry of the caller-provided function.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (d Database) ReadTransact(f func(ReadTransaction) (interface{}, error)) (interface{}, error) {
	tr, e := d.CreateTransaction()
	/* Any error here is non-retryable */
	if e != nil {
		return nil, e
	}

	wrapped := func() (ret interface{}, e error) {
		defer panicToError(&e)

		ret, e = f(tr)

		if e == nil {
			e = tr.Commit().Get()
		}

		return
	}

	return retryable(wrapped, tr.OnError)
}

// Options returns a DatabaseOptions instance suitable for setting options
// specific to this database.
func (d Database) Options() DatabaseOptions {
	return DatabaseOptions{d.database}
}

// LocalityGetBoundaryKeys returns a slice of keys that fall within the provided
// range. Each key is located at the start of a contiguous range stored on a
// single server.
//
// If limit is non-zero, only the first limit keys will be returned. In large
// databases, the number of boundary keys may be large. In these cases, a
// non-zero limit should be used, along with multiple calls to
// LocalityGetBoundaryKeys.
//
// If readVersion is non-zero, the boundary keys as of readVersion will be
// returned.
func (d Database) LocalityGetBoundaryKeys(er ExactRange, limit int, readVersion int64) ([]Key, error) {
	tr, e := d.CreateTransaction()
	if e != nil {
		return nil, e
	}

	if readVersion != 0 {
		tr.SetReadVersion(readVersion)
	}

	tr.Options().SetReadSystemKeys()
	tr.Options().SetLockAware()

	bk, ek := er.FDBRangeKeys()
	ffer := KeyRange{append(Key("\xFF/keyServers/"), bk.FDBKey()...), append(Key("\xFF/keyServers/"), ek.FDBKey()...)}

	kvs, e := tr.Snapshot().GetRange(ffer, RangeOptions{Limit: limit}).GetSliceWithError()
	if e != nil {
		return nil, e
	}

	size := len(kvs)
	if limit != 0 && limit < size {
		size = limit
	}

	boundaries := make([]Key, size)

	for i := 0; i < size; i++ {
		boundaries[i] = kvs[i].Key[13:]
	}

	return boundaries, nil
}
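
// exampleBoundaryKeys is an editorial sketch, not part of the original file:
// fetch up to 1000 shard boundary keys across the whole user keyspace at the
// database's current version. The range and limit are hypothetical.
func exampleBoundaryKeys(d Database) ([]Key, error) {
	return d.LocalityGetBoundaryKeys(KeyRange{Key(""), Key("\xff")}, 1000, 0)
}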
@ -0,0 +1,156 @@
/*
 * allocator.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go Directory Layer

package directory

import (
	"bytes"
	"encoding/binary"
	"fdb"
	"fdb/subspace"
	"math/rand"
	"sync"
)

var oneBytes = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
var allocatorMutex = sync.Mutex{}

type highContentionAllocator struct {
	counters, recent subspace.Subspace
}

func newHCA(s subspace.Subspace) highContentionAllocator {
	var hca highContentionAllocator

	hca.counters = s.Sub(0)
	hca.recent = s.Sub(1)

	return hca
}

func windowSize(start int64) int64 {
	// Larger window sizes are better for high contention, smaller sizes for
	// keeping the keys small. But if there are many allocations, the keys
	// can't be too small. So start small and scale up. We don't want this to
	// ever get *too* big because we have to store about window_size/2 recent
	// items.
	if start < 255 { return 64 }
	if start < 65535 { return 1024 }
	return 8192
}
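
// Concretely: windowSize(0) == 64, windowSize(1000) == 1024, and any start at
// or above 65535 gets the full 8192-wide window.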

func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subspace) (subspace.Subspace, error) {
	for {
		rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
		kvs := rr.GetSliceOrPanic()

		var start int64
		var window int64

		if len(kvs) == 1 {
			t, e := hca.counters.Unpack(kvs[0].Key)
			if e != nil {
				return nil, e
			}
			start = t[0].(int64)
		}

		windowAdvanced := false
		for {
			allocatorMutex.Lock()

			if windowAdvanced {
				tr.ClearRange(fdb.KeyRange{hca.counters, hca.counters.Sub(start)})
				tr.Options().SetNextWriteNoWriteConflictRange()
				tr.ClearRange(fdb.KeyRange{hca.recent, hca.recent.Sub(start)})
			}

			// Increment the allocation count for the current window
			tr.Add(hca.counters.Sub(start), oneBytes)
			countFuture := tr.Snapshot().Get(hca.counters.Sub(start))

			allocatorMutex.Unlock()

			countStr, e := countFuture.Get()
			if e != nil {
				return nil, e
			}

			var count int64
			if countStr == nil {
				count = 0
			} else {
				e = binary.Read(bytes.NewBuffer(countStr), binary.LittleEndian, &count)
				if e != nil {
					return nil, e
				}
			}

			window = windowSize(start)
			if count*2 < window {
				break
			}

			start += window
			windowAdvanced = true
		}

		for {
			// As of the snapshot being read from, the window is less than half
			// full, so this should be expected to take 2 tries. Under high
			// contention (and when the window advances), there is an additional
			// subsequent risk of conflict for this transaction.
			candidate := rand.Int63n(window) + start
			key := hca.recent.Sub(candidate)

			allocatorMutex.Lock()

			latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
			candidateValue := tr.Get(key)
			tr.Options().SetNextWriteNoWriteConflictRange()
			tr.Set(key, []byte(""))

			allocatorMutex.Unlock()

			kvs = latestCounter.GetSliceOrPanic()
			if len(kvs) > 0 {
				t, e := hca.counters.Unpack(kvs[0].Key)
				if e != nil {
					return nil, e
				}
				currentStart := t[0].(int64)
				if currentStart > start {
					break
				}
			}

			v, e := candidateValue.Get()
			if e != nil {
				return nil, e
			}
			if v == nil {
				tr.AddWriteConflictKey(key)
				return s.Sub(candidate), nil
			}
		}
	}
}
@ -0,0 +1,241 @@
/*
 * directory.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go Directory Layer

// Package directory provides a tool for managing related subspaces. Directories
// are a recommended approach for administering applications. Each application
// should create or open at least one directory to manage its subspaces.
//
// For general guidance on directory usage, see the Directories section of the
// Developer Guide
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-directories).
//
// Directories are identified by hierarchical paths analogous to the paths in a
// Unix-like file system. A path is represented as a slice of strings. Each
// directory has an associated subspace used to store its content. The directory
// layer maps each path to a short prefix used for the corresponding
// subspace. In effect, directories provide a level of indirection for access to
// subspaces.
//
// Directory operations are transactional. A byte slice layer option is used as
// a metadata identifier when opening a directory.
package directory

import (
	"errors"
	"fdb"
	"fdb/subspace"
)

const (
	_SUBDIRS int = 0

	// []int32{1,0,0} by any other name
	_MAJORVERSION int32 = 1
	_MINORVERSION int32 = 0
	_MICROVERSION int32 = 0
)

// Directory represents a subspace of keys in a FoundationDB database,
// identified by a hierarchical path.
type Directory interface {
	// CreateOrOpen opens the directory specified by path (relative to this
	// Directory), and returns the directory and its contents as a
	// DirectorySubspace. If the directory does not exist, it is created
	// (creating parent directories if necessary).
	//
	// If the byte slice layer is specified and the directory is new, it is
	// recorded as the layer; if layer is specified and the directory already
	// exists, it is compared against the layer specified when the directory was
	// created, and an error is returned if they differ.
	CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error)

	// Open opens the directory specified by path (relative to this Directory),
	// and returns the directory and its contents as a DirectorySubspace (or an
	// error if the directory does not exist).
	//
	// If the byte slice layer is specified, it is compared against the layer
	// specified when the directory was created, and an error is returned if
	// they differ.
	Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error)

	// Create creates a directory specified by path (relative to this
	// Directory), and returns the directory and its contents as a
	// DirectorySubspace (or an error if the directory already exists).
	//
	// If the byte slice layer is specified, it is recorded as the layer and
	// will be checked when opening the directory in the future.
	Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error)

	// CreatePrefix behaves like Create, but uses a manually specified byte
	// slice prefix to physically store the contents of this directory, rather
	// than an automatically allocated prefix.
	//
	// If this Directory was created in a root directory that does not allow
	// manual prefixes, CreatePrefix will return an error. The default root
	// directory does not allow manual prefixes.
	CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error)

	// Move moves the directory at oldPath to newPath (both relative to this
	// Directory), and returns the directory (at its new location) and its
	// contents as a DirectorySubspace. Move will return an error if a directory
	// does not exist at oldPath, a directory already exists at newPath, or the
	// parent directory of newPath does not exist.
	//
	// There is no effect on the physical prefix of the given directory or on
	// clients that already have the directory open.
	Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error)

	// MoveTo moves this directory to newAbsolutePath (relative to the root
	// directory of this Directory), and returns the directory (at its new
	// location) and its contents as a DirectorySubspace. MoveTo will return an
	// error if a directory already exists at newAbsolutePath or the parent
	// directory of newAbsolutePath does not exist.
	//
	// There is no effect on the physical prefix of the given directory or on
	// clients that already have the directory open.
	MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error)

	// Remove removes the directory at path (relative to this Directory), its
	// content, and all subdirectories. Remove returns true if a directory
	// existed at path and was removed, and false if no directory exists at
	// path.
	//
	// Note that clients that have already opened this directory might still
	// insert data into its contents after removal.
	Remove(t fdb.Transactor, path []string) (bool, error)

	// Exists returns true if the directory at path (relative to this Directory)
	// exists, and false otherwise.
	Exists(rt fdb.ReadTransactor, path []string) (bool, error)

	// List returns the names of the immediate subdirectories of the directory
	// at path (relative to this Directory) as a slice of strings. Each string
	// is the name of the last component of a subdirectory's path.
	List(rt fdb.ReadTransactor, path []string) ([]string, error)

	// GetLayer returns the layer specified when this Directory was created.
	GetLayer() []byte

	// GetPath returns the path with which this Directory was opened.
	GetPath() []string
}

func stringsEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}

func moveTo(t fdb.Transactor, dl directoryLayer, path, newAbsolutePath []string) (DirectorySubspace, error) {
	partition_len := len(dl.path)

	if !stringsEqual(newAbsolutePath[:partition_len], dl.path) {
		return nil, errors.New("cannot move between partitions")
	}

	return dl.Move(t, path[partition_len:], newAbsolutePath[partition_len:])
}

var root = NewDirectoryLayer(subspace.FromBytes([]byte{0xFE}), subspace.AllKeys(), false)

// CreateOrOpen opens the directory specified by path (resolved relative to the
// default root directory), and returns the directory and its contents as a
// DirectorySubspace. If the directory does not exist, it is created (creating
// parent directories if necessary).
//
// If the byte slice layer is specified and the directory is new, it is recorded
// as the layer; if layer is specified and the directory already exists, it is
// compared against the layer specified when the directory was created, and an
// error is returned if they differ.
func CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
	return root.CreateOrOpen(t, path, layer)
}

// Open opens the directory specified by path (resolved relative to the default
// root directory), and returns the directory and its contents as a
// DirectorySubspace (or an error if the directory does not exist).
//
// If the byte slice layer is specified, it is compared against the layer
// specified when the directory was created, and an error is returned if they
// differ.
func Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
	return root.Open(rt, path, layer)
}

// Create creates a directory specified by path (resolved relative to the
// default root directory), and returns the directory and its contents as a
// DirectorySubspace (or an error if the directory already exists).
//
// If the byte slice layer is specified, it is recorded as the layer and will be
// checked when opening the directory in the future.
func Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
	return root.Create(t, path, layer)
}
// Move moves the directory at oldPath to newPath (both resolved relative to the
|
||||
// default root directory), and returns the directory (at its new location) and
|
||||
// its contents as a DirectorySubspace. Move will return an error if a directory
|
||||
// does not exist at oldPath, a directory already exists at newPath, or the
|
||||
// parent directory of newPath does not exist.
|
||||
//
|
||||
// There is no effect on the physical prefix of the given directory or on
|
||||
// clients that already have the directory open.
|
||||
func Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
|
||||
return root.Move(t, oldPath, newPath)
|
||||
}
|
||||
|
||||
// Exists returns true if the directory at path (relative to the default root
|
||||
// directory) exists, and false otherwise.
|
||||
func Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
|
||||
return root.Exists(rt, path)
|
||||
}
|
||||
|
||||
// List returns the names of the immediate subdirectories of the directory at
|
||||
// path (resolved relative to the default root directory) as a slice of
|
||||
// strings. Each string is the name of the last component of a subdirectory's path.
|
||||
func List(rt fdb.ReadTransactor, path []string) ([]string, error) {
|
||||
return root.List(rt, path)
|
||||
}
|
||||
|
||||
// Root returns the default root directory. Any attempt to move or remove the
|
||||
// root directory will return an error.
|
||||
//
|
||||
// The default root directory stores directory layer metadata in keys beginning
|
||||
// with 0xFE, and allocates newly created directories in (unused) prefixes
|
||||
// starting with 0x00 through 0xFD. This is appropriate for otherwise empty
|
||||
// databases, but may conflict with other formal or informal partitionings of
|
||||
// keyspace. If you already have other content in your database, you may wish to
|
||||
// use NewDirectoryLayer to construct a non-standard root directory to control
|
||||
// where metadata and keys are stored.
|
||||
//
|
||||
// As an alternative to Root, you may use the package-level functions
|
||||
// CreateOrOpen, Open, Create, Move, Exists and List to operate
|
||||
// directly on the default DirectoryLayer.
|
||||
func Root() Directory {
|
||||
return root
|
||||
}
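// The example below is an editorial sketch, not part of the original API
// surface; it shows how the package-level helpers compose. Here db is
// assumed to be an open fdb.Database and the path components are
// hypothetical:
//
//	dir, e := directory.CreateOrOpen(db, []string{"application", "users"}, nil)
//	if e != nil {
//		// handle the error
//	}
//	// dir is a DirectorySubspace; its keys live under an allocated prefix.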
|
|
@ -0,0 +1,589 @@
|
|||
/*
|
||||
* directoryLayer.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go Directory Layer
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"fdb/tuple"
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type directoryLayer struct {
|
||||
nodeSS subspace.Subspace
|
||||
contentSS subspace.Subspace
|
||||
|
||||
allowManualPrefixes bool
|
||||
|
||||
allocator highContentionAllocator
|
||||
rootNode subspace.Subspace
|
||||
|
||||
path []string
|
||||
}
|
||||
|
||||
// NewDirectoryLayer returns a new root directory (as a Directory). The
|
||||
// subspaces nodeSS and contentSS control where the directory metadata and
|
||||
// contents are stored. The default root directory has a nodeSS of
|
||||
// subspace.FromBytes([]byte{0xFE}) and a contentSS of
|
||||
// subspace.AllKeys(). Specifying more restrictive values for nodeSS and
|
||||
// contentSS will allow using the directory layer alongside other content in a
|
||||
// database.
|
||||
//
|
||||
// If allowManualPrefixes is false, all calls to CreatePrefix on the returned
|
||||
// Directory (or any subdirectories) will fail, and all directory prefixes will
|
||||
// be automatically allocated. The default root directory does not allow manual
|
||||
// prefixes.
|
||||
func NewDirectoryLayer(nodeSS, contentSS subspace.Subspace, allowManualPrefixes bool) Directory {
|
||||
var dl directoryLayer
|
||||
|
||||
dl.nodeSS = subspace.FromBytes(nodeSS.Bytes())
|
||||
dl.contentSS = subspace.FromBytes(contentSS.Bytes())
|
||||
|
||||
dl.allowManualPrefixes = allowManualPrefixes
|
||||
|
||||
dl.rootNode = dl.nodeSS.Sub(dl.nodeSS.Bytes())
|
||||
dl.allocator = newHCA(dl.rootNode.Sub([]byte("hca")))
|
||||
|
||||
return dl
|
||||
}
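// An editorial sketch (not in the original source) of a root directory that
// coexists with other data by confining both directory metadata and contents
// to a hypothetical 0x01-prefixed region of the keyspace:
//
//	custom := directory.NewDirectoryLayer(
//		subspace.FromBytes([]byte{0x01, 0xFE}), // nodeSS: directory metadata
//		subspace.FromBytes([]byte{0x01}),       // contentSS: directory contents
//		false)                                  // automatic prefixes only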
|
||||
|
||||
func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transaction, path []string, layer []byte, prefix []byte, allowCreate, allowOpen bool) (DirectorySubspace, error) {
|
||||
if e := dl.checkVersion(rtr, nil); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
if prefix != nil && !dl.allowManualPrefixes {
|
||||
if len(dl.path) == 0 {
|
||||
return nil, errors.New("cannot specify a prefix unless manual prefixes are enabled")
|
||||
} else {
|
||||
return nil, errors.New("cannot specify a prefix in a partition")
|
||||
}
|
||||
}
|
||||
|
||||
if len(path) == 0 {
|
||||
return nil, errors.New("the root directory cannot be opened")
|
||||
}
|
||||
|
||||
existingNode := dl.find(rtr, path).prefetchMetadata(rtr)
|
||||
if existingNode.exists() {
|
||||
if existingNode.isInPartition(nil, false) {
|
||||
subpath := existingNode.getPartitionSubpath()
|
||||
enc, e := existingNode.getContents(dl, nil)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return enc.(directoryPartition).createOrOpen(rtr, tr, subpath, layer, prefix, allowCreate, allowOpen)
|
||||
}
|
||||
|
||||
if !allowOpen {
|
||||
return nil, errors.New("the directory already exists")
|
||||
}
|
||||
|
||||
if layer != nil && !bytes.Equal(existingNode._layer.MustGet(), layer) {
|
||||
return nil, errors.New("the directory was created with an incompatible layer")
|
||||
}
|
||||
|
||||
return existingNode.getContents(dl, nil)
|
||||
}
|
||||
|
||||
if !allowCreate {
|
||||
return nil, errors.New("the directory does not exist")
|
||||
}
|
||||
|
||||
if e := dl.checkVersion(rtr, tr); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
if prefix == nil {
|
||||
newss, e := dl.allocator.allocate(*tr, dl.contentSS)
|
||||
if e != nil {
|
||||
return nil, fmt.Errorf("unable to allocate new directory prefix (%s)", e.Error())
|
||||
}
|
||||
|
||||
if !isRangeEmpty(rtr, newss) {
|
||||
return nil, fmt.Errorf("the database has keys stored at the prefix chosen by the automatic prefix allocator: %v", prefix)
|
||||
}
|
||||
|
||||
prefix = newss.Bytes()
|
||||
|
||||
pf, e := dl.isPrefixFree(rtr.Snapshot(), prefix)
|
||||
if e != nil {
return nil, e
}
|
||||
if !pf {
|
||||
return nil, errors.New("the directory layer has manually allocated prefixes that conflict with the automatic prefix allocator")
|
||||
}
|
||||
} else {
|
||||
pf, e := dl.isPrefixFree(rtr, prefix)
|
||||
if e != nil {
return nil, e
}
|
||||
if !pf {
|
||||
return nil, errors.New("the given prefix is already in use")
|
||||
}
|
||||
}
|
||||
|
||||
var parentNode subspace.Subspace
|
||||
|
||||
if len(path) > 1 {
|
||||
pd, e := dl.createOrOpen(rtr, tr, path[:len(path)-1], nil, nil, true, true)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
parentNode = dl.nodeWithPrefix(pd.Bytes())
|
||||
} else {
|
||||
parentNode = dl.rootNode
|
||||
}
|
||||
|
||||
if parentNode == nil {
|
||||
return nil, errors.New("the parent directory does not exist")
|
||||
}
|
||||
|
||||
node := dl.nodeWithPrefix(prefix)
|
||||
tr.Set(parentNode.Sub(_SUBDIRS, path[len(path)-1]), prefix)
|
||||
|
||||
if layer == nil {
|
||||
layer = []byte{}
|
||||
}
|
||||
|
||||
tr.Set(node.Sub([]byte("layer")), layer)
|
||||
|
||||
return dl.contentsOfNode(node, path, layer)
|
||||
}
|
||||
|
||||
func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
return dl.createOrOpen(tr, &tr, path, layer, nil, true, true)
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.(DirectorySubspace), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
return dl.createOrOpen(tr, &tr, path, layer, nil, true, false)
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.(DirectorySubspace), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error) {
|
||||
if prefix == nil {
|
||||
prefix = []byte{}
|
||||
}
|
||||
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
return dl.createOrOpen(tr, &tr, path, layer, prefix, true, false)
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.(DirectorySubspace), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
return dl.createOrOpen(rtr, nil, path, layer, nil, false, true)
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.(DirectorySubspace), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
|
||||
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
if e := dl.checkVersion(rtr, nil); e != nil {
|
||||
return false, e
|
||||
}
|
||||
|
||||
node := dl.find(rtr, path).prefetchMetadata(rtr)
|
||||
if !node.exists() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if node.isInPartition(nil, false) {
|
||||
nc, e := node.getContents(dl, nil)
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
return nc.Exists(rtr, node.getPartitionSubpath())
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
return r.(bool), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) List(rt fdb.ReadTransactor, path []string) ([]string, error) {
|
||||
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
|
||||
if e := dl.checkVersion(rtr, nil); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
node := dl.find(rtr, path).prefetchMetadata(rtr)
|
||||
if !node.exists() {
|
||||
return nil, errors.New("the directory does not exist")
|
||||
}
|
||||
|
||||
if node.isInPartition(nil, true) {
|
||||
nc, e := node.getContents(dl, nil)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return nc.List(rtr, node.getPartitionSubpath())
|
||||
}
|
||||
|
||||
return dl.subdirNames(rtr, node.subspace)
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.([]string), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
|
||||
return nil, errors.New("the root directory cannot be moved")
|
||||
}
|
||||
|
||||
func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
|
||||
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
if e := dl.checkVersion(tr, &tr); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
sliceEnd := len(oldPath)
|
||||
if sliceEnd > len(newPath) {
|
||||
sliceEnd = len(newPath)
|
||||
}
|
||||
if stringsEqual(oldPath, newPath[:sliceEnd]) {
|
||||
return nil, errors.New("the destination directory cannot be a subdirectory of the source directory")
|
||||
}
|
||||
|
||||
oldNode := dl.find(tr, oldPath).prefetchMetadata(tr)
|
||||
newNode := dl.find(tr, newPath).prefetchMetadata(tr)
|
||||
|
||||
if !oldNode.exists() {
|
||||
return nil, errors.New("the source directory does not exist")
|
||||
}
|
||||
|
||||
if oldNode.isInPartition(nil, false) || newNode.isInPartition(nil, false) {
|
||||
if !oldNode.isInPartition(nil, false) || !newNode.isInPartition(nil, false) || !stringsEqual(oldNode.path, newNode.path) {
|
||||
return nil, errors.New("cannot move between partitions")
|
||||
}
|
||||
|
||||
nnc, e := newNode.getContents(dl, nil)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return nnc.Move(tr, oldNode.getPartitionSubpath(), newNode.getPartitionSubpath())
|
||||
}
|
||||
|
||||
if newNode.exists() {
|
||||
return nil, errors.New("the destination directory already exists. Remove it first")
|
||||
}
|
||||
|
||||
parentNode := dl.find(tr, newPath[:len(newPath)-1])
|
||||
if !parentNode.exists() {
|
||||
return nil, errors.New("the parent of the destination directory does not exist. Create it first")
|
||||
}
|
||||
|
||||
p, e := dl.nodeSS.Unpack(oldNode.subspace)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
tr.Set(parentNode.subspace.Sub(_SUBDIRS, newPath[len(newPath)-1]), p[0].([]byte))
|
||||
|
||||
dl.removeFromParent(tr, oldPath)
|
||||
|
||||
return dl.contentsOfNode(oldNode.subspace, newPath, oldNode._layer.MustGet())
|
||||
})
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return r.(DirectorySubspace), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) Remove(t fdb.Transactor, path []string) (bool, error) {
|
||||
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
if e := dl.checkVersion(tr, &tr); e != nil {
|
||||
return false, e
|
||||
}
|
||||
|
||||
if len(path) == 0 {
|
||||
return false, errors.New("the root directory cannot be removed")
|
||||
}
|
||||
|
||||
node := dl.find(tr, path).prefetchMetadata(tr)
|
||||
|
||||
if !node.exists() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if node.isInPartition(nil, false) {
|
||||
nc, e := node.getContents(dl, nil)
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
return nc.(directoryPartition).Remove(tr, node.getPartitionSubpath())
|
||||
}
|
||||
|
||||
if e := dl.removeRecursive(tr, node.subspace); e != nil {
|
||||
return false, e
|
||||
}
|
||||
dl.removeFromParent(tr, path)
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
return r.(bool), nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) removeRecursive(tr fdb.Transaction, node subspace.Subspace) error {
|
||||
nodes := dl.subdirNodes(tr, node)
|
||||
for i := range nodes {
|
||||
if e := dl.removeRecursive(tr, nodes[i]); e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
p, e := dl.nodeSS.Unpack(node)
|
||||
if e != nil {
return e
}
|
||||
kr, e := fdb.PrefixRange(p[0].([]byte))
|
||||
if e != nil {
return e
}
|
||||
|
||||
tr.ClearRange(kr)
|
||||
tr.ClearRange(node)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) removeFromParent(tr fdb.Transaction, path []string) {
|
||||
parent := dl.find(tr, path[:len(path)-1])
|
||||
tr.Clear(parent.subspace.Sub(_SUBDIRS, path[len(path)-1]))
|
||||
}
|
||||
|
||||
func (dl directoryLayer) GetLayer() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
func (dl directoryLayer) GetPath() []string {
|
||||
return dl.path
|
||||
}
|
||||
|
||||
func (dl directoryLayer) subdirNames(rtr fdb.ReadTransaction, node subspace.Subspace) ([]string, error) {
|
||||
sd := node.Sub(_SUBDIRS)
|
||||
|
||||
rr := rtr.GetRange(sd, fdb.RangeOptions{})
|
||||
ri := rr.Iterator()
|
||||
|
||||
var ret []string
|
||||
|
||||
for ri.Advance() {
|
||||
kv := ri.MustGet()
|
||||
|
||||
p, e := sd.Unpack(kv.Key)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
ret = append(ret, p[0].(string))
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) subdirNodes(tr fdb.Transaction, node subspace.Subspace) []subspace.Subspace {
|
||||
sd := node.Sub(_SUBDIRS)
|
||||
|
||||
rr := tr.GetRange(sd, fdb.RangeOptions{})
|
||||
ri := rr.Iterator()
|
||||
|
||||
var ret []subspace.Subspace
|
||||
|
||||
for ri.Advance() {
|
||||
kv := ri.MustGet()
|
||||
|
||||
ret = append(ret, dl.nodeWithPrefix(kv.Value))
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (dl directoryLayer) nodeContainingKey(rtr fdb.ReadTransaction, key []byte) (subspace.Subspace, error) {
|
||||
if bytes.HasPrefix(key, dl.nodeSS.Bytes()) {
|
||||
return dl.rootNode, nil
|
||||
}
|
||||
|
||||
bk, _ := dl.nodeSS.FDBRangeKeys()
|
||||
kr := fdb.KeyRange{Begin: bk, End: fdb.Key(append(dl.nodeSS.Pack(tuple.Tuple{key}), 0x00))}
|
||||
|
||||
kvs := rtr.GetRange(kr, fdb.RangeOptions{Reverse:true, Limit:1}).GetSliceOrPanic()
|
||||
if len(kvs) == 1 {
|
||||
pp, e := dl.nodeSS.Unpack(kvs[0].Key)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
prevPrefix := pp[0].([]byte)
|
||||
if bytes.HasPrefix(key, prevPrefix) {
|
||||
return dl.nodeWithPrefix(prevPrefix), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) isPrefixFree(rtr fdb.ReadTransaction, prefix []byte) (bool, error) {
|
||||
if len(prefix) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
nck, e := dl.nodeContainingKey(rtr, prefix)
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
if nck != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
kr, e := fdb.PrefixRange(prefix)
|
||||
if e != nil {
|
||||
return false, e
|
||||
}
|
||||
|
||||
bk, ek := kr.FDBRangeKeys()
|
||||
if !isRangeEmpty(rtr, fdb.KeyRange{Begin: dl.nodeSS.Pack(tuple.Tuple{bk}), End: dl.nodeSS.Pack(tuple.Tuple{ek})}) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) checkVersion(rtr fdb.ReadTransaction, tr *fdb.Transaction) error {
|
||||
version := rtr.Get(dl.rootNode.Sub([]byte("version"))).MustGet()
|
||||
|
||||
if version == nil {
|
||||
if tr != nil {
|
||||
dl.initializeDirectory(*tr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var versions []int32
|
||||
buf := bytes.NewBuffer(version)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
var v int32
|
||||
err := binary.Read(buf, binary.LittleEndian, &v)
|
||||
if err != nil {
|
||||
return errors.New("cannot determine directory version present in database")
|
||||
}
|
||||
versions = append(versions, v)
|
||||
}
|
||||
|
||||
if versions[0] > _MAJORVERSION {
|
||||
return fmt.Errorf("cannot load directory with version %d.%d.%d using directory layer %d.%d.%d", versions[0], versions[1], versions[2], _MAJORVERSION, _MINORVERSION, _MICROVERSION)
|
||||
}
|
||||
|
||||
if versions[1] > _MINORVERSION && tr != nil /* aka write access allowed */ {
|
||||
return fmt.Errorf("directory with version %d.%d.%d is read-only when opened using directory layer %d.%d.%d", versions[0], versions[1], versions[2], _MAJORVERSION, _MINORVERSION, _MICROVERSION)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl directoryLayer) initializeDirectory(tr fdb.Transaction) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// bytes.Buffer claims that Write will always return a nil error, which
|
||||
// means the error return here can only be an encoding issue. So long as we
|
||||
// don't set our own versions to something completely invalid, we should be
|
||||
// OK to ignore error returns.
|
||||
binary.Write(buf, binary.LittleEndian, _MAJORVERSION)
|
||||
binary.Write(buf, binary.LittleEndian, _MINORVERSION)
|
||||
binary.Write(buf, binary.LittleEndian, _MICROVERSION)
|
||||
|
||||
tr.Set(dl.rootNode.Sub([]byte("version")), buf.Bytes())
|
||||
}
|
||||
|
||||
func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
p, e := dl.nodeSS.Unpack(node)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
prefix := p[0]
|
||||
|
||||
newPath := make([]string, len(dl.path) + len(path))
|
||||
copy(newPath, dl.path)
|
||||
copy(newPath[len(dl.path):], path)
|
||||
|
||||
pb := prefix.([]byte)
|
||||
ss := subspace.FromBytes(pb)
|
||||
|
||||
if bytes.Equal(layer, []byte("partition")) {
|
||||
nssb := make([]byte, len(pb) + 1)
|
||||
copy(nssb, pb)
|
||||
nssb[len(pb)] = 0xFE
|
||||
ndl := NewDirectoryLayer(subspace.FromBytes(nssb), ss, false).(directoryLayer)
|
||||
ndl.path = newPath
|
||||
return directoryPartition{ndl, dl}, nil
|
||||
} else {
|
||||
return directorySubspace{ss, dl, newPath, layer}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (dl directoryLayer) nodeWithPrefix(prefix []byte) subspace.Subspace {
|
||||
if prefix == nil {
return nil
}
|
||||
return dl.nodeSS.Sub(prefix)
|
||||
}
|
||||
|
||||
func (dl directoryLayer) find(rtr fdb.ReadTransaction, path []string) *node {
|
||||
n := &node{dl.rootNode, []string{}, path, nil}
|
||||
for i := range path {
|
||||
n = &node{dl.nodeWithPrefix(rtr.Get(n.subspace.Sub(_SUBDIRS, path[i])).MustGet()), path[:i+1], path, nil}
|
||||
if !n.exists() || bytes.Equal(n.layer(rtr).MustGet(), []byte("partition")) {
|
||||
return n
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (dl directoryLayer) partitionSubpath(lpath, rpath []string) []string {
|
||||
r := make([]string, len(lpath) - len(dl.path) + len(rpath))
|
||||
copy(r, lpath[len(dl.path):])
|
||||
copy(r[len(lpath) - len(dl.path):], rpath)
|
||||
return r
|
||||
}
|
||||
|
||||
func isRangeEmpty(rtr fdb.ReadTransaction, r fdb.Range) bool {
|
||||
kvs := rtr.GetRange(r, fdb.RangeOptions{Limit: 1}).GetSliceOrPanic()
|
||||
|
||||
return len(kvs) == 0
|
||||
}
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* directoryPartition.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go Directory Layer
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"fdb/tuple"
|
||||
)
|
||||
|
||||
type directoryPartition struct {
|
||||
directoryLayer
|
||||
parentDirectoryLayer directoryLayer
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Sub(el ...tuple.TupleElement) subspace.Subspace {
|
||||
panic("cannot open subspace in the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Bytes() []byte {
|
||||
panic("cannot get key for the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Pack(t tuple.Tuple) fdb.Key {
|
||||
panic("cannot pack keys using the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
|
||||
panic("cannot unpack keys using the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Contains(k fdb.KeyConvertible) bool {
|
||||
panic("cannot check whether a key belongs to the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) FDBKey() fdb.Key {
|
||||
panic("cannot use the root of a directory partition as a key")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
|
||||
panic("cannot get range for the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
|
||||
panic("cannot get range for the root of a directory partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) GetLayer() []byte {
|
||||
return []byte("partition")
|
||||
}
|
||||
|
||||
func (dp directoryPartition) getLayerForPath(path []string) directoryLayer {
|
||||
if len(path) == 0 {
|
||||
return dp.parentDirectoryLayer
|
||||
} else {
|
||||
return dp.directoryLayer
|
||||
}
|
||||
}
|
||||
|
||||
func (dp directoryPartition) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
|
||||
return moveTo(t, dp.parentDirectoryLayer, dp.path, newAbsolutePath)
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Remove(t fdb.Transactor, path []string) (bool, error) {
|
||||
dl := dp.getLayerForPath(path)
|
||||
return dl.Remove(t, dl.partitionSubpath(dp.path, path))
|
||||
}
|
||||
|
||||
func (dp directoryPartition) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
|
||||
dl := dp.getLayerForPath(path)
|
||||
return dl.Exists(rt, dl.partitionSubpath(dp.path, path))
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* directorySubspace.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go Directory Layer
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
)
|
||||
|
||||
// DirectorySubspace represents a Directory that may also be used as a Subspace
|
||||
// to store key/value pairs. Subdirectories of a root directory (as returned by
|
||||
// Root or NewDirectoryLayer) are DirectorySubspaces, and provide all methods of
|
||||
// the Directory and subspace.Subspace interfaces.
|
||||
type DirectorySubspace interface {
|
||||
subspace.Subspace
|
||||
Directory
|
||||
}
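// The snippet below is an editorial sketch, not part of the original source:
// because a DirectorySubspace is also a subspace.Subspace, keys can be packed
// directly against it (ds, tr and the tuple contents are hypothetical):
//
//	k := ds.Pack(tuple.Tuple{"user", 42})
//	tr.Set(k, []byte("v"))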
|
||||
|
||||
type directorySubspace struct {
|
||||
subspace.Subspace
|
||||
dl directoryLayer
|
||||
path []string
|
||||
layer []byte
|
||||
}
|
||||
|
||||
func (d directorySubspace) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
return d.dl.CreateOrOpen(t, d.dl.partitionSubpath(d.path, path), layer)
|
||||
}
|
||||
|
||||
func (d directorySubspace) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
return d.dl.Create(t, d.dl.partitionSubpath(d.path, path), layer)
|
||||
}
|
||||
|
||||
func (d directorySubspace) CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error) {
|
||||
return d.dl.CreatePrefix(t, d.dl.partitionSubpath(d.path, path), layer, prefix)
|
||||
}
|
||||
|
||||
func (d directorySubspace) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
|
||||
return d.dl.Open(rt, d.dl.partitionSubpath(d.path, path), layer)
|
||||
}
|
||||
|
||||
func (d directorySubspace) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
|
||||
return moveTo(t, d.dl, d.path, newAbsolutePath)
|
||||
}
|
||||
|
||||
func (d directorySubspace) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
|
||||
return d.dl.Move(t, d.dl.partitionSubpath(d.path, oldPath), d.dl.partitionSubpath(d.path, newPath))
|
||||
}
|
||||
|
||||
func (d directorySubspace) Remove(t fdb.Transactor, path []string) (bool, error) {
|
||||
return d.dl.Remove(t, d.dl.partitionSubpath(d.path, path))
|
||||
}
|
||||
|
||||
func (d directorySubspace) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
|
||||
return d.dl.Exists(rt, d.dl.partitionSubpath(d.path, path))
|
||||
}
|
||||
|
||||
func (d directorySubspace) List(rt fdb.ReadTransactor, path []string) ([]string, error) {
|
||||
return d.dl.List(rt, d.dl.partitionSubpath(d.path, path))
|
||||
}
|
||||
|
||||
func (d directorySubspace) GetLayer() []byte {
|
||||
return d.layer
|
||||
}
|
||||
|
||||
func (d directorySubspace) GetPath() []string {
|
||||
return d.path
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
* node.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go Directory Layer
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"fdb"
|
||||
"fdb/subspace"
|
||||
"bytes"
|
||||
)
|
||||
|
||||
type node struct {
|
||||
subspace subspace.Subspace
|
||||
path []string
|
||||
targetPath []string
|
||||
_layer fdb.FutureByteSlice
|
||||
}
|
||||
|
||||
func (n *node) exists() bool {
|
||||
return n.subspace != nil
|
||||
}
|
||||
|
||||
func (n *node) prefetchMetadata(rtr fdb.ReadTransaction) *node {
|
||||
if n.exists() {
|
||||
n.layer(rtr)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (n *node) layer(rtr fdb.ReadTransaction) fdb.FutureByteSlice {
|
||||
if n._layer == nil {
|
||||
fv := rtr.Get(n.subspace.Sub([]byte("layer")))
|
||||
n._layer = fv
|
||||
}
|
||||
|
||||
return n._layer
|
||||
}
|
||||
|
||||
func (n *node) isInPartition(tr *fdb.Transaction, includeEmptySubpath bool) bool {
|
||||
return n.exists() && bytes.Equal(n._layer.MustGet(), []byte("partition")) && (includeEmptySubpath || len(n.targetPath) > len(n.path))
|
||||
}
|
||||
|
||||
func (n *node) getPartitionSubpath() []string {
|
||||
return n.targetPath[len(n.path):]
|
||||
}
|
||||
|
||||
func (n *node) getContents(dl directoryLayer, tr *fdb.Transaction) (DirectorySubspace, error) {
|
||||
return dl.contentsOfNode(n.subspace, n.path, n._layer.MustGet())
|
||||
}
|
|
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* doc.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go API
|
||||
|
||||
/*
|
||||
Package fdb provides an interface to FoundationDB databases (version 2.0 or higher).
|
||||
|
||||
To build and run programs using this package, you must have an installed copy of
|
||||
the FoundationDB client libraries (version 2.0.0 or later), available for Linux,
|
||||
Windows and OS X at https://files.foundationdb.org/fdb-c/.
|
||||
|
||||
This documentation specifically applies to the FoundationDB Go binding. For more
|
||||
extensive guidance to programming with FoundationDB, as well as API
|
||||
documentation for the other FoundationDB interfaces, please see
|
||||
https://foundationdb.org/documentation/index.html.
|
||||
|
||||
Basic Usage
|
||||
|
||||
A basic interaction with the FoundationDB API is demonstrated below:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/apple/foundationdb/bindings/go/fdb"
|
||||
"log"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Different API versions may expose different runtime behaviors.
|
||||
fdb.MustAPIVersion(200)
|
||||
|
||||
// Open the default database from the system cluster
|
||||
db := fdb.MustOpenDefault()
|
||||
|
||||
// Database reads and writes happen inside transactions
|
||||
ret, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
|
||||
tr.Set(fdb.Key("hello"), []byte("world"))
|
||||
return tr.Get(fdb.Key("foo")).MustGet(), nil
|
||||
// db.Transact automatically commits (and if necessary,
|
||||
// retries) the transaction
|
||||
})
|
||||
if e != nil {
|
||||
log.Fatalf("Unable to perform FDB transaction (%v)", e)
|
||||
}
|
||||
|
||||
fmt.Printf("hello is now world, foo was: %s\n", string(ret.([]byte)))
|
||||
}
|
||||
|
||||
Futures
|
||||
|
||||
Many functions in this package are asynchronous and return Future objects. A
|
||||
Future represents a value (or error) to be available at some later
|
||||
time. Functions documented as blocking on a Future will block the calling
|
||||
goroutine until the Future is ready (although if the Future is already ready,
|
||||
the call will not block at all). While a goroutine is blocked on a Future, other
|
||||
goroutines are free to execute and interact with the FoundationDB API.
|
||||
|
||||
It is possible (and often recommended) to call several asynchronous operations
|
||||
and have multiple Future objects outstanding inside a single goroutine. All
|
||||
operations will execute in parallel, and the calling goroutine will not block
|
||||
until a blocking method on any one of the Futures is called.
|
||||
|
||||
On Panics
|
||||
|
||||
Idiomatic Go code strongly frowns on panics that escape library/package
|
||||
boundaries, in favor of explicitly returned errors. Idiomatic FoundationDB
|
||||
client programs, however, are built around the idea of retryable
|
||||
programmer-provided transactional functions. Retryable transactions can be
|
||||
implemented using only error values:
|
||||
|
||||
ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
// FoundationDB futures represent a value that will become available
|
||||
futureValueOne := tr.Get(fdb.Key("foo"))
|
||||
futureValueTwo := tr.Get(fdb.Key("bar"))
|
||||
|
||||
// Both reads are being carried out in parallel
|
||||
|
||||
// Get the first value (or any error)
|
||||
valueOne, e := futureValueOne.Get()
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
// Get the second value (or any error)
|
||||
valueTwo, e := futureValueTwo.Get()
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
// Return the two values
|
||||
return [][]byte{valueOne, valueTwo}, nil
|
||||
})
|
||||
|
||||
If either read encounters an error, it will be returned to Transact, which will
|
||||
determine if the error is retryable or not (using (Transaction).OnError). If the
|
||||
error is an FDB Error and retryable (such as a conflict with another
|
||||
transaction), then the programmer-provided function will be run again. If the
|
||||
error is fatal (or not an FDB Error), then the error will be returned to the
|
||||
caller of Transact.
|
||||
|
||||
In practice, checking for an error from every asynchronous future type in the
|
||||
FoundationDB API quickly becomes frustrating. As a convenience, every Future
|
||||
type also has a MustGet method, which returns the same type and value as Get,
|
||||
but exposes FoundationDB Errors via a panic rather than an explicitly returned
|
||||
error. The above example may be rewritten as:
|
||||
|
||||
ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
// FoundationDB futures represent a value that will become available
|
||||
futureValueOne := tr.Get(fdb.Key("foo"))
|
||||
futureValueTwo := tr.Get(fdb.Key("bar"))
|
||||
|
||||
// Both reads are being carried out in parallel
|
||||
|
||||
// Get the first value
|
||||
valueOne := futureValueOne.MustGet()
|
||||
// Get the second value
|
||||
valueTwo := futureValueTwo.MustGet()
|
||||
|
||||
// Return the two values
|
||||
return [][]byte{valueOne, valueTwo}, nil
|
||||
})
|
||||
|
||||
Any panic that occurs during execution of the caller-provided function will be
|
||||
recovered by the (Database).Transact method. If the error is an FDB Error, it
|
||||
will either result in a retry of the function or be returned by Transact. If the
|
||||
error is any other type (panics from code other than MustGet), Transact will
|
||||
re-panic the original value.
|
||||
|
||||
Note that (Transaction).Transact also recovers panics, but does not itself
|
||||
retry. If the recovered value is an FDB Error, it will be returned to the caller
|
||||
of (Transaction).Transact; all other values will be re-panicked.
|
||||
|
||||
Transactions and Goroutines
|
||||
|
||||
When using a Transactor in the fdb package, particular care must be taken if
|
||||
goroutines are created inside of the function passed to the Transact method. Any
|
||||
panic from the goroutine will not be recovered by Transact, and (unless
|
||||
otherwise recovered) will result in the termination of that goroutine.
|
||||
|
||||
Furthermore, any errors returned or panicked by fdb methods called in the
|
||||
goroutine must be safely returned to the function passed to Transact, and either
|
||||
returned or panicked, to allow Transact to appropriately retry or terminate the
|
||||
transactional function.
|
||||
|
||||
Lastly, a transactional function may be retried indefinitely. It is advisable to
|
||||
make sure any goroutines created during the transactional function have
|
||||
completed before returning from the transactional function, or a potentially
|
||||
unbounded number of goroutines may be created.
|
||||
|
||||
Given these complexities, it is generally best practice to use a single
|
||||
goroutine for each logical thread of interaction with FoundationDB, and allow
|
||||
each goroutine to block when necessary to wait for Futures to become ready.
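One safe pattern, sketched here editorially (the key and channel are
illustrative, not part of the API), is to send any error from the goroutine
back over a channel and receive it before returning, so that Transact can
retry or terminate the transactional function appropriately:

ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
errCh := make(chan error, 1)
go func() {
// Use Get rather than MustGet so the error crosses the channel
// instead of panicking inside the goroutine.
_, e := tr.Get(fdb.Key("foo")).Get()
errCh <- e
}()
// Wait for the goroutine to finish before returning, then surface
// its error to Transact.
if e := <-errCh; e != nil {
return nil, e
}
return nil, nil
})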
|
||||
|
||||
Streaming Modes
|
||||
|
||||
When using GetRange methods in the FoundationDB API, clients can request large
|
||||
ranges of the database to iterate over. Making such a request doesn't
|
||||
necessarily mean that the client will consume all of the data in the range --
|
||||
sometimes the client doesn't know how far it intends to iterate in
|
||||
advance. FoundationDB tries to balance latency and bandwidth by requesting data
|
||||
for iteration in batches.
|
||||
|
||||
The Mode field of the RangeOptions struct allows a client to customize this
|
||||
performance tradeoff by providing extra information about how the iterator will
|
||||
be used.
|
||||
|
||||
The default value of Mode is StreamingModeIterator, which tries to provide a
|
||||
reasonable default balance. Other streaming modes that prioritize throughput or
|
||||
latency are available -- see the documented StreamingMode values for specific
|
||||
options.
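For example, a scan that is known to consume an entire range can request the
data eagerly. This is an editorial sketch; tr and the key range are
illustrative:

kr := fdb.KeyRange{Begin: fdb.Key("a"), End: fdb.Key("z")}
rr := tr.GetRange(kr, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll})
kvs := rr.GetSliceOrPanic()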
|
||||
|
||||
Atomic Operations
|
||||
|
||||
The FDB package provides a number of atomic operations on the Database and
|
||||
Transaction objects. An atomic operation is a single database command that
|
||||
carries out several logical steps: reading the value of a key, performing a
|
||||
transformation on that value, and writing the result. Different atomic
|
||||
operations perform different transformations. Like other database operations, an
|
||||
atomic operation is used within a transaction.
|
||||
|
||||
For more information on atomic operations in FoundationDB, please see
|
||||
https://foundationdb.org/documentation/developer-guide.html#atomic-operations. The
|
||||
operands to atomic operations in this API must be provided as appropriately
|
||||
encoded byte slices. To convert a Go type to a byte slice, see the binary
|
||||
package.
|
||||
|
||||
The current atomic operations in this API are Add, BitAnd, BitOr, BitXor, Max, Min,
|
||||
SetVersionstampedKey, SetVersionstampedValue (all methods on Transaction).
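For example, a little-endian 64-bit counter at an illustrative key can be
incremented with Add. This is an editorial sketch assuming "encoding/binary"
is imported:

one := make([]byte, 8)
binary.LittleEndian.PutUint64(one, 1)
db.Transact(func (tr fdb.Transaction) (interface{}, error) {
tr.Add(fdb.Key("counter"), one)
return nil, nil
})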
|
||||
*/
|
||||
package fdb
|
|
@ -0,0 +1,59 @@
|
|||
/*
|
||||
* errors.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go API
|
||||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 200
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error represents a low-level error returned by the FoundationDB C library. An
|
||||
// Error may be returned by any FoundationDB API function that returns error, or
|
||||
// as a panic from any FoundationDB API function whose name begins with Must
// or ends with OrPanic.
|
||||
//
|
||||
// You may compare the Code field of an Error against the list of FoundationDB
|
||||
// error codes at https://foundationdb.org/documentation/api-error-codes.html,
|
||||
// but generally an Error should be passed to (Transaction).OnError. When using
|
||||
// (Database).Transact, non-fatal errors will be retried automatically.
|
||||
type Error struct {
|
||||
Code int
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("FoundationDB error code %d (%s)", e.Code, C.GoString(C.fdb_get_error(C.fdb_error_t(e.Code))))
|
||||
}
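// A hedged sketch (not part of the original source) of matching a specific
// code after a failed operation; 1020 is the documented not_committed
// (conflict) error:
//
//	if fe, ok := e.(Error); ok && fe.Code == 1020 {
//		// conflicting transaction; Transact would normally retry this
//	}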
|
||||
|
||||
// SOMEDAY: these (along with others) should be coming from fdb.options?
|
||||
|
||||
var (
|
||||
errNetworkNotSetup = Error{2008}
|
||||
|
||||
errAPIVersionUnset = Error{2200}
|
||||
errAPIVersionAlreadySet = Error{2201}
|
||||
errAPIVersionNotSupported = Error{2203}
|
||||
)
|
|
@ -0,0 +1,358 @@
|
|||
/*
|
||||
* fdb.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go API
|
||||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 500
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <stdlib.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"unsafe"
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Would put this in futures.go but for the documented issue with
|
||||
// exports and functions in preamble
|
||||
// (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions)
|
||||
//export unlockMutex
|
||||
func unlockMutex(p unsafe.Pointer) {
|
||||
m := (*sync.Mutex)(p)
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
// A Transactor can execute a function that requires a Transaction. Functions
|
||||
// written to accept a Transactor are called transactional functions, and may be
|
||||
// called with either a Database or a Transaction.
|
||||
type Transactor interface {
|
||||
// Transact executes the caller-provided function, providing it with a
|
||||
// Transaction (itself a Transactor, allowing composition of transactional
|
||||
// functions).
|
||||
Transact(func (Transaction) (interface{}, error)) (interface{}, error)
|
||||
|
||||
// All Transactors are also ReadTransactors, allowing them to be used with
|
||||
// read-only transactional functions.
|
||||
ReadTransactor
|
||||
}
|
||||
|
||||
// A ReadTransactor can execute a function that requires a
|
||||
// ReadTransaction. Functions written to accept a ReadTransactor are called
|
||||
// read-only transactional functions, and may be called with a Database,
|
||||
// Transaction or Snapshot.
|
||||
type ReadTransactor interface {
|
||||
// ReadTransact executes the caller-provided function, providing it with a
|
||||
// ReadTransaction (itself a ReadTransactor, allowing composition of
|
||||
// read-only transactional functions).
|
||||
ReadTransact(func (ReadTransaction) (interface{}, error)) (interface{}, error)
|
||||
}
|
||||
|
||||
func setOpt(setter func(*C.uint8_t, C.int) C.fdb_error_t, param []byte) error {
|
||||
if err := setter(byteSliceToPtr(param), C.int(len(param))); err != 0 {
|
||||
return Error{int(err)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NetworkOptions is a handle with which to set options that affect the entire
|
||||
// FoundationDB client. A NetworkOptions instance should be obtained with the
|
||||
// fdb.Options function.
|
||||
type NetworkOptions struct{}
|
||||
|
||||
// Options returns a NetworkOptions instance suitable for setting options that
|
||||
// affect the entire FoundationDB client.
|
||||
func Options() NetworkOptions {
|
||||
return NetworkOptions{}
|
||||
}
|
||||
|
||||
func (opt NetworkOptions) setOpt(code int, param []byte) error {
|
||||
networkMutex.Lock()
|
||||
defer networkMutex.Unlock()
|
||||
|
||||
if apiVersion == 0 {
|
||||
return errAPIVersionUnset
|
||||
}
|
||||
|
||||
return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
|
||||
return C.fdb_network_set_option(C.FDBNetworkOption(code), p, pl)
|
||||
}, param)
|
||||
}
|
||||
|
||||
// APIVersion determines the runtime behavior of the fdb package. If the requested
|
||||
// version is not supported by both the fdb package and the FoundationDB C
|
||||
// library, an error will be returned. APIVersion must be called prior to any
|
||||
// other functions in the fdb package.
|
||||
//
|
||||
// Currently, this package supports API versions 200 through 500.
|
||||
//
|
||||
// Warning: When using the multi-version client API, setting an API version that
|
||||
// is not supported by a particular client library will prevent that client from
|
||||
// being used to connect to the cluster. In particular, you should not advance
|
||||
// the API version of your application after upgrading your client until the
|
||||
// cluster has also been upgraded.
|
||||
func APIVersion(version int) error {
|
||||
headerVersion := 500
|
||||
|
||||
networkMutex.Lock()
|
||||
defer networkMutex.Unlock()
|
||||
|
||||
if apiVersion != 0 {
|
||||
if apiVersion == version {
|
||||
return nil
|
||||
}
|
||||
return errAPIVersionAlreadySet
|
||||
}
|
||||
|
||||
if version < 200 || version > 500 {
|
||||
return errAPIVersionNotSupported
|
||||
}
|
||||
|
||||
if e := C.fdb_select_api_version_impl(C.int(version), C.int(headerVersion)); e != 0 {
if e == 2203 {
maxSupportedVersion := C.fdb_get_max_api_version()
if headerVersion > int(maxSupportedVersion) {
return fmt.Errorf("This version of the FoundationDB Go binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d.", headerVersion, maxSupportedVersion)
}
return fmt.Errorf("API version %d is not supported by the installed FoundationDB C library.", version)
}
return Error{int(e)}
}
|
||||
|
||||
apiVersion = version
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MustAPIVersion is like APIVersion but panics if the API version is not
|
||||
// supported.
|
||||
func MustAPIVersion(version int) {
|
||||
err := APIVersion(version)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
var apiVersion int
|
||||
var networkStarted bool
|
||||
var networkMutex sync.Mutex
|
||||
|
||||
var openClusters map[string]Cluster
|
||||
var openDatabases map[string]Database
|
||||
|
||||
func init() {
|
||||
openClusters = make(map[string]Cluster)
|
||||
openDatabases = make(map[string]Database)
|
||||
}
|
||||
|
||||
func startNetwork() error {
|
||||
if e := C.fdb_setup_network(); e != 0 {
|
||||
return Error{int(e)}
|
||||
}
|
||||
|
||||
go func() {
|
||||
e := C.fdb_run_network()
|
||||
if e != 0 {
|
||||
log.Printf("Unhandled error in FoundationDB network thread: %v (%v)\n", C.GoString(C.fdb_get_error(e)), e)
|
||||
}
|
||||
}()
|
||||
|
||||
networkStarted = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartNetwork initializes the FoundationDB client networking engine. It is not
|
||||
// necessary to call StartNetwork when using the fdb.Open or fdb.OpenDefault
|
||||
// functions to obtain a database handle. StartNetwork must not be called more
|
||||
// than once.
|
||||
func StartNetwork() error {
|
||||
networkMutex.Lock()
|
||||
defer networkMutex.Unlock()
|
||||
|
||||
if apiVersion == 0 {
|
||||
return errAPIVersionUnset
|
||||
}
|
||||
|
||||
return startNetwork()
|
||||
}
|
||||
|
||||
// DefaultClusterFile should be passed to fdb.Open or fdb.CreateCluster to allow
|
||||
// the FoundationDB C library to select the platform-appropriate default cluster
|
||||
// file on the current machine.
|
||||
const DefaultClusterFile string = ""
|
||||
|
||||
// OpenDefault returns a database handle to the default database from the
|
||||
// FoundationDB cluster identified by the DefaultClusterFile on the current
|
||||
// machine. The FoundationDB client networking engine will be initialized first,
|
||||
// if necessary.
|
||||
func OpenDefault() (Database, error) {
|
||||
return Open(DefaultClusterFile, []byte("DB"))
|
||||
}
|
||||
|
||||
// MustOpenDefault is like OpenDefault but panics if the default database cannot
|
||||
// be opened.
|
||||
func MustOpenDefault() Database {
|
||||
db, err := OpenDefault()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// Open returns a database handle to the named database from the FoundationDB
|
||||
// cluster identified by the provided cluster file and database name. The
|
||||
// FoundationDB client networking engine will be initialized first, if
|
||||
// necessary.
|
||||
//
|
||||
// In the current release, the database name must be []byte("DB").
|
||||
func Open(clusterFile string, dbName []byte) (Database, error) {
|
||||
networkMutex.Lock()
|
||||
defer networkMutex.Unlock()
|
||||
|
||||
if apiVersion == 0 {
|
||||
return Database{}, errAPIVersionUnset
|
||||
}
|
||||
|
||||
var e error
|
||||
|
||||
if !networkStarted {
|
||||
e = startNetwork()
|
||||
if e != nil {
|
||||
return Database{}, e
|
||||
}
|
||||
}
|
||||
|
||||
cluster, ok := openClusters[clusterFile]
|
||||
if !ok {
|
||||
cluster, e = createCluster(clusterFile)
|
||||
if e != nil {
|
||||
return Database{}, e
|
||||
}
|
||||
openClusters[clusterFile] = cluster
|
||||
}
|
||||
|
||||
db, ok := openDatabases[clusterFile+string(dbName)]
|
||||
if !ok {
|
||||
db, e = cluster.OpenDatabase(dbName)
|
||||
if e != nil {
|
||||
return Database{}, e
|
||||
}
|
||||
openDatabases[clusterFile+string(dbName)] = db
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// MustOpen is like Open but panics if the database cannot be opened.
|
||||
func MustOpen(clusterFile string, dbName []byte) Database {
|
||||
db, err := Open(clusterFile, dbName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
func createCluster(clusterFile string) (Cluster, error) {
|
||||
var cf *C.char
|
||||
|
||||
if len(clusterFile) != 0 {
|
||||
cf = C.CString(clusterFile)
|
||||
defer C.free(unsafe.Pointer(cf))
|
||||
}
|
||||
|
||||
f := C.fdb_create_cluster(cf)
|
||||
fdb_future_block_until_ready(f)
|
||||
|
||||
var outc *C.FDBCluster
|
||||
|
||||
if err := C.fdb_future_get_cluster(f, &outc); err != 0 {
|
||||
C.fdb_future_destroy(f)
return Cluster{}, Error{int(err)}
|
||||
}
|
||||
|
||||
C.fdb_future_destroy(f)
|
||||
|
||||
c := &cluster{outc}
|
||||
runtime.SetFinalizer(c, (*cluster).destroy)
|
||||
|
||||
return Cluster{c}, nil
|
||||
}
|
||||
|
||||
// CreateCluster returns a cluster handle to the FoundationDB cluster identified
|
||||
// by the provided cluster file.
|
||||
func CreateCluster(clusterFile string) (Cluster, error) {
|
||||
networkMutex.Lock()
|
||||
defer networkMutex.Unlock()
|
||||
|
||||
if apiVersion == 0 {
|
||||
return Cluster{}, errAPIVersionUnset
|
||||
}
|
||||
|
||||
if !networkStarted {
|
||||
return Cluster{}, errNetworkNotSetup
|
||||
}
|
||||
|
||||
return createCluster(clusterFile)
|
||||
}
|
||||
|
||||
func byteSliceToPtr(b []byte) *C.uint8_t {
|
||||
if len(b) > 0 {
|
||||
return (*C.uint8_t)(unsafe.Pointer(&b[0]))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// A KeyConvertible can be converted to a FoundationDB Key. All functions in the
|
||||
// FoundationDB API that address a specific key accept a KeyConvertible.
|
||||
type KeyConvertible interface {
|
||||
FDBKey() Key
|
||||
}
|
||||
|
||||
// Key represents a FoundationDB key, a lexicographically-ordered sequence of
|
||||
// bytes. Key implements the KeyConvertible interface.
|
||||
type Key []byte
|
||||
|
||||
// FDBKey allows Key to (trivially) satisfy the KeyConvertible interface.
|
||||
func (k Key) FDBKey() Key {
|
||||
return k
|
||||
}
|
||||
|
||||
func panicToError(e *error) {
|
||||
if r := recover(); r != nil {
|
||||
fe, ok := r.(Error)
|
||||
if ok {
|
||||
*e = fe
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,263 @@
|
|||
/*
 * fdb_test.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb_test

import (
	"fdb"
	"fmt"
	"testing"
)

func ExampleOpenDefault() {
	var e error

	e = fdb.APIVersion(400)
	if e != nil {
		fmt.Printf("Unable to set API version: %v\n", e)
		return
	}

	// OpenDefault opens the database described by the platform-specific default
	// cluster file and the database name []byte("DB").
	db, e := fdb.OpenDefault()
	if e != nil {
		fmt.Printf("Unable to open default database: %v\n", e)
		return
	}

	_ = db
}

// A Go Example function must not take parameters, so this versionstamp
// demonstration runs as an ordinary test.
func TestVersionstamp(t *testing.T) {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
		fmt.Printf("setOne called with: %T\n", t)
		ret, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.SetVersionstampedValue(key, []byte("blahblahbl"))
			return tr.GetVersionstamp(), nil
		})
		return ret.(fdb.FutureKey), e
	}

	getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
		fmt.Printf("getOne called with: %T\n", rt)
		ret, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			return rtr.Get(key).MustGet(), nil
		})
		if e != nil {
			return nil, e
		}
		return ret.([]byte), nil
	}

	var v []byte
	var fvs fdb.FutureKey
	var k fdb.Key

	fvs, _ = setVs(db, fdb.Key("foo"))
	v, _ = getOne(db, fdb.Key("foo"))
	t.Log(v)
	k, _ = fvs.Get()
	t.Log(k)
}

func ExampleTransactor() {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {
		fmt.Printf("setOne called with: %T\n", t)
		_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			// We don't actually call tr.Set here to avoid mutating a real database.
			// tr.Set(key, value)
			return nil, nil
		})
		return e
	}

	setMany := func(t fdb.Transactor, value []byte, keys ...fdb.Key) error {
		fmt.Printf("setMany called with: %T\n", t)
		_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			for _, key := range keys {
				setOne(tr, key, value)
			}
			return nil, nil
		})
		return e
	}

	var e error

	fmt.Println("Calling setOne with a database:")
	e = setOne(db, []byte("foo"), []byte("bar"))
	if e != nil {
		fmt.Println(e)
		return
	}
	fmt.Println("\nCalling setMany with a database:")
	e = setMany(db, []byte("bar"), fdb.Key("foo1"), fdb.Key("foo2"), fdb.Key("foo3"))
	if e != nil {
		fmt.Println(e)
		return
	}

	// Output:
	// Calling setOne with a database:
	// setOne called with: fdb.Database
	//
	// Calling setMany with a database:
	// setMany called with: fdb.Database
	// setOne called with: fdb.Transaction
	// setOne called with: fdb.Transaction
	// setOne called with: fdb.Transaction
}

func ExampleReadTransactor() {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
		fmt.Printf("getOne called with: %T\n", rt)
		ret, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			return rtr.Get(key).MustGet(), nil
		})
		if e != nil {
			return nil, e
		}
		return ret.([]byte), nil
	}

	getTwo := func(rt fdb.ReadTransactor, key1, key2 fdb.Key) ([][]byte, error) {
		fmt.Printf("getTwo called with: %T\n", rt)
		ret, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			r1, _ := getOne(rtr, key1)
			r2, _ := getOne(rtr.Snapshot(), key2)
			return [][]byte{r1, r2}, nil
		})
		if e != nil {
			return nil, e
		}
		return ret.([][]byte), nil
	}

	var e error

	fmt.Println("Calling getOne with a database:")
	_, e = getOne(db, fdb.Key("foo"))
	if e != nil {
		fmt.Println(e)
		return
	}
	fmt.Println("\nCalling getTwo with a database:")
	_, e = getTwo(db, fdb.Key("foo"), fdb.Key("bar"))
	if e != nil {
		fmt.Println(e)
		return
	}

	// Output:
	// Calling getOne with a database:
	// getOne called with: fdb.Database
	//
	// Calling getTwo with a database:
	// getTwo called with: fdb.Database
	// getOne called with: fdb.Transaction
	// getOne called with: fdb.Snapshot
}

func ExamplePrefixRange() {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	tr, e := db.CreateTransaction()
	if e != nil {
		fmt.Printf("Unable to create transaction: %v\n", e)
		return
	}

	// Clear and initialize data in this transaction. In examples we do not
	// commit transactions to avoid mutating a real database.
	tr.ClearRange(fdb.KeyRange{fdb.Key(""), fdb.Key{0xFF}})
	tr.Set(fdb.Key("alpha"), []byte("1"))
	tr.Set(fdb.Key("alphabetA"), []byte("2"))
	tr.Set(fdb.Key("alphabetB"), []byte("3"))
	tr.Set(fdb.Key("alphabetize"), []byte("4"))
	tr.Set(fdb.Key("beta"), []byte("5"))

	// Construct the range of all keys beginning with "alphabet". It is safe to
	// ignore the error return from PrefixRange unless the provided prefix might
	// consist entirely of zero or more 0xFF bytes.
	pr, _ := fdb.PrefixRange([]byte("alphabet"))

	// Read and process the range
	kvs, e := tr.GetRange(pr, fdb.RangeOptions{}).GetSliceWithError()
	if e != nil {
		fmt.Printf("Unable to read range: %v\n", e)
	}
	for _, kv := range kvs {
		fmt.Printf("%s: %s\n", string(kv.Key), string(kv.Value))
	}

	// Output:
	// alphabetA: 2
	// alphabetB: 3
	// alphabetize: 4
}

func ExampleRangeIterator() {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	tr, e := db.CreateTransaction()
	if e != nil {
		fmt.Printf("Unable to create transaction: %v\n", e)
		return
	}

	// Clear and initialize data in this transaction. In examples we do not
	// commit transactions to avoid mutating a real database.
	tr.ClearRange(fdb.KeyRange{fdb.Key(""), fdb.Key{0xFF}})
	tr.Set(fdb.Key("apple"), []byte("foo"))
	tr.Set(fdb.Key("cherry"), []byte("baz"))
	tr.Set(fdb.Key("banana"), []byte("bar"))

	rr := tr.GetRange(fdb.KeyRange{fdb.Key(""), fdb.Key{0xFF}}, fdb.RangeOptions{})
	ri := rr.Iterator()

	// Advance will return true until the iterator is exhausted
	for ri.Advance() {
		kv, e := ri.Get()
		if e != nil {
			fmt.Printf("Unable to read next value: %v\n", e)
			return
		}
		fmt.Printf("%s is %s\n", kv.Key, kv.Value)
	}

	// Output:
	// apple is foo
	// banana is bar
	// cherry is baz
}
@ -0,0 +1,376 @@
/*
 * futures.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

/*
#cgo LDFLAGS: -lfdb_c -lm
#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
#include <string.h>

extern void unlockMutex(void*);

void go_callback(FDBFuture* f, void* m) {
	unlockMutex(m);
}

void go_set_callback(void* f, void* m) {
	fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
}
*/
import "C"

import (
	"runtime"
	"sync"
	"unsafe"
)

// A Future represents a value (or error) to be available at some later
// time. Asynchronous FDB API functions return one of the types that implement
// the Future interface. All Future types additionally implement Get and MustGet
// methods with different return types. Calling BlockUntilReady, Get or MustGet
// will block the calling goroutine until the Future is ready.
type Future interface {
	// BlockUntilReady blocks the calling goroutine until the future is ready. A
	// future becomes ready either when it receives a value of its enclosed type
	// (if any) or is set to an error state.
	BlockUntilReady()

	// IsReady returns true if the future is ready, and false otherwise, without
	// blocking. A future is ready either when it has received a value of its
	// enclosed type (if any) or has been set to an error state.
	IsReady() bool

	// Cancel cancels a future and its associated asynchronous operation. If
	// called before the future becomes ready, attempts to access the future
	// will return an error. Cancel has no effect if the future is already
	// ready.
	//
	// Note that even if a future is not ready, the associated asynchronous
	// operation may already have completed and be unable to be cancelled.
	Cancel()
}

type future struct {
	ptr *C.FDBFuture
}

func newFuture(ptr *C.FDBFuture) *future {
	f := &future{ptr}
	runtime.SetFinalizer(f, func(f *future) { C.fdb_future_destroy(f.ptr) })
	return f
}

// fdb_future_block_until_ready blocks until f is ready, using a locked mutex
// as a one-shot signal: the C callback (go_callback) unlocks m when the future
// becomes ready, releasing the second Lock below.
func fdb_future_block_until_ready(f *C.FDBFuture) {
	if C.fdb_future_is_ready(f) != 0 {
		return
	}

	m := &sync.Mutex{}
	m.Lock()
	C.go_set_callback(unsafe.Pointer(f), unsafe.Pointer(m))
	m.Lock()
}
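
// Illustrative sketch (not part of the original binding): the double-Lock
// above is the "locked mutex as one-shot signal" idiom. In isolation:
//
//	m := &sync.Mutex{}
//	m.Lock()
//	go func() {
//		// ... do work ...
//		m.Unlock() // signal completion
//	}()
//	m.Lock() // blocks until the goroutine unlocks m
//
// A channel would be more idiomatic in pure Go, but a mutex can be unlocked
// directly from a C callback via the exported unlockMutex helper.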

func (f future) BlockUntilReady() {
	fdb_future_block_until_ready(f.ptr)
}

func (f future) IsReady() bool {
	return C.fdb_future_is_ready(f.ptr) != 0
}

func (f future) Cancel() {
	C.fdb_future_cancel(f.ptr)
}

// FutureByteSlice represents the asynchronous result of a function that returns
// a value from a database. FutureByteSlice is a lightweight object that may be
// efficiently copied, and is safe for concurrent use by multiple goroutines.
type FutureByteSlice interface {
	// Get returns a database value (or nil if there is no value), or an error
	// if the asynchronous operation associated with this future did not
	// successfully complete. The current goroutine will be blocked until the
	// future is ready.
	Get() ([]byte, error)

	// MustGet returns a database value (or nil if there is no value), or panics
	// if the asynchronous operation associated with this future did not
	// successfully complete. The current goroutine will be blocked until the
	// future is ready.
	MustGet() []byte

	Future
}

type futureByteSlice struct {
	*future
	v []byte
	e error
	o sync.Once
}

func (f *futureByteSlice) Get() ([]byte, error) {
	// The fetch runs only once; the result is cached for subsequent calls.
	f.o.Do(func() {
		var present C.fdb_bool_t
		var value *C.uint8_t
		var length C.int

		f.BlockUntilReady()

		if err := C.fdb_future_get_value(f.ptr, &present, &value, &length); err != 0 {
			f.e = Error{int(err)}
		} else {
			if present != 0 {
				f.v = C.GoBytes(unsafe.Pointer(value), length)
			}
		}

		// Release the C-held memory now that the value has been copied into Go.
		C.fdb_future_release_memory(f.ptr)
	})

	return f.v, f.e
}

func (f *futureByteSlice) MustGet() []byte {
	val, err := f.Get()
	if err != nil {
		panic(err)
	}
	return val
}

// FutureKey represents the asynchronous result of a function that returns a key
// from a database. FutureKey is a lightweight object that may be efficiently
// copied, and is safe for concurrent use by multiple goroutines.
type FutureKey interface {
	// Get returns a database key or an error if the asynchronous operation
	// associated with this future did not successfully complete. The current
	// goroutine will be blocked until the future is ready.
	Get() (Key, error)

	// MustGet returns a database key, or panics if the asynchronous operation
	// associated with this future did not successfully complete. The current
	// goroutine will be blocked until the future is ready.
	MustGet() Key

	Future
}

type futureKey struct {
	*future
	k Key
	e error
	o sync.Once
}

func (f *futureKey) Get() (Key, error) {
	f.o.Do(func() {
		var value *C.uint8_t
		var length C.int

		f.BlockUntilReady()

		if err := C.fdb_future_get_key(f.ptr, &value, &length); err != 0 {
			f.e = Error{int(err)}
		} else {
			f.k = C.GoBytes(unsafe.Pointer(value), length)
		}

		C.fdb_future_release_memory(f.ptr)
	})

	return f.k, f.e
}

func (f *futureKey) MustGet() Key {
	val, err := f.Get()
	if err != nil {
		panic(err)
	}
	return val
}

// FutureNil represents the asynchronous result of a function that has no return
// value. FutureNil is a lightweight object that may be efficiently copied, and
// is safe for concurrent use by multiple goroutines.
type FutureNil interface {
	// Get returns an error if the asynchronous operation associated with this
	// future did not successfully complete. The current goroutine will be
	// blocked until the future is ready.
	Get() error

	// MustGet panics if the asynchronous operation associated with this future
	// did not successfully complete. The current goroutine will be blocked
	// until the future is ready.
	MustGet()

	Future
}

type futureNil struct {
	*future
}

func (f futureNil) Get() error {
	f.BlockUntilReady()
	if err := C.fdb_future_get_error(f.ptr); err != 0 {
		return Error{int(err)}
	}

	return nil
}

func (f futureNil) MustGet() {
	if err := f.Get(); err != nil {
		panic(err)
	}
}

type futureKeyValueArray struct {
	*future
}

// stringRefToSlice copies out one field of a packed FDBKeyValue: ptr points at
// an 8-byte pointer immediately followed by a 4-byte length, the 4-byte-aligned
// layout defined by fdb_c.h.
func stringRefToSlice(ptr unsafe.Pointer) []byte {
	size := *((*C.int)(unsafe.Pointer(uintptr(ptr) + 8)))

	if size == 0 {
		return []byte{}
	}

	src := unsafe.Pointer(*(**C.uint8_t)(unsafe.Pointer(ptr)))

	return C.GoBytes(src, size)
}

func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
	f.BlockUntilReady()

	var kvs *C.FDBKeyValue
	var count C.int
	var more C.fdb_bool_t

	if err := C.fdb_future_get_keyvalue_array(f.ptr, &kvs, &count, &more); err != 0 {
		return nil, false, Error{int(err)}
	}

	ret := make([]KeyValue, int(count))

	for i := 0; i < int(count); i++ {
		// 24 is the size of a packed FDBKeyValue on 64-bit platforms; the
		// value reference starts 12 bytes in.
		kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i*24))

		ret[i].Key = stringRefToSlice(kvptr)
		ret[i].Value = stringRefToSlice(unsafe.Pointer(uintptr(kvptr) + 12))
	}

	return ret, (more != 0), nil
}

// FutureInt64 represents the asynchronous result of a function that returns a
// database version. FutureInt64 is a lightweight object that may be efficiently
// copied, and is safe for concurrent use by multiple goroutines.
type FutureInt64 interface {
	// Get returns a database version or an error if the asynchronous operation
	// associated with this future did not successfully complete. The current
	// goroutine will be blocked until the future is ready.
	Get() (int64, error)

	// MustGet returns a database version, or panics if the asynchronous
	// operation associated with this future did not successfully complete. The
	// current goroutine will be blocked until the future is ready.
	MustGet() int64

	Future
}

type futureInt64 struct {
	*future
}

func (f futureInt64) Get() (int64, error) {
	f.BlockUntilReady()

	var ver C.int64_t
	if err := C.fdb_future_get_version(f.ptr, &ver); err != 0 {
		return 0, Error{int(err)}
	}
	return int64(ver), nil
}

func (f futureInt64) MustGet() int64 {
	val, err := f.Get()
	if err != nil {
		panic(err)
	}
	return val
}

// FutureStringSlice represents the asynchronous result of a function that
// returns a slice of strings. FutureStringSlice is a lightweight object that
// may be efficiently copied, and is safe for concurrent use by multiple
// goroutines.
type FutureStringSlice interface {
	// Get returns a slice of strings or an error if the asynchronous operation
	// associated with this future did not successfully complete. The current
	// goroutine will be blocked until the future is ready.
	Get() ([]string, error)

	// MustGet returns a slice of strings or panics if the asynchronous
	// operation associated with this future did not successfully complete. The
	// current goroutine will be blocked until the future is ready.
	MustGet() []string

	Future
}

type futureStringSlice struct {
	*future
}

func (f futureStringSlice) Get() ([]string, error) {
	f.BlockUntilReady()

	var strings **C.char
	var count C.int

	if err := C.fdb_future_get_string_array(f.ptr, (***C.char)(unsafe.Pointer(&strings)), &count); err != 0 {
		return nil, Error{int(err)}
	}

	ret := make([]string, int(count))

	for i := 0; i < int(count); i++ {
		// 8 is the size of a char* on 64-bit platforms.
		ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings)) + uintptr(i*8)))))
	}

	return ret, nil
}

func (f futureStringSlice) MustGet() []string {
	val, err := f.Get()
	if err != nil {
		panic(err)
	}
	return val
}
@ -0,0 +1,74 @@
/*
 * keyselector.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

// A Selectable can be converted to a FoundationDB KeySelector. All functions in
// the FoundationDB API that resolve a key selector to a key accept Selectable.
type Selectable interface {
	FDBKeySelector() KeySelector
}

// KeySelector represents a description of a key in a FoundationDB database. A
// KeySelector may be resolved to a specific key with the GetKey method, or used
// as the endpoints of a SelectorRange to be used with a GetRange function.
//
// The most common key selectors are constructed with the functions documented
// below. For details of how KeySelectors are specified and resolved, see
// https://foundationdb.org/documentation/developer-guide.html#key-selectors.
type KeySelector struct {
	Key     KeyConvertible
	OrEqual bool
	Offset  int
}

// FDBKeySelector allows KeySelector to (trivially) satisfy the Selectable
// interface.
func (ks KeySelector) FDBKeySelector() KeySelector {
	return ks
}

// LastLessThan returns the KeySelector specifying the lexicographically greatest
// key present in the database which is lexicographically strictly less than the
// given key.
func LastLessThan(key KeyConvertible) KeySelector {
	return KeySelector{key, false, 0}
}

// LastLessOrEqual returns the KeySelector specifying the lexicographically
// greatest key present in the database which is lexicographically less than or
// equal to the given key.
func LastLessOrEqual(key KeyConvertible) KeySelector {
	return KeySelector{key, true, 0}
}

// FirstGreaterThan returns the KeySelector specifying the lexicographically least
// key present in the database which is lexicographically strictly greater than
// the given key.
func FirstGreaterThan(key KeyConvertible) KeySelector {
	return KeySelector{key, true, 1}
}

// FirstGreaterOrEqual returns the KeySelector specifying the lexicographically
// least key present in the database which is lexicographically greater than or
// equal to the given key.
func FirstGreaterOrEqual(key KeyConvertible) KeySelector {
	return KeySelector{key, false, 1}
}
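
// Illustrative sketch (not part of the original binding; the key names are
// hypothetical): a selector can be resolved to a concrete key inside a
// transaction, e.g. to find the first key at or after "apple":
//
//	key, e := tr.GetKey(fdb.FirstGreaterOrEqual(fdb.Key("apple"))).Get()
//
// Selectors may also bound a range read directly:
//
//	sr := fdb.SelectorRange{
//		Begin: fdb.FirstGreaterOrEqual(fdb.Key("a")),
//		End:   fdb.FirstGreaterThan(fdb.Key("m")),
//	}
//	rr := tr.GetRange(sr, fdb.RangeOptions{})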
@ -0,0 +1,317 @@
/*
 * range.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

/*
#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
*/
import "C"

import (
	"fmt"
)

// KeyValue represents a single key-value pair in the database.
type KeyValue struct {
	Key   Key
	Value []byte
}

// RangeOptions specify how a database range read operation is carried
// out. RangeOptions objects are passed to GetRange methods of Database,
// Transaction and Snapshot.
//
// The zero value of RangeOptions represents the default range read
// configuration (no limit, lexicographic order, to be used as an iterator).
type RangeOptions struct {
	// Limit restricts the number of key-value pairs returned as part of a range
	// read. A value of 0 indicates no limit.
	Limit int

	// Mode sets the streaming mode of the range read, allowing the database to
	// balance latency and bandwidth for this read.
	Mode StreamingMode

	// Reverse indicates that the read should be performed in lexicographic
	// (false) or reverse lexicographic (true) order. When Reverse is true and
	// Limit is non-zero, the last Limit key-value pairs in the range are
	// returned.
	Reverse bool
}

// A Range describes all keys between a begin (inclusive) and end (exclusive)
// key selector.
type Range interface {
	// FDBRangeKeySelectors returns a pair of key selectors that describe the
	// beginning and end of a range.
	FDBRangeKeySelectors() (begin, end Selectable)
}

// An ExactRange describes all keys between a begin (inclusive) and end
// (exclusive) key. If you need to specify an ExactRange and you have only a
// Range, you must resolve the selectors returned by
// (Range).FDBRangeKeySelectors to keys using the (Transaction).GetKey method.
//
// Any object that implements ExactRange also implements Range, and may be used
// accordingly.
type ExactRange interface {
	// FDBRangeKeys returns a pair of keys that describe the beginning and end
	// of a range.
	FDBRangeKeys() (begin, end KeyConvertible)

	// An object that implements ExactRange must also implement Range
	// (logically, by returning FirstGreaterOrEqual of the keys returned by
	// FDBRangeKeys).
	Range
}

// KeyRange is an ExactRange constructed from a pair of KeyConvertibles. Note
// that the default zero-value of KeyRange specifies an empty range before all
// keys in the database.
type KeyRange struct {
	Begin, End KeyConvertible
}

// FDBRangeKeys allows KeyRange to satisfy the ExactRange interface.
func (kr KeyRange) FDBRangeKeys() (KeyConvertible, KeyConvertible) {
	return kr.Begin, kr.End
}

// FDBRangeKeySelectors allows KeyRange to satisfy the Range interface.
func (kr KeyRange) FDBRangeKeySelectors() (Selectable, Selectable) {
	return FirstGreaterOrEqual(kr.Begin), FirstGreaterOrEqual(kr.End)
}

// SelectorRange is a Range constructed directly from a pair of Selectable
// objects. Note that the default zero-value of SelectorRange specifies an empty
// range before all keys in the database.
type SelectorRange struct {
	Begin, End Selectable
}

// FDBRangeKeySelectors allows SelectorRange to satisfy the Range interface.
func (sr SelectorRange) FDBRangeKeySelectors() (Selectable, Selectable) {
	return sr.Begin, sr.End
}

// RangeResult is a handle to the asynchronous result of a range
// read. RangeResult is safe for concurrent use by multiple goroutines.
//
// A RangeResult should not be returned from a transactional function passed to
// the Transact method of a Transactor.
type RangeResult struct {
	t        *transaction
	sr       SelectorRange
	options  RangeOptions
	snapshot bool
	f        *futureKeyValueArray
}

// GetSliceWithError returns a slice of KeyValue objects satisfying the range
// specified in the read that returned this RangeResult, or an error if any of
// the asynchronous operations associated with this result did not successfully
// complete. The current goroutine will be blocked until all reads have
// completed.
func (rr RangeResult) GetSliceWithError() ([]KeyValue, error) {
	var ret []KeyValue

	ri := rr.Iterator()

	if rr.options.Limit != 0 {
		ri.options.Mode = StreamingModeExact
	} else {
		ri.options.Mode = StreamingModeWantAll
	}

	for ri.Advance() {
		if ri.err != nil {
			return nil, ri.err
		}
		ret = append(ret, ri.kvs...)
		ri.index = len(ri.kvs)
		ri.fetchNextBatch()
	}

	return ret, nil
}

// GetSliceOrPanic returns a slice of KeyValue objects satisfying the range
// specified in the read that returned this RangeResult, or panics if any of the
// asynchronous operations associated with this result did not successfully
// complete. The current goroutine will be blocked until all reads have
// completed.
func (rr RangeResult) GetSliceOrPanic() []KeyValue {
	kvs, e := rr.GetSliceWithError()
	if e != nil {
		panic(e)
	}
	return kvs
}

// Iterator returns a RangeIterator over the key-value pairs satisfying the
// range specified in the read that returned this RangeResult.
func (rr RangeResult) Iterator() *RangeIterator {
	return &RangeIterator{
		t:         rr.t,
		f:         rr.f,
		sr:        rr.sr,
		options:   rr.options,
		iteration: 1,
		snapshot:  rr.snapshot,
	}
}

// RangeIterator returns the key-value pairs in the database (as KeyValue
// objects) satisfying the range specified in a range read. RangeIterator is
// constructed with the (RangeResult).Iterator method.
//
// You must call Advance and get a true result prior to calling Get or MustGet.
//
// RangeIterator should not be copied or used concurrently from multiple
// goroutines, but multiple RangeIterators may be constructed from a single
// RangeResult and used concurrently. RangeIterator should not be returned from
// a transactional function passed to the Transact method of a Transactor.
type RangeIterator struct {
	t         *transaction
	f         *futureKeyValueArray
	sr        SelectorRange
	options   RangeOptions
	iteration int
	done      bool
	more      bool
	kvs       []KeyValue
	index     int
	err       error
	snapshot  bool
}

// Advance attempts to advance the iterator to the next key-value pair. Advance
// returns true if there are more key-value pairs satisfying the range, or false
// if the range has been exhausted. You must call this before every call to Get
// or MustGet.
func (ri *RangeIterator) Advance() bool {
	if ri.done {
		return false
	}

	if ri.f == nil {
		return true
	}

	ri.kvs, ri.more, ri.err = ri.f.Get()
	ri.index = 0
	ri.f = nil

	if ri.err != nil || len(ri.kvs) > 0 {
		return true
	}

	return false
}

func (ri *RangeIterator) fetchNextBatch() {
	if !ri.more || ri.index == ri.options.Limit {
		ri.done = true
		return
	}

	if ri.options.Limit > 0 {
		// Not worried about this being zero, checked equality above
		ri.options.Limit -= ri.index
	}

	if ri.options.Reverse {
		ri.sr.End = FirstGreaterOrEqual(ri.kvs[ri.index-1].Key)
	} else {
		ri.sr.Begin = FirstGreaterThan(ri.kvs[ri.index-1].Key)
	}

	ri.iteration += 1

	f := ri.t.doGetRange(ri.sr, ri.options, ri.snapshot, ri.iteration)
	ri.f = &f
}

// Get returns the next KeyValue in a range read, or an error if one of the
// asynchronous operations associated with this range did not successfully
// complete. The Advance method of this RangeIterator must have returned true
// prior to calling Get.
func (ri *RangeIterator) Get() (kv KeyValue, e error) {
	if ri.err != nil {
		e = ri.err
		return
	}

	kv = ri.kvs[ri.index]

	ri.index += 1

	if ri.index == len(ri.kvs) {
		ri.fetchNextBatch()
	}

	return
}

// MustGet returns the next KeyValue in a range read, or panics if one of the
// asynchronous operations associated with this range did not successfully
// complete. The Advance method of this RangeIterator must have returned true
// prior to calling MustGet.
func (ri *RangeIterator) MustGet() KeyValue {
	kv, e := ri.Get()
	if e != nil {
		panic(e)
	}
	return kv
}

// Strinc returns the first key that sorts immediately after all keys beginning
// with prefix: the last byte of prefix not equal to 0xFF is incremented and any
// following bytes are dropped. Strinc returns an error if prefix is empty or
// contains only 0xFF bytes.
func Strinc(prefix []byte) ([]byte, error) {
	for i := len(prefix) - 1; i >= 0; i-- {
		if prefix[i] != 0xFF {
			ret := make([]byte, i+1)
			copy(ret, prefix[:i+1])
			ret[i] += 1
			return ret, nil
		}
	}

	return nil, fmt.Errorf("key must contain at least one byte not equal to 0xFF")
}

// PrefixRange returns the KeyRange describing the range of keys k such that
// bytes.HasPrefix(k, prefix) is true. PrefixRange returns an error if prefix is
// empty or entirely 0xFF bytes.
//
// Do not use PrefixRange on objects that already implement the Range or
// ExactRange interfaces. The prefix range of the byte representation of these
// objects may not correspond to their logical range.
func PrefixRange(prefix []byte) (KeyRange, error) {
	begin := make([]byte, len(prefix))
	copy(begin, prefix)
	end, e := Strinc(begin)
	if e != nil {
		return KeyRange{}, e
	}
	return KeyRange{Key(begin), Key(end)}, nil
}
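
// Illustrative sketch (not part of the original binding): a worked example of
// Strinc and PrefixRange for the prefix "apple":
//
//	end, _ := Strinc([]byte("apple"))     // "applf": last byte 'e' + 1
//	pr, _ := PrefixRange([]byte("apple")) // KeyRange{Key("apple"), Key("applf")}
//
// Every key beginning with "apple" sorts at or after "apple" and strictly
// before "applf", so the half-open KeyRange covers exactly the prefixed keys.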
@ -0,0 +1,88 @@
/*
 * snapshot.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

// Snapshot is a handle to a FoundationDB transaction snapshot, suitable for
// performing snapshot reads. Snapshot reads offer a more relaxed isolation
// level than FoundationDB's default serializable isolation, reducing
// transaction conflicts but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://foundationdb.org/documentation/developer-guide.html#snapshot-reads.
type Snapshot struct {
	*transaction
}

// ReadTransact executes the caller-provided function, passing it the Snapshot
// receiver object (as a ReadTransaction).
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but ReadTransact will not retry the
// function.
//
// By satisfying the ReadTransactor interface, Snapshot may be passed to a
// read-only transactional function from another (possibly read-only)
// transactional function, allowing composition.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (s Snapshot) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
	defer panicToError(&e)

	r, e = f(s)
	return
}

// Snapshot returns the receiver and allows Snapshot to satisfy the
// ReadTransaction interface.
func (s Snapshot) Snapshot() Snapshot {
	return s
}

// Get is equivalent to (Transaction).Get, performed as a snapshot read.
func (s Snapshot) Get(key KeyConvertible) FutureByteSlice {
	return s.get(key.FDBKey(), 1)
}

// GetKey is equivalent to (Transaction).GetKey, performed as a snapshot read.
func (s Snapshot) GetKey(sel Selectable) FutureKey {
	return s.getKey(sel.FDBKeySelector(), 1)
}

// GetRange is equivalent to (Transaction).GetRange, performed as a snapshot
// read.
func (s Snapshot) GetRange(r Range, options RangeOptions) RangeResult {
	return s.getRange(r, options, true)
}

// GetReadVersion is equivalent to (Transaction).GetReadVersion, performed as
// a snapshot read.
func (s Snapshot) GetReadVersion() FutureInt64 {
	return s.getReadVersion()
}

// GetDatabase returns a handle to the database with which this snapshot is
// interacting.
func (s Snapshot) GetDatabase() Database {
	return s.transaction.db
}
@ -0,0 +1,141 @@
/*
 * subspace.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go Subspace Layer

// Package subspace provides a convenient way to use FoundationDB tuples to
// define namespaces for different categories of data. The namespace is
// specified by a prefix tuple which is prepended to all tuples packed by the
// subspace. When unpacking a key with the subspace, the prefix tuple will be
// removed from the result.
//
// As a best practice, API clients should use at least one subspace for
// application data. For general guidance on subspace usage, see the Subspaces
// section of the Developer Guide
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-sub-keyspaces).
package subspace

import (
	"bytes"
	"errors"
	"fdb"
	"fdb/tuple"
)

// Subspace represents a well-defined region of keyspace in a FoundationDB
// database.
type Subspace interface {
	// Sub returns a new Subspace whose prefix extends this Subspace with the
	// encoding of the provided element(s). If any of the elements are not a
	// valid tuple.TupleElement, Sub will panic.
	Sub(el ...tuple.TupleElement) Subspace

	// Bytes returns the literal bytes of the prefix of this Subspace.
	Bytes() []byte

	// Pack returns the key encoding the specified Tuple with the prefix of this
	// Subspace prepended.
	Pack(t tuple.Tuple) fdb.Key

	// Unpack returns the Tuple encoded by the given key with the prefix of this
	// Subspace removed. Unpack will return an error if the key is not in this
	// Subspace or does not encode a well-formed Tuple.
	Unpack(k fdb.KeyConvertible) (tuple.Tuple, error)

	// Contains returns true if the provided key starts with the prefix of this
	// Subspace, indicating that the Subspace logically contains the key.
	Contains(k fdb.KeyConvertible) bool

	// All Subspaces implement fdb.KeyConvertible and may be used as
	// FoundationDB keys (corresponding to the prefix of this Subspace).
	fdb.KeyConvertible

	// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
	// keys logically in this Subspace.
	fdb.ExactRange
}

type subspace struct {
	b []byte
}

// AllKeys returns the Subspace corresponding to all keys in a FoundationDB
// database.
func AllKeys() Subspace {
	return subspace{}
}

// Sub returns a new Subspace whose prefix is the encoding of the provided
// element(s). If any of the elements are not a valid tuple.TupleElement, a
// runtime panic will occur.
func Sub(el ...tuple.TupleElement) Subspace {
	return subspace{tuple.Tuple(el).Pack()}
}

// FromBytes returns a new Subspace from the provided bytes. The slice is
// copied, so later modifications to b do not affect the Subspace.
func FromBytes(b []byte) Subspace {
	s := make([]byte, len(b))
	copy(s, b)
	return subspace{s}
}

func (s subspace) Sub(el ...tuple.TupleElement) Subspace {
	return subspace{concat(s.Bytes(), tuple.Tuple(el).Pack()...)}
}

func (s subspace) Bytes() []byte {
	return s.b
}

func (s subspace) Pack(t tuple.Tuple) fdb.Key {
	return fdb.Key(concat(s.b, t.Pack()...))
}

func (s subspace) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
	key := k.FDBKey()
	if !bytes.HasPrefix(key, s.b) {
		return nil, errors.New("key is not in subspace")
	}
	return tuple.Unpack(key[len(s.b):])
}

func (s subspace) Contains(k fdb.KeyConvertible) bool {
	return bytes.HasPrefix(k.FDBKey(), s.b)
}

func (s subspace) FDBKey() fdb.Key {
	return fdb.Key(s.b)
}

func (s subspace) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
	return fdb.Key(concat(s.b, 0x00)), fdb.Key(concat(s.b, 0xFF))
}

func (s subspace) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
	begin, end := s.FDBRangeKeys()
	return fdb.FirstGreaterOrEqual(begin), fdb.FirstGreaterOrEqual(end)
}

func concat(a []byte, b ...byte) []byte {
	r := make([]byte, len(a)+len(b))
	copy(r, a)
	copy(r[len(a):], b)
	return r
}
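
// Illustrative sketch (not part of the original binding; the "users" and
// "alice" names are hypothetical): a typical pattern is to reserve one
// subspace per category of data and pack tuples under it.
//
//	users := subspace.Sub("users")
//	k := users.Pack(tuple.Tuple{"alice", int64(1)}) // prefix + packed tuple
//
//	// Later, recover the tuple from a key found in a range read:
//	if users.Contains(k) {
//		t, _ := users.Unpack(k) // tuple.Tuple{"alice", 1}
//		_ = t
//	}
//
// Because every Subspace is also an fdb.Range, tr.GetRange(users,
// fdb.RangeOptions{}) reads all keys logically inside it.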
@ -0,0 +1,459 @@
/*
 * transaction.go
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// FoundationDB Go API

package fdb

/*
#define FDB_API_VERSION 500
#include <foundationdb/fdb_c.h>
*/
import "C"

// A ReadTransaction can asynchronously read from a FoundationDB
// database. Transaction and Snapshot both satisfy the ReadTransaction
// interface.
//
// All ReadTransactions satisfy the ReadTransactor interface and may be used
// with read-only transactional functions.
type ReadTransaction interface {
	Get(key KeyConvertible) FutureByteSlice
	GetKey(sel Selectable) FutureKey
	GetRange(r Range, options RangeOptions) RangeResult
	GetReadVersion() FutureInt64
	GetDatabase() Database
	Snapshot() Snapshot

	ReadTransactor
}

// Transaction is a handle to a FoundationDB transaction. Transaction is a
// lightweight object that may be efficiently copied, and is safe for concurrent
// use by multiple goroutines.
//
// In FoundationDB, a transaction is a mutable snapshot of a database. All read
// and write operations on a transaction see and modify an otherwise-unchanging
// version of the database and only change the underlying database if and when
// the transaction is committed. Read operations do see the effects of previous
// write operations on the same transaction. Committing a transaction usually
// succeeds in the absence of conflicts.
//
// Transactions group operations into a unit with the properties of atomicity,
// isolation, and durability. Transactions also provide the ability to maintain
// an application's invariants or integrity constraints, supporting the property
// of consistency. Together these properties are known as ACID.
//
// Transactions are also causally consistent: once a transaction has been
// successfully committed, all subsequently created transactions will see the
// modifications made by it.
type Transaction struct {
	*transaction
}

type transaction struct {
	ptr *C.FDBTransaction
	db  Database
}

// TransactionOptions is a handle with which to set options that affect a
// Transaction object. A TransactionOptions instance should be obtained with the
// (Transaction).Options method.
type TransactionOptions struct {
	transaction *transaction
}

func (opt TransactionOptions) setOpt(code int, param []byte) error {
	return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
		return C.fdb_transaction_set_option(opt.transaction.ptr, C.FDBTransactionOption(code), p, pl)
	}, param)
}

func (t *transaction) destroy() {
	C.fdb_transaction_destroy(t.ptr)
}

// GetDatabase returns a handle to the database with which this transaction is
// interacting.
func (t Transaction) GetDatabase() Database {
	return t.transaction.db
}

// Transact executes the caller-provided function, passing it the Transaction
// receiver object.
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but Transact will not retry the function
// or commit the Transaction after the caller-provided function completes.
//
// By satisfying the Transactor interface, Transaction may be passed to a
// transactional function from another transactional function, allowing
// composition. The outermost transactional function must have been provided a
// Database, or else the transaction will never be committed.
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (t Transaction) Transact(f func(Transaction) (interface{}, error)) (r interface{}, e error) {
	defer panicToError(&e)

	r, e = f(t)
	return
}

// ReadTransact executes the caller-provided function, passing it the
// Transaction receiver object (as a ReadTransaction).
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but ReadTransact will not retry the
// function.
//
// By satisfying the ReadTransactor interface, Transaction may be passed to a
// read-only transactional function from another (possibly read-only)
// transactional function, allowing composition.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (t Transaction) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
	defer panicToError(&e)

	r, e = f(t)
	return
}

// Cancel cancels a transaction. All pending or future uses of the transaction
// will encounter an error. The Transaction object may be reused after calling
// (Transaction).Reset.
//
// Be careful if you are using (Transaction).Reset and (Transaction).Cancel
// concurrently with the same transaction. Since they negate each other's
// effects, a race condition between these calls will leave the transaction in
// an unknown state.
//
// If your program attempts to cancel a transaction after (Transaction).Commit
// has been called but before it returns, unpredictable behavior will
// result. While it is guaranteed that the transaction will eventually end up in
// a cancelled state, the commit may or may not occur. Moreover, even if the
// call to (Transaction).Commit appears to return a transaction_cancelled
// error, the commit may have occurred or may occur in the future. This can make
// it more difficult to reason about the order in which transactions occur.
func (t Transaction) Cancel() {
	C.fdb_transaction_cancel(t.ptr)
}

// (Infrequently used) SetReadVersion sets the database version that the
// transaction will read from the database. The database cannot guarantee
// causal consistency if this method is used (the transaction's reads will be
// causally consistent only if the provided read version has that property).
func (t Transaction) SetReadVersion(version int64) {
	C.fdb_transaction_set_read_version(t.ptr, C.int64_t(version))
}

// Snapshot returns a Snapshot object, suitable for performing snapshot
// reads. Snapshot reads offer a more relaxed isolation level than
// FoundationDB's default serializable isolation, reducing transaction conflicts
// but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://foundationdb.org/documentation/developer-guide.html#using-snapshot-reads.
func (t Transaction) Snapshot() Snapshot {
	return Snapshot{t.transaction}
}

// OnError determines whether an error returned by a Transaction method is
// retryable. Waiting on the returned future will return the same error when
// fatal, or return nil (after blocking the calling goroutine for a suitable
// delay) for retryable errors.
//
// Typical code will not use OnError directly. (Database).Transact uses
// OnError internally to implement a correct retry loop.
func (t Transaction) OnError(e Error) FutureNil {
	return &futureNil{newFuture(C.fdb_transaction_on_error(t.ptr, C.fdb_error_t(e.Code)))}
}
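
// Illustrative sketch (not part of the original binding): the retry loop that
// (Database).Transact implements with OnError looks roughly like this.
//
//	for {
//		ret, e := f(tr) // caller-provided transactional function
//		if e == nil {
//			e = tr.Commit().Get()
//		}
//		if e == nil {
//			return ret, nil
//		}
//		fe, ok := e.(Error)
//		if !ok {
//			return nil, e // not an FDB error; do not retry
//		}
//		if e = tr.OnError(fe).Get(); e != nil {
//			return nil, e // fatal; OnError returned the same error
//		}
//		// retryable: OnError has delayed and, per the C API, reset the
//		// transaction for reuse
//	}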

// Commit attempts to commit the modifications made in the transaction to the
// database. Waiting on the returned future will block the calling goroutine
// until the transaction has either been committed successfully or an error is
// encountered. Any error should be passed to (Transaction).OnError to determine
// if the error is retryable or not.
//
// As with other client/server databases, in some failure scenarios a client may
// be unable to determine whether a transaction succeeded. For more information,
// see
// https://foundationdb.org/documentation/developer-guide.html#developer-guide-unknown-results.
func (t Transaction) Commit() FutureNil {
	return &futureNil{newFuture(C.fdb_transaction_commit(t.ptr))}
}

// Watch creates a watch and returns a FutureNil that will become ready when the
// watch reports a change to the value of the specified key.
//
// A watch's behavior is relative to the transaction that created it. A watch
// will report a change in relation to the key's value as readable by that
// transaction. The initial value used for comparison is either that of the
// transaction's read version or the value as modified by the transaction itself
// prior to the creation of the watch. If the value changes and then changes
// back to its initial value, the watch might not report the change.
//
// Until the transaction that created it has been committed, a watch will not
// report changes made by other transactions. In contrast, a watch will
// immediately report changes made by the transaction itself. Watches cannot be
// created if the transaction has called SetReadYourWritesDisable on the
// Transaction options, and an attempt to do so will return a watches_disabled
// error.
//
// If the transaction used to create a watch encounters an error during commit,
// then the watch will be set with that error. A transaction whose commit
// result is unknown will set all of its watches with the commit_unknown_result
// error. If an uncommitted transaction is reset or destroyed, then any watches
// it created will be set with the transaction_cancelled error.
//
// By default, each database connection can have no more than 10,000 watches
// that have not yet reported a change. When this number is exceeded, an attempt
// to create a watch will return a too_many_watches error. This limit can be
// changed using SetMaxWatches on the Database. Because a watch outlives the
// transaction that creates it, any watch that is no longer needed should be
// cancelled by calling (FutureNil).Cancel on its returned future.
func (t Transaction) Watch(key KeyConvertible) FutureNil {
	kb := key.FDBKey()
	return &futureNil{newFuture(C.fdb_transaction_watch(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
}
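
// Illustrative sketch (not part of the original binding; the key name is
// hypothetical): a watch is created inside one transaction and waited on
// after that transaction commits.
//
//	var w FutureNil
//	_, e := db.Transact(func(tr Transaction) (interface{}, error) {
//		w = tr.Watch(Key("config/flag"))
//		return nil, nil
//	})
//	if e == nil {
//		e = w.Get() // blocks until the key's value changes (or an error)
//	}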
|
||||
|
||||
func (t *transaction) get(key []byte, snapshot int) FutureByteSlice {
|
||||
return &futureByteSlice{future: newFuture(C.fdb_transaction_get(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(snapshot)))}
|
||||
}
|
||||
|
||||
// Get returns the (future) value associated with the specified key. The read is
|
||||
// performed asynchronously and does not block the calling goroutine. The future
|
||||
// will become ready when the read is complete.
|
||||
func (t Transaction) Get(key KeyConvertible) FutureByteSlice {
|
||||
return t.get(key.FDBKey(), 0)
|
||||
}
|
||||
|
||||
func (t *transaction) doGetRange(r Range, options RangeOptions, snapshot bool, iteration int) futureKeyValueArray {
|
||||
begin, end := r.FDBRangeKeySelectors()
|
||||
bsel := begin.FDBKeySelector()
|
||||
esel := end.FDBKeySelector()
|
||||
bkey := bsel.Key.FDBKey()
|
||||
ekey := esel.Key.FDBKey()
|
||||
|
||||
return futureKeyValueArray{newFuture(C.fdb_transaction_get_range(t.ptr, byteSliceToPtr(bkey), C.int(len(bkey)), C.fdb_bool_t(boolToInt(bsel.OrEqual)), C.int(bsel.Offset), byteSliceToPtr(ekey), C.int(len(ekey)), C.fdb_bool_t(boolToInt(esel.OrEqual)), C.int(esel.Offset), C.int(options.Limit), C.int(0), C.FDBStreamingMode(options.Mode-1), C.int(iteration), C.fdb_bool_t(boolToInt(snapshot)), C.fdb_bool_t(boolToInt(options.Reverse))))}
|
||||
}
|
||||
|
||||
func (t *transaction) getRange(r Range, options RangeOptions, snapshot bool) RangeResult {
|
||||
f := t.doGetRange(r, options, snapshot, 1)
|
||||
begin, end := r.FDBRangeKeySelectors()
|
||||
return RangeResult{
|
||||
t: t,
|
||||
sr: SelectorRange{begin, end},
|
||||
options: options,
|
||||
snapshot: snapshot,
|
||||
f: &f,
|
||||
}
|
||||
}
|
||||
|
||||
// GetRange performs a range read. The returned RangeResult represents all
|
||||
// KeyValue objects kv where beginKey <= kv.Key < endKey, ordered by kv.Key
|
||||
// (where beginKey and endKey are the keys described by the key selectors
|
||||
// returned by r.FDBKeySelectors). All reads performed as a result of GetRange
|
||||
// are asynchronous and do not block the calling goroutine.
|
||||
func (t Transaction) GetRange(r Range, options RangeOptions) RangeResult {
|
||||
return t.getRange(r, options, false)
|
||||
}
|
||||
|
||||
func (t *transaction) getReadVersion() FutureInt64 {
|
||||
return &futureInt64{newFuture(C.fdb_transaction_get_read_version(t.ptr))}
|
||||
}
|
||||
|
||||
// (Infrequently used) GetReadVersion returns the (future) transaction read version. The read is
|
||||
// performed asynchronously and does not block the calling goroutine. The future
|
||||
// will become ready when the read version is available.
|
||||
func (t Transaction) GetReadVersion() FutureInt64 {
|
||||
return t.getReadVersion()
|
||||
}
|
||||
|
||||
// Set associates the given key and value, overwriting any previous association
|
||||
// with key. Set returns immediately, having modified the snapshot of the
|
||||
// database represented by the transaction.
|
||||
func (t Transaction) Set(key KeyConvertible, value []byte) {
|
||||
kb := key.FDBKey()
|
||||
C.fdb_transaction_set(t.ptr, byteSliceToPtr(kb), C.int(len(kb)), byteSliceToPtr(value), C.int(len(value)))
|
||||
}
|
||||
|
||||
// Clear removes the specified key (and any associated value), if it
|
||||
// exists. Clear returns immediately, having modified the snapshot of the
|
||||
// database represented by the transaction.
|
||||
func (t Transaction) Clear(key KeyConvertible) {
|
||||
kb := key.FDBKey()
|
||||
C.fdb_transaction_clear(t.ptr, byteSliceToPtr(kb), C.int(len(kb)))
|
||||
}
|
||||
|
||||
// ClearRange removes all keys k such that begin <= k < end, and their
|
||||
// associated values. ClearRange returns immediately, having modified the
|
||||
// snapshot of the database represented by the transaction.
|
||||
func (t Transaction) ClearRange(er ExactRange) {
|
||||
begin, end := er.FDBRangeKeys()
|
||||
bkb := begin.FDBKey()
|
||||
ekb := end.FDBKey()
|
||||
C.fdb_transaction_clear_range(t.ptr, byteSliceToPtr(bkb), C.int(len(bkb)), byteSliceToPtr(ekb), C.int(len(ekb)))
|
||||
}
|
||||
|
||||
// (Infrequently used) GetCommittedVersion returns the version number at which a
|
||||
// successful commit modified the database. This must be called only after the
|
||||
// successful (non-error) completion of a call to Commit on this Transaction, or
|
||||
// the behavior is undefined. Read-only transactions do not modify the database
|
||||
// when committed and will have a committed version of -1. Keep in mind that a
|
||||
// transaction which reads keys and then sets them to their current values may
|
||||
// be optimized to a read-only transaction.
|
||||
func (t Transaction) GetCommittedVersion() (int64, error) {
|
||||
var version C.int64_t
|
||||
|
||||
if err := C.fdb_transaction_get_committed_version(t.ptr, &version); err != 0 {
|
||||
return 0, Error{int(err)}
|
||||
}
|
||||
|
||||
return int64(version), nil
|
||||
}
|
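// A hedged sketch (error handling abbreviated; CreateTransaction and
// FutureNil.Get are assumed to be this binding's usual helpers):
//
//     tr, _ := db.CreateTransaction()
//     tr.Set(fdb.Key("k"), []byte("v"))
//     if err := tr.Commit().Get(); err == nil {
//         v, _ := tr.GetCommittedVersion()
//         // v is the version assigned to this commit; a read-only
//         // transaction would report -1
//     }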
||||
|
||||
// (Infrequently used) GetVersionstamp returns a future which will contain the versionstamp
|
||||
// which was used by any versionstamp operations in this transaction. The
|
||||
// future will be ready only after the successful completion of a call to Commit
|
||||
// on this Transaction. Read-only transactions do not modify the database when
|
||||
// committed and will result in the future completing with an error. Keep in
|
||||
// mind that a transaction which reads keys and then sets them to their current
|
||||
// values may be optimized to a read-only transaction.
|
||||
func (t Transaction) GetVersionstamp() FutureKey {
|
||||
return &futureKey{future: newFuture(C.fdb_transaction_get_versionstamp(t.ptr))}
|
||||
}
|
||||
|
||||
// Reset rolls back a transaction, completely resetting it to its initial
|
||||
// state. This is logically equivalent to destroying the transaction and
|
||||
// creating a new one.
|
||||
func (t Transaction) Reset() {
|
||||
C.fdb_transaction_reset(t.ptr)
|
||||
}
|
||||
|
||||
func boolToInt(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (t *transaction) getKey(sel KeySelector, snapshot int) FutureKey {
|
||||
key := sel.Key.FDBKey()
|
||||
return &futureKey{future: newFuture(C.fdb_transaction_get_key(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(boolToInt(sel.OrEqual)), C.int(sel.Offset), C.fdb_bool_t(snapshot)))}
|
||||
}
|
||||
|
||||
// GetKey returns the future key referenced by the provided key selector. The
|
||||
// read is performed asynchronously and does not block the calling
|
||||
// goroutine. The future will become ready when the read is complete.
|
||||
//
|
||||
// By default, the key is cached for the duration of the transaction, providing
|
||||
// a potential performance benefit. However, the value of the key is also
|
||||
// retrieved, using network bandwidth. Invoking
|
||||
// (TransactionOptions).SetReadYourWritesDisable will avoid both the caching and
|
||||
// the increased network bandwidth.
|
||||
func (t Transaction) GetKey(sel Selectable) FutureKey {
|
||||
return t.getKey(sel.FDBKeySelector(), 0)
|
||||
}
|
||||
|
||||
func (t Transaction) atomicOp(key []byte, param []byte, code int) {
|
||||
C.fdb_transaction_atomic_op(t.ptr, byteSliceToPtr(key), C.int(len(key)), byteSliceToPtr(param), C.int(len(param)), C.FDBMutationType(code))
|
||||
}
|
||||
|
||||
func addConflictRange(t *transaction, er ExactRange, crtype conflictRangeType) error {
|
||||
begin, end := er.FDBRangeKeys()
|
||||
bkb := begin.FDBKey()
|
||||
ekb := end.FDBKey()
|
||||
if err := C.fdb_transaction_add_conflict_range(t.ptr, byteSliceToPtr(bkb), C.int(len(bkb)), byteSliceToPtr(ekb), C.int(len(ekb)), C.FDBConflictRangeType(crtype)); err != 0 {
|
||||
return Error{int(err)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddReadConflictRange adds a range of keys to the transaction's read conflict
|
||||
// ranges as if you had read the range. As a result, other transactions that
|
||||
// write a key in this range could cause the transaction to fail with a
|
||||
// conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddReadConflictRange(er ExactRange) error {
|
||||
return addConflictRange(t.transaction, er, conflictRangeTypeRead)
|
||||
}
|
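// A hedged sketch (names illustrative): force a conflict check on keys
// "a" <= k < "b" without actually reading them:
//
//     _, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
//         r := fdb.KeyRange{Begin: fdb.Key("a"), End: fdb.Key("b")}
//         if e := tr.AddReadConflictRange(r); e != nil {
//             return nil, e
//         }
//         tr.Set(fdb.Key("c"), []byte("v"))
//         return nil, nil
//     })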
||||
|
||||
func copyAndAppend(orig []byte, b byte) []byte {
|
||||
ret := make([]byte, len(orig) + 1)
|
||||
copy(ret, orig)
|
||||
ret[len(orig)] = b
|
||||
return ret
|
||||
}
|
||||
|
||||
// AddReadConflictKey adds a key to the transaction's read conflict ranges as if
|
||||
// you had read the key. As a result, other transactions that concurrently write
|
||||
// this key could cause the transaction to fail with a conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
|
||||
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeRead)
|
||||
}
|
||||
|
||||
// AddWriteConflictRange adds a range of keys to the transaction's write
|
||||
// conflict ranges as if you had cleared the range. As a result, other
|
||||
// transactions that concurrently read a key in this range could fail with a
|
||||
// conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddWriteConflictRange(er ExactRange) error {
|
||||
return addConflictRange(t.transaction, er, conflictRangeTypeWrite)
|
||||
}
|
||||
|
||||
// AddWriteConflictKey adds a key to the transaction's write conflict ranges as
|
||||
// if you had written the key. As a result, other transactions that concurrently
|
||||
// read this key could fail with a conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddWriteConflictKey(key KeyConvertible) error {
|
||||
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeWrite)
|
||||
}
|
||||
|
||||
// Options returns a TransactionOptions instance suitable for setting options
|
||||
// specific to this transaction.
|
||||
func (t Transaction) Options() TransactionOptions {
|
||||
return TransactionOptions{t.transaction}
|
||||
}
|
||||
|
||||
func localityGetAddressesForKey(t *transaction, key KeyConvertible) FutureStringSlice {
|
||||
kb := key.FDBKey()
|
||||
return &futureStringSlice{newFuture(C.fdb_transaction_get_addresses_for_key(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
|
||||
}
|
||||
|
||||
// LocalityGetAddressesForKey returns the (future) public network addresses of
|
||||
// each of the storage servers responsible for storing key and its associated
|
||||
// value. The read is performed asynchronously and does not block the calling
|
||||
// goroutine. The future will become ready when the read is complete.
|
||||
func (t Transaction) LocalityGetAddressesForKey(key KeyConvertible) FutureStringSlice {
|
||||
return localityGetAddressesForKey(t.transaction, key)
|
||||
}
|
|
@ -0,0 +1,259 @@
|
|||
/*
|
||||
* tuple.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// FoundationDB Go Tuple Layer
|
||||
|
||||
// Package tuple provides a layer for encoding and decoding multi-element tuples
|
||||
// into keys usable by FoundationDB. The encoded key maintains the same sort
|
||||
// order as the original tuple: sorted first by the first element, then by the
|
||||
// second element, etc. This makes the tuple layer ideal for building a variety
|
||||
// of higher-level data models.
|
||||
//
|
||||
// For general guidance on tuple usage, see the Tuple section of Data Modeling
|
||||
// (https://foundationdb.org/documentation/data-modeling.html#data-modeling-tuples).
|
||||
//
|
||||
// FoundationDB tuples can currently encode byte and unicode strings, integers
|
||||
// and NULL values. In Go these are represented as []byte, string, int64 and
|
||||
// nil.
|
||||
package tuple
|
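// A round-trip sketch of the ordering property described above (output
// shapes are illustrative):
//
//     t := tuple.Tuple{"alpha", int64(3), []byte{0x00}}
//     key := t.Pack()          // an order-preserving byte-string key
//     u, err := tuple.Unpack(key)
//     // err == nil and u equals t (strings decode as string, ints as int64)
//
// Because packed keys sort like their tuples, tuple.Tuple{"alpha"} packs to
// a strict prefix of every key packed from a longer tuple starting "alpha".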
||||
|
||||
import (
|
||||
"fmt"
|
||||
"encoding/binary"
|
||||
"bytes"
|
||||
"fdb"
|
||||
)
|
||||
|
||||
// A TupleElement is one of the types that may be encoded in FoundationDB
|
||||
// tuples. Although the Go compiler cannot enforce this, it is a programming
|
||||
// error to use an unsupported type as a TupleElement (and will typically
|
||||
// result in a runtime panic).
|
||||
//
|
||||
// The valid types for TupleElement are []byte (or fdb.KeyConvertible), string,
|
||||
// int64 (or int), and nil.
|
||||
type TupleElement interface{}
|
||||
|
||||
// Tuple is a slice of objects that can be encoded as FoundationDB tuples. If
|
||||
// any of the TupleElements are of unsupported types, a runtime panic will occur
|
||||
// when the Tuple is packed.
|
||||
//
|
||||
// Given a Tuple T containing objects only of these types, then T will be
|
||||
// identical to the Tuple returned by unpacking the byte slice obtained by
|
||||
// packing T (modulo type normalization to []byte and int64).
|
||||
type Tuple []TupleElement
|
||||
|
||||
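// sizeLimits[n] is the largest unsigned value representable in n bytes;
// bisectLeft scans it to find the minimal encoded length for an integer.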
var sizeLimits = []uint64{
|
||||
1 << (0 * 8) - 1,
|
||||
1 << (1 * 8) - 1,
|
||||
1 << (2 * 8) - 1,
|
||||
1 << (3 * 8) - 1,
|
||||
1 << (4 * 8) - 1,
|
||||
1 << (5 * 8) - 1,
|
||||
1 << (6 * 8) - 1,
|
||||
1 << (7 * 8) - 1,
|
||||
1 << (8 * 8) - 1,
|
||||
}
|
||||
|
||||
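// encodeBytes writes the typecode, then b with every 0x00 escaped as
// 0x00 0xFF, then an unescaped 0x00 terminator (findTerminator undoes
// this scan when decoding).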
func encodeBytes(buf *bytes.Buffer, code byte, b []byte) {
|
||||
buf.WriteByte(code)
|
||||
buf.Write(bytes.Replace(b, []byte{0x00}, []byte{0x00, 0xFF}, -1))
|
||||
buf.WriteByte(0x00)
|
||||
}
|
||||
|
||||
func bisectLeft(u uint64) int {
|
||||
var n int
|
||||
for sizeLimits[n] < u {
|
||||
n++
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func encodeInt(buf *bytes.Buffer, i int64) {
|
||||
if i == 0 {
|
||||
buf.WriteByte(0x14)
|
||||
return
|
||||
}
|
||||
|
||||
var n int
|
||||
var ibuf bytes.Buffer
|
||||
|
||||
switch {
|
||||
case i > 0:
|
||||
n = bisectLeft(uint64(i))
|
||||
buf.WriteByte(byte(0x14+n))
|
||||
binary.Write(&ibuf, binary.BigEndian, i)
|
||||
case i < 0:
|
||||
n = bisectLeft(uint64(-i))
|
||||
buf.WriteByte(byte(0x14-n))
|
||||
binary.Write(&ibuf, binary.BigEndian, int64(sizeLimits[n])+i)
|
||||
}
|
||||
|
||||
buf.Write(ibuf.Bytes()[8-n:])
|
||||
}
|
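// Worked examples of the encoding above: 0 encodes as 0x14; 1 encodes as
// 0x15 0x01 (one-byte positive); -1 encodes as 0x13 0xFE, since negative
// values of length n are stored as sizeLimits[n]+i (here 255-1 = 0xFE).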
||||
|
||||
// Pack returns a new byte slice encoding the provided tuple. Pack will panic if
|
||||
// the tuple contains an element of any type other than []byte,
|
||||
// fdb.KeyConvertible, string, int64, int or nil.
|
||||
//
|
||||
// Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to
|
||||
// call Pack when using a Tuple with a FoundationDB API function that requires a
|
||||
// key.
|
||||
func (t Tuple) Pack() []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
for i, e := range t {
|
||||
switch e := e.(type) {
|
||||
case nil:
|
||||
buf.WriteByte(0x00)
|
||||
case int64:
|
||||
encodeInt(buf, e)
|
||||
case int:
|
||||
encodeInt(buf, int64(e))
|
||||
case []byte:
|
||||
encodeBytes(buf, 0x01, e)
|
||||
case fdb.KeyConvertible:
|
||||
encodeBytes(buf, 0x01, []byte(e.FDBKey()))
|
||||
case string:
|
||||
encodeBytes(buf, 0x02, []byte(e))
|
||||
default:
|
||||
panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i]))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func findTerminator(b []byte) int {
|
||||
bp := b
|
||||
var length int
|
||||
|
||||
for {
|
||||
idx := bytes.IndexByte(bp, 0x00)
|
||||
length += idx
|
||||
if idx + 1 == len(bp) || bp[idx+1] != 0xFF {
|
||||
break
|
||||
}
|
||||
length += 2
|
||||
bp = bp[idx+2:]
|
||||
}
|
||||
|
||||
return length
|
||||
}
|
||||
|
||||
func decodeBytes(b []byte) ([]byte, int) {
|
||||
idx := findTerminator(b[1:])
|
||||
return bytes.Replace(b[1:idx+1], []byte{0x00, 0xFF}, []byte{0x00}, -1), idx + 2
|
||||
}
|
||||
|
||||
func decodeString(b []byte) (string, int) {
|
||||
bp, idx := decodeBytes(b)
|
||||
return string(bp), idx
|
||||
}
|
||||
|
||||
func decodeInt(b []byte) (int64, int) {
|
||||
if b[0] == 0x14 {
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
var neg bool
|
||||
|
||||
n := int(b[0]) - 20
|
||||
if n < 0 {
|
||||
n = -n
|
||||
neg = true
|
||||
}
|
||||
|
||||
bp := make([]byte, 8)
|
||||
copy(bp[8-n:], b[1:n+1])
|
||||
|
||||
var ret int64
|
||||
|
||||
binary.Read(bytes.NewBuffer(bp), binary.BigEndian, &ret)
|
||||
|
||||
if neg {
|
||||
ret -= int64(sizeLimits[n])
|
||||
}
|
||||
|
||||
return ret, n+1
|
||||
}
|
||||
|
||||
// Unpack returns the tuple encoded by the provided byte slice, or an error if
|
||||
// the key does not correctly encode a FoundationDB tuple.
|
||||
func Unpack(b []byte) (Tuple, error) {
|
||||
var t Tuple
|
||||
|
||||
var i int
|
||||
|
||||
for i < len(b) {
|
||||
var el interface{}
|
||||
var off int
|
||||
|
||||
switch {
|
||||
case b[i] == 0x00:
|
||||
el = nil
|
||||
off = 1
|
||||
case b[i] == 0x01:
|
||||
el, off = decodeBytes(b[i:])
|
||||
case b[i] == 0x02:
|
||||
el, off = decodeString(b[i:])
|
||||
case 0x0c <= b[i] && b[i] <= 0x1c:
|
||||
el, off = decodeInt(b[i:])
|
||||
default:
|
||||
return nil, fmt.Errorf("unable to decode tuple element with unknown typecode %02x", b[i])
|
||||
}
|
||||
|
||||
t = append(t, el)
|
||||
i += off
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// FDBKey returns the packed representation of a Tuple, and allows Tuple to
|
||||
// satisfy the fdb.KeyConvertible interface. FDBKey will panic in the same
|
||||
// circumstances as Pack.
|
||||
func (t Tuple) FDBKey() fdb.Key {
|
||||
return t.Pack()
|
||||
}
|
||||
|
||||
// FDBRangeKeys allows Tuple to satisfy the fdb.ExactRange interface. The range
|
||||
// represents all keys that encode tuples strictly starting with a Tuple (that
|
||||
// is, all tuples of greater length than the Tuple of which the Tuple is a
|
||||
// prefix).
|
||||
func (t Tuple) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
|
||||
p := t.Pack()
|
||||
return fdb.Key(concat(p, 0x00)), fdb.Key(concat(p, 0xFF))
|
||||
}
|
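// A hedged sketch (assumes db is an open fdb.Database): because Tuple
// satisfies fdb.Range, it can be passed directly to GetRange to scan all
// tuples extending a given prefix:
//
//     _, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
//         rr := tr.GetRange(tuple.Tuple{"index", "color"}, fdb.RangeOptions{})
//         kvs, e := rr.GetSliceWithError()
//         return kvs, e
//     })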
||||
|
||||
// FDBRangeKeySelectors allows Tuple to satisfy the fdb.Range interface. The
|
||||
// range represents all keys that encode tuples strictly starting with a Tuple
|
||||
// (that is, all tuples of greater length than the Tuple of which the Tuple is a
|
||||
// prefix).
|
||||
func (t Tuple) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
|
||||
b, e := t.FDBRangeKeys()
|
||||
return fdb.FirstGreaterOrEqual(b), fdb.FirstGreaterOrEqual(e)
|
||||
}
|
||||
|
||||
func concat(a []byte, b ...byte) []byte {
|
||||
r := make([]byte, len(a) + len(b))
|
||||
copy(r, a)
|
||||
copy(r[len(a):], b)
|
||||
return r
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,124 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Release)' == 'true' ">
|
||||
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<PreReleaseDecoration>
|
||||
</PreReleaseDecoration>
|
||||
</PropertyGroup>
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|x64">
|
||||
<Configuration>Debug</Configuration>
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
<ProjectConfiguration Include="Release|x64">
|
||||
<Configuration>Release</Configuration>
|
||||
<Platform>x64</Platform>
|
||||
</ProjectConfiguration>
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{9617584C-22E8-4272-934F-733F378BF6AE}</ProjectGuid>
|
||||
<RootNamespace>java</RootNamespace>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup>
|
||||
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
|
||||
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
|
||||
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup>
|
||||
<PreBuildEvent>
|
||||
<Command>echo "Removing java jars from directory: $(OutDir)"
|
||||
if exist "$(OutDir)fdb-java-*.jar" del "$(OutDir)fdb-java-*.jar"</Command>
|
||||
</PreBuildEvent>
|
||||
<PostBuildEvent>
|
||||
<Command>rmdir /S /Q classes
|
||||
mkdir classes\main
|
||||
mkdir classes\test
|
||||
"C:\Program Files\Java\jdk6\bin\javac.exe" -source 1.6 -target 1.6 -d classes\main src\main\com\apple\cie\foundationdb\*.java src\main\com\apple\cie\foundationdb\async\*.java src\main\com\apple\cie\foundationdb\tuple\*.java src\main\com\apple\cie\foundationdb\directory\*.java src\main\com\apple\cie\foundationdb\subspace\*.java
|
||||
"C:\Program Files\Java\jdk6\bin\javac.exe" -source 1.6 -target 1.6 -cp classes\main -d classes\test src\test\com\apple\cie\foundationdb\test\*.java
|
||||
mkdir classes\main\lib\windows\amd64
|
||||
copy "$(TargetPath)" "classes\main\lib\windows\amd64"
|
||||
"C:\Program Files\Java\jdk6\bin\jar.exe" cf "$(OutDir)fdb-java-$(Version)$(PreReleaseDecoration)-windows-$(Platform).jar" -C classes\main com\apple\cie\foundationdb -C classes\main lib\windows\amd64
|
||||
"C:\Program Files\Java\jdk6\bin\jar.exe" cf "$(OutDir)foundationdb-tests.jar" -C classes\test com\apple\cie\foundationdb
|
||||
FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)-%%i"</Command>
|
||||
</PostBuildEvent>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdb_c.lib;%(AdditionalDependencies)</AdditionalDependencies>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
|
||||
<PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
|
||||
<PreprocessorDefinitions>WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="fdbJNI.h" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClCompile Include="fdbJNI.cpp" />
|
||||
</ItemGroup>
|
||||
<ImportGroup Label="ExtensionTargets" />
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<Target Name="AfterClean">
|
||||
<ItemGroup>
|
||||
<FilesToDelete Include="$(OutDir)fdb-java-*.jar">
|
||||
<Visible>false</Visible>
|
||||
</FilesToDelete>
|
||||
<FilesToDelete Include="$(OutDir)fdb_java.dll-*">
|
||||
<Visible>false</Visible>
|
||||
</FilesToDelete>
|
||||
</ItemGroup>
|
||||
<Message Text="Cleaning old jar and dll files" Importance="high" />
|
||||
<Delete Files="@(FilesToDelete)" />
|
||||
</Target>
|
||||
</Project>
|
|
@ -0,0 +1,224 @@
|
|||
#
|
||||
# local.mk
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# -*- mode: makefile; -*-
|
||||
|
||||
fdb_java_LDFLAGS := -Llib
|
||||
fdb_java_CFLAGS := $(fdbclient_CFLAGS) -Ibindings/c
|
||||
|
||||
# We only override if the environment didn't set it (this is used by
|
||||
# the fdbwebsite documentation build process)
|
||||
JAVADOC_DIR ?= bindings/java
|
||||
|
||||
fdb_java_LIBS := lib/libfdb_c.$(DLEXT)
|
||||
fdb_java_LDFLAGS += -Llib
|
||||
|
||||
ifeq ($(RELEASE),true)
|
||||
JARVER = $(VERSION)
|
||||
APPLEJARVER = $(VERSION)
|
||||
else
|
||||
JARVER = $(VERSION)-PRERELEASE
|
||||
APPLEJARVER = $(VERSION)-SNAPSHOT
|
||||
endif
|
||||
|
||||
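# $(1) selects the binding flavor: empty for the classic bindings, or
# "-completable" for the Java 8 CompletableFuture-based bindings (see the
# $(eval $(call ...)) invocations at the bottom of this file).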
define add_java_binding_targets
|
||||
|
||||
JAVA$(1)_GENERATED_SOURCES := bindings/java/src$(1)/main/com/apple/cie/foundationdb/NetworkOptions.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/ClusterOptions.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/DatabaseOptions.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/TransactionOptions.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/ConflictRangeType.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/MutationType.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/FDBException.java
|
||||
|
||||
JAVA$(1)_SOURCES := $$(JAVA$(1)_GENERATED_SOURCES) bindings/java/src$(1)/main/com/apple/cie/foundationdb/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/async/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/tuple/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/directory/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/subspace/*.java bindings/java/src$(1)/test/com/apple/cie/foundationdb/test/*.java
|
||||
|
||||
fdb_java$(1): bindings/java/foundationdb-client$(1).jar bindings/java/foundationdb-tests$(1).jar
|
||||
|
||||
bindings/java/foundationdb-tests$(1).jar: bindings/java/.classstamp$(1)
|
||||
@echo "Building $$@"
|
||||
@jar cf $$@ -C bindings/java/classes$(1)/test com/apple/cie/foundationdb
|
||||
|
||||
bindings/java/foundationdb-client$(1).jar: bindings/java/.classstamp$(1) lib/libfdb_java.$(DLEXT)
|
||||
@echo "Building $$@"
|
||||
@rm -rf bindings/java/classes$(1)/main/lib/$$(PLATFORM)/$$(java_ARCH)
|
||||
@mkdir -p bindings/java/classes$(1)/main/lib/$$(PLATFORM)/$$(java_ARCH)
|
||||
@cp lib/libfdb_java.$$(DLEXT) bindings/java/classes$(1)/main/lib/$$(PLATFORM)/$$(java_ARCH)/libfdb_java.$$(java_DLEXT)
|
||||
@jar cf $$@ -C bindings/java/classes$(1)/main com/apple/cie/foundationdb -C bindings/java/classes$(1)/main lib
|
||||
|
||||
fdb_java$(1)_jar_clean:
|
||||
@rm -rf $$(JAVA$(1)_GENERATED_SOURCES)
|
||||
@rm -rf bindings/java/classes$(1)
|
||||
@rm -f bindings/java/foundationdb-client$(1).jar bindings/java/foundationdb-tests$(1).jar bindings/java/.classstamp$(1)
|
||||
|
||||
# Redefinition of a target already defined in generated.mk, but it's "okay" and the way things were done before.
|
||||
fdb_java_clean: fdb_java$(1)_jar_clean
|
||||
|
||||
bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java: bin/vexillographer.exe fdbclient/vexillographer/fdb.options
|
||||
@echo "Building Java options"
|
||||
@$$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options java $$(@D)
|
||||
|
||||
bindings/java/src$(1)/main/com/apple/cie/foundationdb/MutationType.java: bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java
|
||||
@true
|
||||
|
||||
bindings/java/src$(1)/main/com/apple/cie/foundationdb/ConflictRangeType.java: bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java
|
||||
@true
|
||||
|
||||
bindings/java/src$(1)/main/com/apple/cie/foundationdb/FDBException.java: bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java
|
||||
@true
|
||||
|
||||
bindings/java/src$(1)/main/com/apple/cie/foundationdb/%Options.java: bindings/java/src$(1)/main/com/apple/cie/foundationdb/StreamingMode.java
|
||||
@true
|
||||
|
||||
bindings/java/src$(1)/main/overview.html: bindings/java/src$(1)/main/overview.html.in $$(ALL_MAKEFILES) versions.target
|
||||
@m4 -DVERSION=$$(VERSION) $$< > $$@
|
||||
|
||||
bindings/java/.classstamp$(1): $$(JAVA$(1)_SOURCES)
|
||||
@echo "Compiling Java$(1) source"
|
||||
@rm -rf bindings/java/classes$(1)
|
||||
@mkdir -p bindings/java/classes$(1)/main
|
||||
@mkdir -p bindings/java/classes$(1)/test
|
||||
@$$(JAVAC) $$(JAVA$(1)FLAGS) -d bindings/java/classes$(1)/main bindings/java/src$(1)/main/com/apple/cie/foundationdb/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/async/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/tuple/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/directory/*.java bindings/java/src$(1)/main/com/apple/cie/foundationdb/subspace/*.java
|
||||
@$$(JAVAC) $$(JAVA$(1)FLAGS) -cp bindings/java/classes$(1)/main -d bindings/java/classes$(1)/test bindings/java/src$(1)/test/com/apple/cie/foundationdb/test/*.java
|
||||
@echo timestamp > bindings/java/.classstamp$(1)
|
||||
|
||||
javadoc$(1): $$(JAVA$(1)_SOURCES) bindings/java/src$(1)/main/overview.html
|
||||
@echo "Generating Javadocs"
|
||||
@mkdir -p $$(JAVADOC_DIR)/javadoc$(1)/
|
||||
@javadoc -quiet -public -notimestamp -source 1.8 -sourcepath bindings/java/src$(1)/main \
|
||||
-overview bindings/java/src$(1)/main/overview.html -d $$(JAVADOC_DIR)/javadoc$(1)/ \
|
||||
-windowtitle "FoundationDB Java Client API" \
|
||||
-doctitle "FoundationDB Java Client API" \
|
||||
-link "http://docs.oracle.com/javase/8/docs/api" \
|
||||
com.apple.cie.foundationdb com.apple.cie.foundationdb.async com.apple.cie.foundationdb.tuple com.apple.cie.foundationdb.directory com.apple.cie.foundationdb.subspace
|
||||
|
||||
javadoc$(1)_clean:
|
||||
@rm -rf $$(JAVADOC_DIR)/javadoc$(1)
|
||||
@rm bindings/java/src$(1)/main/overview.html
|
||||
|
||||
ifeq ($$(PLATFORM),linux)
|
||||
# We only need javadoc from one source
|
||||
TARGETS += javadoc$(1)
|
||||
CLEAN_TARGETS += javadoc$(1)_clean
|
||||
|
||||
# _release builds the lib on OS X and the jars (including the OS X lib) on Linux
|
||||
TARGETS += fdb_java$(1)_release
|
||||
CLEAN_TARGETS += fdb_java$(1)_release_clean
|
||||
|
||||
ifneq ($$(FATJAR),)
|
||||
packages/fdb-java$(1)-$$(JARVER).jar: $$(MAC_OBJ_JAVA) $$(WINDOWS_OBJ_JAVA)
|
||||
endif
|
||||
|
||||
bindings/java/pom$(1).xml: bindings/java/pom.xml.in $$(ALL_MAKEFILES) versions.target
|
||||
@echo "Generating $$@"
|
||||
@m4 -DVERSION=$$(JARVER) -DNAME=fdb-java$(1) $$< > $$@
|
||||
|
||||
bindings/java/fdb-java$(1)-$(APPLEJARVER).pom: bindings/java/pom$(1).xml
|
||||
@echo "Copying $$@"
|
||||
sed -e 's/-PRERELEASE/-SNAPSHOT/g' bindings/java/pom$(1).xml > "$$@"
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER).jar: fdb_java$(1) versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
@rm -rf packages/jar$(1)_regular
|
||||
@mkdir -p packages/jar$(1)_regular
|
||||
@cd packages/jar$(1)_regular && unzip -qq $$(TOPDIR)/bindings/java/foundationdb-client$(1).jar
|
||||
ifneq ($$(FATJAR),)
|
||||
@mkdir -p packages/jar$(1)_regular/lib/windows/amd64
|
||||
@mkdir -p packages/jar$(1)_regular/lib/osx/x86_64
|
||||
@cp $$(MAC_OBJ_JAVA) packages/jar$(1)_regular/lib/osx/x86_64/libfdb_java.jnilib
|
||||
@cp $$(WINDOWS_OBJ_JAVA) packages/jar$(1)_regular/lib/windows/amd64/fdb_java.dll
|
||||
endif
|
||||
@cd packages/jar$(1)_regular && jar cf $$(TOPDIR)/$$@ *
|
||||
@rm -r packages/jar$(1)_regular
|
||||
@cd bindings && jar uf $$(TOPDIR)/$$@ ../LICENSE
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER)-sources.jar: $$(JAVA$(1)_GENERATED_SOURCES) versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
@jar cf $(TOPDIR)/$$@ -C bindings/java/src$(1)/main com/apple/cie/foundationdb
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER)-javadoc.jar: javadoc$(1) versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
@cd $$(JAVADOC_DIR)/javadoc$(1)/ && jar cf $$(TOPDIR)/$$@ *
|
||||
@cd bindings && jar uf $$(TOPDIR)/$$@ ../LICENSE
|
||||
|
||||
packages/fdb-java$(1)-$$(JARVER)-bundle.jar: packages/fdb-java$(1)-$$(JARVER).jar packages/fdb-java$(1)-$$(JARVER)-javadoc.jar packages/fdb-java$(1)-$$(JARVER)-sources.jar bindings/java/pom$(1).xml bindings/java/fdb-java$(1)-$$(APPLEJARVER).pom versions.target
|
||||
@echo "Building $$@"
|
||||
@rm -f $$@
|
||||
@rm -rf packages/bundle$(1)_regular
|
||||
@mkdir -p packages/bundle$(1)_regular
|
||||
@cp packages/fdb-java$(1)-$$(JARVER).jar packages/fdb-java$(1)-$$(JARVER)-javadoc.jar packages/fdb-java$(1)-$$(JARVER)-sources.jar bindings/java/fdb-java$(1)-$$(APPLEJARVER).pom packages/bundle$(1)_regular
|
||||
@cp bindings/java/pom$(1).xml packages/bundle$(1)_regular/pom.xml
|
||||
@cd packages/bundle$(1)_regular && jar cf $(TOPDIR)/$$@ *
|
||||
@rm -rf packages/bundle$(1)_regular
|
||||
|
||||
fdb_java$(1)_release: packages/fdb-java$(1)-$$(JARVER)-bundle.jar
|
||||
|
||||
fdb_java$(1)_release_clean:
|
||||
@echo "Cleaning Java release"
|
||||
@rm -f packages/fdb-java$(1)-*.jar packages/fdb-java$(1)-*-sources.jar bindings/java/pom$(1).xml bindings/java/fdb-java$(1)-$$(APPLEJARVER).pom
|
||||
|
||||
endif
|
||||
|
||||
endef
|
||||
|
||||
$(eval $(call add_java_binding_targets,))
|
||||
ifeq ($(JAVAVERMAJOR).$(JAVAVERMINOR),1.8)
|
||||
$(eval $(call add_java_binding_targets,-completable))
|
||||
endif
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
|
||||
fdb_java_CFLAGS += -I/usr/lib/jvm/java-8-openjdk-amd64/include -I/usr/lib/jvm/java-8-openjdk-amd64/include/linux
|
||||
fdb_java_LDFLAGS += -static-libgcc
|
||||
|
||||
# Linux is where we build all the java packages
|
||||
packages: fdb_java_release fdb_java-completable_release
|
||||
packages_clean: fdb_java_release_clean fdb_java-completable_release_clean
|
||||
|
||||
java_ARCH := amd64
|
||||
|
||||
ifneq ($(FATJAR),)
|
||||
MAC_OBJ_JAVA := lib/libfdb_java.jnilib-$(VERSION_ID)
|
||||
WINDOWS_OBJ_JAVA := lib/fdb_java.dll-$(VERSION_ID)
|
||||
endif
|
||||
|
||||
else ifeq ($(PLATFORM),osx)
|
||||
TARGETS += fdb_java_release
|
||||
CLEAN_TARGETS += fdb_java_release_clean
|
||||
java_ARCH := x86_64
|
||||
|
||||
fdb_java_release: lib/libfdb_java.$(DLEXT)
|
||||
@mkdir -p lib
|
||||
@rm -f lib/libfdb_java.$(java_DLEXT)-*
|
||||
@cp lib/libfdb_java.$(DLEXT) lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID)
|
||||
@cp lib/libfdb_java.$(DLEXT)-debug lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID)
|
||||
|
||||
fdb_java_release_clean:
|
||||
@rm -f lib/libfdb_java.$(DLEXT)-*
|
||||
@rm -f lib/libfdb_java.$(java_DLEXT)-*
|
||||
|
||||
# FIXME: Surely there is a better way to grab the JNI headers on any version of OS X.
|
||||
fdb_java_CFLAGS += -I/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers
|
||||
|
||||
# OS X needs to put its java lib in packages
|
||||
packages: fdb_java_lib_package
|
||||
|
||||
fdb_java_lib_package: fdb_java_release
|
||||
mkdir -p packages
|
||||
cp lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID) packages
|
||||
cp lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID) packages
|
||||
endif
|
|
@ -0,0 +1,39 @@
|
|||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
|
||||
http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>com.apple.cie.foundationdb</groupId>
|
||||
<artifactId>NAME</artifactId>
|
||||
<version>VERSION</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>foundationdb-java</name>
|
||||
<description>Java bindings for the FoundationDB database. These bindings require the FoundationDB client, which is under a different license. The client can be obtained from https://files.foundationdb.org/fdb-c/.</description>
|
||||
<inceptionYear>2010</inceptionYear>
|
||||
<url>http://foundationdb.org</url>
|
||||
|
||||
<organization>
|
||||
<name>FoundationDB</name>
|
||||
<url>http://foundationdb.org</url>
|
||||
</organization>
|
||||
|
||||
<developers>
|
||||
<developer>
|
||||
<name>FoundationDB</name>
|
||||
</developer>
|
||||
</developers>
|
||||
|
||||
<scm>
|
||||
<url>http://0.0.0.0</url>
|
||||
</scm>
|
||||
|
||||
<licenses>
|
||||
<license>
|
||||
<name>The Apache v2 License</name>
|
||||
<url>http://www.apache.org/licenses/</url>
|
||||
</license>
|
||||
</licenses>
|
||||
|
||||
</project>
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* AllTests.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb.tuple;
|
||||
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Suite;
|
||||
import org.junit.runners.Suite.SuiteClasses;
|
||||
|
||||
@RunWith(Suite.class)
|
||||
@SuiteClasses({ ArrayUtilTests.class })
|
||||
public class AllTests {
|
||||
|
||||
}
|
|
@ -0,0 +1,308 @@
|
|||
/*
|
||||
* ArrayUtilTests.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb.tuple;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import com.apple.cie.foundationdb.tuple.ByteArrayUtil;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* @author Ben
|
||||
*
|
||||
*/
|
||||
public class ArrayUtilTests {
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#join(byte[], java.util.List)}.
|
||||
*/
|
||||
@Test
|
||||
public void testJoinByteArrayListOfbyte() {
|
||||
byte[] a = new byte[] {'a', 'b', 'c'};
|
||||
byte[] b = new byte[] {'d', 'e', 'f'};
|
||||
|
||||
List<byte[]> parts = new ArrayList<byte[]>();
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {});
|
||||
byte[] result = new byte[] {'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
result = new byte[] {'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
result = new byte[] {'z', 'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(a);
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(b);
|
||||
result = new byte[] {'a', 'b', 'c', 'z', 'z', 'z', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {'b'});
|
||||
result = new byte[] {'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z', 'b'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
result = new byte[] {'z', 'z'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
result = new byte[] {};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(null, parts));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#join(byte[][])}.
|
||||
*/
|
||||
@Test
|
||||
public void testJoinByteArrayArray() {
|
||||
byte[] a = new byte[] {'a', 'b', 'c'};
|
||||
byte[] b = new byte[] {'d', 'e', 'f'};
|
||||
|
||||
List<byte[]> parts = new ArrayList<byte[]>();
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {});
|
||||
byte[] result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {'b'});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
parts.add(new byte[] {});
|
||||
result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f', 'b'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));
|
||||
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {'b'});
|
||||
result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f', 'b'};
|
||||
assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));
|
||||
|
||||
// Self-referential, with conversion to array
|
||||
parts = new ArrayList<byte[]>();
|
||||
parts.add(new byte[] {});
|
||||
parts.add(a);
|
||||
parts.add(b);
|
||||
parts.add(new byte[] {});
|
||||
assertArrayEquals(ByteArrayUtil.join(a, b), ByteArrayUtil.join(parts.toArray(new byte[][]{})));
|
||||
|
||||
// Test exception on null elements
|
||||
boolean isError = false;
|
||||
try {
|
||||
ByteArrayUtil.join(a, b, null);
|
||||
} catch(Exception e) {
|
||||
isError = true;
|
||||
} finally {
|
||||
assertTrue(isError);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#regionEquals(byte[], int, byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testRegionEquals() {
|
||||
byte[] src = new byte[] {'a', (byte)12, (byte)255, 'n', 'm', 'z', 'k'};
|
||||
assertTrue(ByteArrayUtil.regionEquals(src, 3, new byte[] { 'n', 'm' }));
|
||||
|
||||
assertFalse(ByteArrayUtil.regionEquals(src, 2, new byte[] { 'n', 'm' }));
|
||||
|
||||
assertTrue(ByteArrayUtil.regionEquals(null, 0, null));
|
||||
|
||||
assertFalse(ByteArrayUtil.regionEquals(src, 0, null));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#replace(byte[], byte[], byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testReplace() {
|
||||
byte[] a = new byte[] {'a', 'b', 'c'};
|
||||
byte[] b = new byte[] {'d', 'e', 'f'};
|
||||
|
||||
byte[] src = ByteArrayUtil.join(a, b, a, b);
|
||||
byte[] result = new byte[] {'z', 'd', 'e', 'f', 'z', 'd', 'e', 'f'};
|
||||
assertArrayEquals(result, ByteArrayUtil.replace(src, a, new byte[] {'z'}));
|
||||
|
||||
src = ByteArrayUtil.join(a, b, a, b);
|
||||
assertArrayEquals(ByteArrayUtil.join(b, b), ByteArrayUtil.replace(src, a, new byte[] {}));
|
||||
|
||||
src = ByteArrayUtil.join(a, b, a, b);
|
||||
assertArrayEquals(ByteArrayUtil.join(a, a), ByteArrayUtil.replace(src, b, new byte[] {}));
|
||||
|
||||
src = ByteArrayUtil.join(a, a, a);
|
||||
assertArrayEquals(new byte[] {}, ByteArrayUtil.replace(src, a, new byte[] {}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#split(byte[], byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testSplit() {
|
||||
byte[] a = new byte[] {'a', 'b', 'c'};
|
||||
byte[] b = new byte[] {'d', 'e', 'f'};
|
||||
|
||||
byte[] src = ByteArrayUtil.join(a, b, a, b, a);
|
||||
List<byte[]> parts = ByteArrayUtil.split(src, b);
|
||||
assertEquals(parts.size(), 3);
|
||||
for(byte[] p : parts) {
|
||||
assertArrayEquals(a, p);
|
||||
}
|
||||
|
||||
src = ByteArrayUtil.join(b, a, b, a, b, a);
|
||||
parts = ByteArrayUtil.split(src, b);
|
||||
assertEquals(parts.size(), 4);
|
||||
int counter = 0;
|
||||
for(byte[] p : parts) {
|
||||
if(counter++ == 0)
|
||||
assertArrayEquals(new byte[]{}, p);
|
||||
else
|
||||
assertArrayEquals(a, p);
|
||||
}
|
||||
|
||||
src = ByteArrayUtil.join(a, b, a, b, a, b);
|
||||
parts = ByteArrayUtil.split(src, b);
|
||||
assertEquals(parts.size(), 4);
|
||||
counter = 0;
|
||||
for(byte[] p : parts) {
|
||||
if(counter++ < 3)
|
||||
assertArrayEquals(a, p);
|
||||
else
|
||||
assertArrayEquals(new byte[]{}, p);
|
||||
}
|
||||
|
||||
// Multiple ending delimiters
|
||||
src = ByteArrayUtil.join(a, b, a, b, a, b, b, b);
|
||||
parts = ByteArrayUtil.split(src, b);
|
||||
assertEquals(parts.size(), 6);
|
||||
counter = 0;
|
||||
for(byte[] p : parts) {
|
||||
if(counter++ < 3)
|
||||
assertArrayEquals(a, p);
|
||||
else
|
||||
assertArrayEquals(new byte[]{}, p);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#bisectLeft(java.math.BigInteger[], java.math.BigInteger)}.
|
||||
*/
|
||||
@Test
|
||||
public void testBisectLeft() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#compareUnsigned(byte[], byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testCompare() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#findNext(byte[], byte, int)}.
|
||||
*/
|
||||
@Test
|
||||
public void testFindNext() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#findTerminator(byte[], byte, byte, int)}.
|
||||
*/
|
||||
@Test
|
||||
public void testFindTerminator() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#copyOfRange(byte[], int, int)}.
|
||||
*/
|
||||
@Test
|
||||
public void testCopyOfRange() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#strinc(byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testStrinc() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link ByteArrayUtil#printable(byte[])}.
|
||||
*/
|
||||
@Test
|
||||
public void testPrintable() {
|
||||
fail("Not yet implemented");
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
/*
|
||||
* Cluster.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.cie.foundationdb;
|
||||
|
||||
import java.nio.charset.Charset;
|
||||
import java.util.concurrent.Executor;
|
||||
|
||||
/**
|
||||
* The {@code Cluster} represents a connection to a physical set of cooperating machines
|
||||
* running FoundationDB. A {@code Cluster} is opened with a reference to a cluster file.
|
||||
*/
|
||||
public class Cluster extends DefaultDisposableImpl implements Disposable {
|
||||
private ClusterOptions options;
|
||||
private final Executor executor;
|
||||
|
||||
private static final Charset UTF8 = Charset.forName("UTF-8");
|
||||
|
||||
protected Cluster(long cPtr, Executor executor) {
|
||||
super(cPtr);
|
||||
this.executor = executor;
|
||||
this.options = new ClusterOptions(new OptionConsumer() {
|
||||
@Override
|
||||
public void setOption(int code, byte[] parameter) {
|
||||
pointerReadLock.lock();
|
||||
try {
|
||||
Cluster_setOption(getPtr(), code, parameter);
|
||||
} finally {
|
||||
pointerReadLock.unlock();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a set of options that can be set on a {@code Cluster}. In the current version
|
||||
* of the API, there are no options that can be set on a {@code Cluster}.
|
||||
*
|
||||
* @return a set of cluster-specific options affecting this {@code Cluster}
|
||||
*/
|
||||
public ClusterOptions options() { return options; }
|
||||
|
||||
@Override
|
||||
protected void finalize() throws Throwable {
|
||||
dispose();
|
||||
super.finalize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a connection to a specific database on an <i>FDB</i> cluster.
|
||||
*
|
||||
* @return a {@code Database} on this cluster, available once the
|
||||
* connection has been established successfully.
|
||||
*/
|
||||
public Database openDatabase() throws FDBException {
|
||||
return openDatabase(executor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a connection to a specific database on an <i>FDB</i> cluster.
|
||||
*
|
||||
* @return a {@code Future} that will be set to a {@code Database} upon
|
||||
* successful connection.
|
||||
*/
|
||||
public Database openDatabase(Executor e) throws FDBException {
|
||||
FutureDatabase futureDatabase = null;
|
||||
pointerReadLock.lock();
|
||||
try {
|
||||
futureDatabase = new FutureDatabase(Cluster_createDatabase(getPtr(), "DB".getBytes(UTF8)), e);
|
||||
} finally {
|
||||
pointerReadLock.unlock();
|
||||
}
|
||||
return futureDatabase.join();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void disposeInternal(long cPtr) {
|
||||
Cluster_dispose(cPtr);
|
||||
}
|
||||
|
||||
private native void Cluster_dispose(long cPtr);
|
||||
private native long Cluster_createDatabase(long cPtr, byte[] dbName);
|
||||
private native void Cluster_setOption(long cPtr, int code, byte[] value) throws FDBException;
|
||||
}
|
|
@ -0,0 +1,213 @@
|
|||
/*
|
||||
* Database.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.function.Function;

/**
 * A mutable, lexicographically ordered mapping from binary keys to binary values.
 * A {@code Database} is stored on a FoundationDB {@link Cluster}.
 * {@link Transaction}s are used to manipulate data within a single
 * {@code Database} -- multiple, concurrent
 * {@code Transaction}s on a {@code Database} enforce <b>ACID</b> properties.<br>
 * <br>
 * The simplest correct programs using FoundationDB will make use of the methods defined
 * in the {@link TransactionContext} interface. When used on a {@code Database} these
 * methods will call {@link Transaction#commit()} after user code has been
 * executed. These methods will not return successfully until {@code commit()} has
 * returned successfully.
 */
public interface Database extends Disposable, TransactionContext {
    /**
     * Creates a {@link Transaction} that operates on this {@code Database}.<br>
     * <br>
     * Note: Java transactions automatically set the {@link TransactionOptions#setUsedDuringCommitProtectionDisable}
     * option. This is because the Java bindings disallow use of {@code Transaction} objects after either
     * {@link Transaction#reset} or {@link Transaction#onError} is called.
     *
     * @return a newly created {@code Transaction} that reads from and writes to this {@code Database}.
     */
    default Transaction createTransaction() {
        return createTransaction(getExecutor());
    }

    /**
     * Creates a {@link Transaction} that operates on this {@code Database} with the given {@link Executor}
     * for asynchronous callbacks.
     *
     * @param e the {@link Executor} to use when executing asynchronous callbacks for the database
     * @return a newly created {@code Transaction} that reads from and writes to this {@code Database}.
     */
    Transaction createTransaction(Executor e);

    /**
     * Returns a set of options that can be set on a {@code Database}.
     *
     * @return a set of database-specific options affecting this {@code Database}
     */
    DatabaseOptions options();

    /**
     * Runs a read-only transactional function against this {@code Database} with retry logic.
     * {@link Function#apply(Object) apply(ReadTransaction)} will be called on the
     * supplied {@link Function} until a non-retryable
     * {@link FDBException} (or any {@code Throwable} other than an {@code FDBException})
     * is thrown. This call is blocking -- this
     * method will not return until the {@code Function} has been called and completed without error.<br>
     *
     * @param retryable the block of logic to execute in a {@link ReadTransaction} against
     *        this database
     *
     * @return the result of the last call to {@code retryable}
     */
    @Override
    default <T> T read(Function<? super ReadTransaction, T> retryable) {
        return read(retryable, getExecutor());
    }

    /**
     * Runs a read-only transactional function against this {@code Database} with retry logic. Use
     * this formulation of {@link #read(Function)} if one wants to set a custom {@link Executor}
     * for the transaction when run.
     *
     * @param retryable the block of logic to execute in a {@link ReadTransaction} against
     *        this database
     * @param e the {@link Executor} to use for asynchronous callbacks
     *
     * @return the result of the last call to {@code retryable}
     *
     * @see #read(Function)
     */
    <T> T read(Function<? super ReadTransaction, T> retryable, Executor e);

    /**
     * Runs a read-only transactional function against this {@code Database} with retry logic.
     * {@link Function#apply(Object) apply(ReadTransaction)} will be called on the
     * supplied {@link Function} until a non-retryable
     * {@link FDBException} (or any {@code Throwable} other than an {@code FDBException})
     * is thrown. This call is non-blocking -- this
     * method will return immediately with a {@link CompletableFuture} that will be
     * set when the {@code Function} has been called and completed without error.<br>
     * <br>
     * Any errors encountered executing {@code retryable}, or received from the
     * database, will be set on the returned {@code CompletableFuture}.
     *
     * @param retryable the block of logic to execute in a {@link ReadTransaction} against
     *        this database
     *
     * @return a {@code CompletableFuture} that will be set to the value returned by the
     *         last call to {@code retryable}
     */
    @Override
    default <T> CompletableFuture<T> readAsync(
            Function<? super ReadTransaction, CompletableFuture<T>> retryable) {
        return readAsync(retryable, getExecutor());
    }

    /**
     * Runs a read-only transactional function against this {@code Database} with retry logic.
     * Use this version of {@link #readAsync(Function)} if one wants to set a custom
     * {@link Executor} for the transaction when run.
     *
     * @param retryable the block of logic to execute in a {@link ReadTransaction} against
     *        this database
     * @param e the {@link Executor} to use for asynchronous callbacks
     *
     * @return a {@code CompletableFuture} that will be set to the value returned by the
     *         last call to {@code retryable}
     *
     * @see #readAsync(Function)
     */
    <T> CompletableFuture<T> readAsync(
            Function<? super ReadTransaction, CompletableFuture<T>> retryable, Executor e);

    /**
     * Runs a transactional function against this {@code Database} with retry logic.
     * {@link Function#apply(Object) apply(Transaction)} will be called on the
     * supplied {@link Function} until a non-retryable
     * {@link FDBException} (or any {@code Throwable} other than an {@code FDBException})
     * is thrown or {@link Transaction#commit() commit()},
     * when called after {@code apply()}, returns success. This call is blocking -- this
     * method will not return until {@code commit()} has been called and returned success.<br>
     * <br>
     * As with other client/server databases, in some failure scenarios a client may
     * be unable to determine whether a transaction succeeded. In these cases, your
     * transaction may be executed twice. For more information about how to reason
     * about these situations see
     * <a href="/documentation/developer-guide.html#transactions-with-unknown-results"
     * target="_blank">the FoundationDB Developer Guide</a>.
     *
     * @param retryable the block of logic to execute in a {@link Transaction} against
     *        this database
     *
     * @return the result of the last call to {@code retryable}
     */
    @Override
    default <T> T run(Function<? super Transaction, T> retryable) {
        return run(retryable, getExecutor());
    }

    /**
     * Runs a transactional function against this {@code Database} with retry logic.
     * Use this formulation of {@link #run(Function)} if one would like to set a
     * custom {@link Executor} for the transaction when run.
     *
     * @param retryable the block of logic to execute in a {@link Transaction} against
     *        this database
     * @param e the {@link Executor} to use for asynchronous callbacks
     *
     * @return the result of the last call to {@code retryable}
     */
    <T> T run(Function<? super Transaction, T> retryable, Executor e);

    /**
     * Runs a transactional function against this {@code Database} with retry logic.
     * {@link Function#apply(Object) apply(Transaction)} will be called on the
     * supplied {@link Function} until a non-retryable
     * {@link FDBException} (or any {@code Throwable} other than an {@code FDBException})
     * is thrown or {@link Transaction#commit() commit()},
     * when called after {@code apply()}, returns success. This call is non-blocking -- this
     * method will return immediately with a {@link CompletableFuture} that will be
     * set when {@code commit()} has been called and returned success.<br>
     * <br>
     * As with other client/server databases, in some failure scenarios a client may
     * be unable to determine whether a transaction succeeded. In these cases, your
     * transaction may be executed twice. For more information about how to reason
     * about these situations see
     * <a href="/documentation/developer-guide.html#transactions-with-unknown-results"
     * target="_blank">the FoundationDB Developer Guide</a>.<br>
     * <br>
     * Any errors encountered executing {@code retryable}, or received from the
     * database, will be set on the returned {@code CompletableFuture}.
     *
     * @param retryable the block of logic to execute in a {@link Transaction} against
     *        this database
     *
     * @return a {@code CompletableFuture} that will be set to the value returned by the
     *         last call to {@code retryable}
     */
    @Override
    default <T> CompletableFuture<T> runAsync(
            Function<? super Transaction, CompletableFuture<T>> retryable) {
        return runAsync(retryable, getExecutor());
    }

    /**
     * Runs a transactional function against this {@code Database} with retry logic. Use
     * this formulation of the non-blocking {@link #runAsync(Function)} if one wants
     * to set a custom {@link Executor} for the transaction when run.
     *
     * @param retryable the block of logic to execute in a {@link Transaction} against
     *        this database
     * @param e the {@link Executor} to use for asynchronous callbacks
     *
     * @return a {@code CompletableFuture} that will be set to the value returned by the
     *         last call to {@code retryable}
     *
     * @see #runAsync(Function)
     */
    <T> CompletableFuture<T> runAsync(
            Function<? super Transaction, CompletableFuture<T>> retryable, Executor e);
}
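
A minimal usage sketch, not part of this commit, showing the interface above in the blocking style: run() supplies a Transaction, retries on retryable errors, and commits before returning. The class name and key/value bytes are illustrative.

import java.nio.charset.StandardCharsets;

import com.apple.cie.foundationdb.Database;
import com.apple.cie.foundationdb.FDB;

public class DatabaseRunExample {
    public static void main(String[] args) throws Exception {
        FDB fdb = FDB.selectAPIVersion(500);
        Database db = fdb.open();
        try {
            byte[] key = "hello".getBytes(StandardCharsets.UTF_8);
            // run() calls commit() on our behalf and only returns once the
            // commit has succeeded (or a non-retryable error is thrown).
            String value = db.run(tr -> {
                tr.set(key, "world".getBytes(StandardCharsets.UTF_8));
                byte[] read = tr.get(key).join();
                return read == null ? null : new String(read, StandardCharsets.UTF_8);
            });
            System.out.println("read back: " + value);
        } finally {
            db.dispose();
        }
    }
}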

@ -0,0 +1,82 @@

/*
 * DefaultDisposableImpl.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

abstract class DefaultDisposableImpl implements Disposable {
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    protected final Lock pointerReadLock = rwl.readLock();

    private boolean disposed = false;
    private long cPtr;

    public DefaultDisposableImpl() {
    }

    public DefaultDisposableImpl(long cPtr) {
        this.cPtr = cPtr;
        if(this.cPtr == 0)
            this.disposed = true;
    }

    public boolean isDisposed() {
        // The caller must hold the read lock for this result to be meaningful,
        // but it does not make sense to take the lock here, since the code that
        // uses the result must inherently hold the read lock itself.
        assert( rwl.getReadHoldCount() > 0 );

        return disposed;
    }

    @Override
    public void dispose() {
        rwl.writeLock().lock();
        long ptr = 0;
        try {
            if(disposed)
                return;

            ptr = cPtr;
            this.cPtr = 0;
            disposed = true;
        } finally {
            rwl.writeLock().unlock();
        }

        disposeInternal(ptr);
    }

    protected long getPtr() {
        // The caller must hold the read lock for this result to be meaningful,
        // but it does not make sense to take the lock here, since the code that
        // uses the result must inherently hold the read lock itself.
        assert( rwl.getReadHoldCount() > 0 );

        if(this.disposed)
            throw new IllegalStateException("Cannot access disposed object");

        return this.cPtr;
    }

    protected abstract void disposeInternal(long cPtr);
}
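
A hypothetical subclass sketch, illustrating the contract described in the comments above: callers hold pointerReadLock around getPtr() so dispose() cannot free the pointer mid-call, and disposeInternal() receives the pointer exactly once, outside the lock. All names below are invented for illustration.

class ExampleNativeHandle extends DefaultDisposableImpl {
    ExampleNativeHandle(long cPtr) {
        super(cPtr);
    }

    public void doWork() {
        pointerReadLock.lock();
        try {
            // getPtr() asserts that the read lock is held and throws if the
            // object has already been disposed
            nativeDoWork(getPtr());
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    protected void disposeInternal(long cPtr) {
        nativeFree(cPtr); // hypothetical native release
    }

    private static native void nativeDoWork(long cPtr);
    private static native void nativeFree(long cPtr);
}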

@ -0,0 +1,33 @@

/*
 * Disposable.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

/**
 * A FoundationDB object with native resources that can be freed. Calling
 * {@link Disposable#dispose()} is usually optional, since disposal will also
 * happen at finalization; disposing explicitly releases native resources sooner.
 */
public interface Disposable {
    /**
     * Dispose of the object. This can be called multiple times, but care should be
     * taken that an object is not in use in another thread at the time of the call.
     */
    void dispose();
}
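
A sketch of the idiom the interface implies, assuming the FDB, Database, and Transaction types from this package: dispose deterministically with try/finally rather than waiting for finalization.

import java.nio.charset.StandardCharsets;

import com.apple.cie.foundationdb.Database;
import com.apple.cie.foundationdb.FDB;
import com.apple.cie.foundationdb.Transaction;

public class DisposeExample {
    public static void main(String[] args) throws Exception {
        Database db = FDB.selectAPIVersion(500).open();
        Transaction tr = db.createTransaction();
        try {
            tr.set("k".getBytes(StandardCharsets.UTF_8),
                    "v".getBytes(StandardCharsets.UTF_8));
            tr.commit().join();
        } finally {
            // Safe even if commit() threw: dispose() tolerates repeated calls
            // and frees the native resources promptly.
            tr.dispose();
            db.dispose();
        }
    }
}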

@ -0,0 +1,411 @@

/*
 * FDB.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadFactory;

/**
 * The starting point for accessing FoundationDB.
 * <br>
 * <h3>Setting API version</h3>
 * The FoundationDB API is accessed with a call to {@link #selectAPIVersion(int)}.
 * This call is required before using any other part of the API, and allows an error
 * to be thrown at this point rather than allowing client code to run against a later
 * library with incorrect assumptions from the current version. The API version
 * documented here is version {@code 500}.<br><br>
 * FoundationDB encapsulates multiple versions of its interface by requiring
 * the client to explicitly specify the version of the API it uses. The purpose
 * of this design is to allow you to upgrade the server, client libraries, or
 * bindings without having to modify client code. The client libraries support
 * all previous versions of the API. The API version specified by the client is
 * used to control the behavior of the binding. You can therefore upgrade to
 * more recent packages (and thus receive various improvements) without having
 * to change your code.<br><br>
 * Warning: When using the multi-version client API, setting an API version that
 * is not supported by a particular client library will prevent that client from
 * being used to connect to the cluster. In particular, you should not advance
 * the API version of your application after upgrading your client until the
 * cluster has also been upgraded.<br>
 * <h3>Getting a database</h3>
 * Once the API version has been set, the easiest way to get a {@link Database} object to use is
 * to call {@link #open}.
 * <br>
 * <h3>Client networking</h3>
 * The network is started either implicitly with a call to a variant of {@link #open()} or
 * {@link #createCluster()}, or started explicitly with a call to {@link #startNetwork()}.
 * <br>
 */
public class FDB {
    static FDB singleton = null;

    static class DaemonThreadFactory implements ThreadFactory {
        private final ThreadFactory factory;

        public DaemonThreadFactory(ThreadFactory factory) {
            this.factory = factory;
        }

        @Override
        public Thread newThread(Runnable r) {
            Thread t = factory.newThread(r);
            t.setDaemon(true);
            return t;
        }
    }

    public static final ExecutorService DEFAULT_EXECUTOR;

    final int apiVersion;
    private volatile boolean netStarted = false;
    private volatile boolean netStopped = false;
    private final Semaphore netRunning = new Semaphore(1);
    private final NetworkOptions options;

    static {
        try {
            JNIUtil.loadLibrary("fdb_c");
        } catch (Throwable t) {
            // EAT: this can be useful for loading on Windows
        }
        JNIUtil.loadLibrary("fdb_java");

        ThreadFactory factory = new DaemonThreadFactory(Executors.defaultThreadFactory());
        DEFAULT_EXECUTOR = Executors.newCachedThreadPool(factory);
    }

    /**
     * Called only once to create the FDB singleton.
     */
    private FDB(int apiVersion) {
        this.apiVersion = apiVersion;

        options = new NetworkOptions(new OptionConsumer() {
            @Override
            public void setOption(int code, byte[] parameter) {
                Network_setOption(code, parameter);
            }
        });

        Runtime.getRuntime().addShutdownHook(new Thread(
            new Runnable(){
                @Override
                public void run() {
                    FDB.this.stopNetwork();
                }
            }
        ));
    }

    /**
     * Returns a set of options that can be set on the FoundationDB API. Generally,
     * these top-level API options affect the networking engine and
     * therefore must be set before the network engine is started. The network is started
     * by calls to {@link #startNetwork()} and implicitly by calls to {@link #open()} and
     * {@link #createCluster()} (and their respective variants).
     *
     * @return a set of options affecting this instance of the FoundationDB API
     */
    public NetworkOptions options() { return options; }

    /**
     * Select the version for the client API. An exception will be thrown if the
     * requested version is not supported by this implementation of the API. As
     * only one version can be selected for the lifetime of the JVM, the result
     * of a successful call to this method is always the same instance of an FDB
     * object.<br><br>
     *
     * Warning: When using the multi-version client API, setting an API version that
     * is not supported by a particular client library will prevent that client from
     * being used to connect to the cluster. In particular, you should not advance
     * the API version of your application after upgrading your client until the
     * cluster has also been upgraded.
     *
     * @param version the API version required
     *
     * @return the FoundationDB API object
     */
    public static synchronized FDB selectAPIVersion(final int version) throws FDBException {
        if(singleton != null) {
            if(version != singleton.apiVersion) {
                throw new IllegalArgumentException(
                        "FoundationDB API already started at different version");
            }
            return singleton;
        }
        if(version < 500)
            throw new IllegalArgumentException("API version not supported (minimum 500)");
        if(version > 500)
            throw new IllegalArgumentException("API version not supported (maximum 500)");
        Select_API_version(version);
        return singleton = new FDB(version);
    }

    /**
     * Connects to the cluster specified by the
     * <a href="https://foundationdb.com/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
     * If the FoundationDB network has not been started, it will be started in the course of this call
     * as if {@link FDB#startNetwork()} had been called.
     *
     * @return a FoundationDB {@code Cluster}
     *
     * @throws FDBException on errors encountered starting the FoundationDB networking engine
     * @throws IllegalStateException if the network had been previously stopped
     */
    public Cluster createCluster() throws IllegalStateException, FDBException {
        return createCluster(null);
    }

    /**
     * Connects to the cluster specified by {@code clusterFilePath}. If the FoundationDB network
     * has not been started, it will be started in the course of this call as if
     * {@link #startNetwork()} had been called.
     *
     * @param clusterFilePath the
     * <a href="/documentation/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
     * defining the FoundationDB cluster. This can be {@code null} if the
     * <a href="/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
     * is to be used.
     *
     * @return a FoundationDB {@code Cluster}
     *
     * @throws FDBException on errors encountered starting the FoundationDB networking engine
     * @throws IllegalStateException if the network had been previously stopped
     */
    public Cluster createCluster(String clusterFilePath) throws IllegalStateException, FDBException {
        return createCluster(clusterFilePath, DEFAULT_EXECUTOR);
    }

    /**
     * Connects to the cluster specified by {@code clusterFilePath}. If the FoundationDB network
     * has not been started, it will be started in the course of this call. The supplied
     * {@link Executor} will be used as the default for the execution of all callbacks that
     * are produced from using the resulting {@link Cluster}.
     *
     * @param clusterFilePath the
     * <a href="/documentation/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
     * defining the FoundationDB cluster. This can be {@code null} if the
     * <a href="/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
     * is to be used.
     * @param e used to run the FDB network thread
     *
     * @return a FoundationDB {@code Cluster}
     *
     * @throws FDBException on errors encountered starting the FoundationDB networking engine
     * @throws IllegalStateException if the network had been previously stopped
     */
    public Cluster createCluster(String clusterFilePath, Executor e)
            throws FDBException, IllegalStateException {
        FutureCluster f;
        synchronized (this) {
            if (!isConnected()) {
                startNetwork(e);
            }
            f = new FutureCluster(Cluster_create(clusterFilePath), e);
        }
        return f.join();
    }

    /**
     * Initializes networking, connects with the
     * <a href="https://foundationdb.com/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
     * and opens the database.
     *
     * @return a FoundationDB {@link Database}
     */
    public Database open() throws FDBException {
        return open(null);
    }

    /**
     * Initializes networking, connects to the cluster specified by {@code clusterFilePath}
     * and opens the database.
     *
     * @param clusterFilePath the
     * <a href="/documentation/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
     * defining the FoundationDB cluster. This can be {@code null} if the
     * <a href="/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
     * is to be used.
     *
     * @return a FoundationDB {@link Database}
     */
    public Database open(String clusterFilePath) throws FDBException {
        return open(clusterFilePath, DEFAULT_EXECUTOR);
    }

    /**
     * Initializes networking, connects to the cluster specified by {@code clusterFilePath}
     * and opens the database.
     *
     * @param clusterFilePath the
     * <a href="/documentation/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
     * defining the FoundationDB cluster. This can be {@code null} if the
     * <a href="/documentation/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
     * is to be used.
     * @param e the {@link Executor} to use to execute asynchronous callbacks
     *
     * @return a FoundationDB {@link Database}
     */
    public Database open(String clusterFilePath, Executor e) throws FDBException {
        FutureCluster f;
        synchronized (this) {
            if (!isConnected()) {
                startNetwork();
            }
            f = new FutureCluster(Cluster_create(clusterFilePath), e);
        }
        Cluster c = f.join();
        return c.openDatabase(e);
    }

    /**
     * Initializes networking. Can only be called once. This version of
     * {@code startNetwork()} will use the default thread pool to execute the
     * FoundationDB network.<br>
     * <br>
     * Configuration of the networking engine can be achieved through calls to the methods
     * in {@link NetworkOptions}.
     *
     * @throws IllegalStateException if the network has already been stopped
     *
     * @see NetworkOptions
     */
    public void startNetwork() throws FDBException, IllegalStateException {
        startNetwork(DEFAULT_EXECUTOR);
    }

    /**
     * Initializes networking. Can only be called once. The FoundationDB
     * networking event loop will be run in the specified {@code Executor}. This
     * event loop is a blocking operation that is not
     * expected to terminate until the program is complete. It will therefore consume an
     * entire thread from {@code e} if {@code e} is a thread pool, or will completely block
     * the operation of a single-threaded {@code Executor}.<br>
     * <br>
     * Manual configuration of the networking engine can be achieved through calls on
     * {@link NetworkOptions}. These options should be set before a call
     * to this method.
     *
     * @see NetworkOptions
     *
     * @throws IllegalStateException if the network has already been stopped
     */
    public synchronized void startNetwork(Executor e) throws FDBException, IllegalStateException {
        if(netStopped)
            throw new IllegalStateException("Network has been stopped and cannot be restarted");
        if(netStarted) {
            return;
        }
        Network_setup();
        netStarted = true;

        e.execute(new Runnable() {
            @Override
            public void run() {
                boolean acquired = false;
                try {
                    while(!acquired) {
                        try {
                            // make attempt to avoid a needless deadlock
                            synchronized (FDB.this) {
                                if(netStopped) {
                                    return;
                                }
                            }

                            netRunning.acquire();
                            acquired = true;
                        } catch(InterruptedException e) {
                            // interrupted while waiting for the semaphore; retry
                        }
                    }
                    try {
                        Network_run();
                    } catch (Throwable t) {
                        System.err.println("Unhandled error in FoundationDB network thread: " + t.getMessage());
                        // eat this error. we have nowhere to send it.
                    }
                } finally {
                    if(acquired) {
                        netRunning.release();
                    }
                    synchronized (FDB.this) {
                        netStopped = true;
                    }
                }
            }
        });
    }

    /**
     * Gets the state of the FoundationDB networking thread.
     *
     * @return {@code true} if the FDB network thread is running, {@code false} otherwise.
     */
    private synchronized boolean isConnected() {
        return netStarted && !netStopped;
    }

    /**
     * Stops the FoundationDB networking engine. This can be called only once -- the network
     * cannot be restarted after this call. This call blocks for the completion of
     * the FoundationDB networking engine.
     *
     * @throws FDBException on errors while stopping the network
     */
    public synchronized void stopNetwork() throws FDBException {
        if(!netStarted || netStopped) {
            netStopped = true;
            return;
        }
        Network_stop();
        // set netStarted here in case the network has never really ever been run
        netStopped = netStarted = true;
        while(true) {
            try {
                // This will be released when runNetwork() returns.
                // Taking this and never releasing it will also assure
                // that we will never again be able to call runNetwork()
                netRunning.acquire();
                return;
            } catch (InterruptedException e) {}
        }
    }

    protected static boolean evalErrorPredicate(int predicate, int code) {
        if(singleton == null)
            throw new IllegalStateException("FDB API not yet initialized");
        return singleton.Error_predicate(predicate, code);
    }

    static native void Select_API_version(int version) throws FDBException;

    private native void Network_setOption(int code, byte[] value) throws FDBException;
    private native void Network_setup() throws FDBException;
    private native void Network_run() throws FDBException;
    private native void Network_stop() throws FDBException;

    private native boolean Error_predicate(int predicate, int code);

    private native long Cluster_create(String clusterFileName);
}
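
A sketch of the explicit-start path described in the startNetwork(Executor) javadoc, assuming nothing beyond the API above. The event loop permanently occupies one thread of the supplied executor until stopNetwork(), so a dedicated single-thread executor is a natural fit; the thread name is illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.apple.cie.foundationdb.Database;
import com.apple.cie.foundationdb.FDB;

public class ExplicitNetworkExample {
    public static void main(String[] args) throws Exception {
        FDB fdb = FDB.selectAPIVersion(500);

        // One daemon thread dedicated to the network event loop.
        ExecutorService networkThread = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "fdb-network");
            t.setDaemon(true);
            return t;
        });

        fdb.startNetwork(networkThread);
        Database db = fdb.open(); // network already running, so open() will not start it again
        try {
            // ... run transactions here ...
        } finally {
            db.dispose();
            fdb.stopNetwork(); // blocks until the event loop exits; the network cannot be restarted
            networkThread.shutdown();
        }
    }
}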

@ -0,0 +1,146 @@

/*
 * FDBDatabase.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Function;

import com.apple.cie.foundationdb.async.AsyncUtil;

class FDBDatabase extends DefaultDisposableImpl implements Database, Disposable, OptionConsumer {
    private DatabaseOptions options;
    private Executor executor;

    protected FDBDatabase(long cPtr, Executor executor) {
        super(cPtr);
        this.executor = executor;
        this.options = new DatabaseOptions(this);
    }

    @Override
    public DatabaseOptions options() {
        return options;
    }

    @Override
    public <T> T run(Function<? super Transaction, T> retryable, Executor e) {
        Transaction t = this.createTransaction(e);
        try {
            while (true) {
                try {
                    T returnVal = retryable.apply(t);
                    t.commit().join();
                    return returnVal;
                } catch (RuntimeException err) {
                    // onError() decides whether the error is retryable; if so it
                    // yields a fresh transaction, otherwise it rethrows.
                    t = t.onError(err).join();
                }
            }
        } finally {
            t.dispose();
        }
    }

    @Override
    public <T> T read(Function<? super ReadTransaction, T> retryable, Executor e) {
        return this.run(retryable, e);
    }

    @Override
    public <T> CompletableFuture<T> runAsync(final Function<? super Transaction, CompletableFuture<T>> retryable, Executor e) {
        final AtomicReference<Transaction> trRef = new AtomicReference<>(createTransaction(e));
        final AtomicReference<T> returnValue = new AtomicReference<>();
        return AsyncUtil.whileTrue(v -> {
            // Apply the user function to the current transaction...
            CompletableFuture<T> process = AsyncUtil.applySafely(retryable, trRef.get());

            // ...then attempt to commit. On success, record the result and stop looping.
            return process.thenComposeAsync(returnVal ->
                trRef.get().commit().thenApply(o -> {
                    returnValue.set(returnVal);
                    return false;
                })
            , e).handleAsync((value, t) -> {
                if(t == null)
                    return CompletableFuture.completedFuture(value);
                if(!(t instanceof RuntimeException))
                    throw new CompletionException(t);
                // On a retryable error, onError() supplies a fresh transaction and
                // the loop runs again; otherwise the returned future fails here.
                return trRef.get().onError(t).thenApply(newTr -> {
                    trRef.set(newTr);
                    return true;
                });
            }, e).thenCompose(x -> x);
        }, e).thenApply(o -> {
            trRef.get().dispose();
            return returnValue.get();
        });
    }

    @Override
    public <T> CompletableFuture<T> readAsync(
            Function<? super ReadTransaction, CompletableFuture<T>> retryable, Executor e) {
        return this.runAsync(retryable, e);
    }

    @Override
    protected void finalize() throws Throwable {
        dispose();
        super.finalize();
    }

    @Override
    public Transaction createTransaction(Executor e) {
        pointerReadLock.lock();
        try {
            Transaction tr = new FDBTransaction(Database_createTransaction(getPtr()), this, e);
            tr.options().setUsedDuringCommitProtectionDisable();
            return tr;
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public void setOption(int code, byte[] value) {
        pointerReadLock.lock();
        try {
            Database_setOption(getPtr(), code, value);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public Executor getExecutor() {
        return executor;
    }

    @Override
    protected void disposeInternal(long cPtr) {
        Database_dispose(cPtr);
    }

    private native long Database_createTransaction(long cPtr);
    private native void Database_dispose(long cPtr);
    private native void Database_setOption(long cPtr, int code, byte[] value) throws FDBException;
}
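
An illustrative caller's view of the retry loop implemented above, not part of this commit: readAsync() returns immediately, and the future completes only once the loop has finished. The key bytes are arbitrary.

import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

import com.apple.cie.foundationdb.Database;
import com.apple.cie.foundationdb.FDB;

public class ReadAsyncExample {
    public static void main(String[] args) throws Exception {
        Database db = FDB.selectAPIVersion(500).open();
        try {
            byte[] key = "hello".getBytes(StandardCharsets.UTF_8);
            // Non-blocking: the retry loop runs on the database's executor.
            CompletableFuture<byte[]> value = db.readAsync(tr -> tr.get(key));
            System.out.println("key present: " + (value.join() != null));
        } finally {
            db.dispose();
        }
    }
}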

@ -0,0 +1,603 @@

/*
 * FDBTransaction.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.function.Function;

import com.apple.cie.foundationdb.async.*;
import com.apple.cie.foundationdb.tuple.ByteArrayUtil;

class FDBTransaction extends DefaultDisposableImpl implements Disposable, Transaction, OptionConsumer {
    private final Database database;
    private final Executor executor;
    private final TransactionOptions options;

    private boolean transactionOwner;

    public final ReadTransaction snapshot;

    class ReadSnapshot implements ReadTransaction {
        @Override
        public CompletableFuture<Long> getReadVersion() {
            return FDBTransaction.this.getReadVersion();
        }

        @Override
        public CompletableFuture<byte[]> get(byte[] key) {
            return get_internal(key, true);
        }

        @Override
        public CompletableFuture<byte[]> getKey(KeySelector selector) {
            return getKey_internal(selector, true);
        }

        ///////////////////
        //  getRange -> KeySelectors
        ///////////////////
        @Override
        public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
                int limit, boolean reverse, StreamingMode mode) {
            return new RangeQuery(FDBTransaction.this, true, begin, end, limit, reverse, mode);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
                int limit, boolean reverse) {
            return getRange(begin, end, limit, reverse, StreamingMode.ITERATOR);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
                int limit) {
            return getRange(begin, end, limit, false);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end) {
            return getRange(begin, end, ReadTransaction.ROW_LIMIT_UNLIMITED);
        }

        ///////////////////
        //  getRange -> byte[]s
        ///////////////////
        @Override
        public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
                int limit, boolean reverse, StreamingMode mode) {
            return getRange(KeySelector.firstGreaterOrEqual(begin),
                    KeySelector.firstGreaterOrEqual(end),
                    limit, reverse, mode);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
                int limit, boolean reverse) {
            return getRange(begin, end, limit, reverse, StreamingMode.ITERATOR);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
                int limit) {
            return getRange(begin, end, limit, false);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end) {
            return getRange(begin, end, ReadTransaction.ROW_LIMIT_UNLIMITED);
        }

        ///////////////////
        //  getRange (Range)
        ///////////////////
        @Override
        public AsyncIterable<KeyValue> getRange(Range range,
                int limit, boolean reverse, StreamingMode mode) {
            return getRange(range.begin, range.end, limit, reverse, mode);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(Range range,
                int limit, boolean reverse) {
            return getRange(range, limit, reverse, StreamingMode.ITERATOR);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(Range range,
                int limit) {
            return getRange(range, limit, false);
        }
        @Override
        public AsyncIterable<KeyValue> getRange(Range range) {
            return getRange(range, ReadTransaction.ROW_LIMIT_UNLIMITED);
        }

        @Override
        public TransactionOptions options() {
            return FDBTransaction.this.options();
        }

        @Override
        public <T> T read(Function<? super ReadTransaction, T> retryable) {
            return retryable.apply(this);
        }

        @Override
        public <T> CompletableFuture<T> readAsync(
                Function<? super ReadTransaction, CompletableFuture<T>> retryable) {
            return AsyncUtil.applySafely(retryable, this);
        }

        @Override
        public Executor getExecutor() {
            return FDBTransaction.this.getExecutor();
        }
    }

    protected FDBTransaction(long cPtr, Database database, Executor executor) {
        super(cPtr);
        this.database = database;
        this.executor = executor;
        snapshot = new ReadSnapshot();
        options = new TransactionOptions(this);
        transactionOwner = true;
    }

    @Override
    public ReadTransaction snapshot() {
        return snapshot;
    }

    @Override
    public TransactionOptions options() {
        return options;
    }

    @Override
    public void setReadVersion(long version) {
        pointerReadLock.lock();
        try {
            Transaction_setVersion(getPtr(), version);
        } finally {
            pointerReadLock.unlock();
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CompletableFuture<Long> getReadVersion() {
        pointerReadLock.lock();
        try {
            return new FutureVersion(Transaction_getReadVersion(getPtr()));
        } finally {
            pointerReadLock.unlock();
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CompletableFuture<byte[]> get(byte[] key) {
        return get_internal(key, false);
    }

    private CompletableFuture<byte[]> get_internal(byte[] key, boolean isSnapshot) {
        pointerReadLock.lock();
        try {
            return new FutureResult(Transaction_get(getPtr(), key, isSnapshot));
        } finally {
            pointerReadLock.unlock();
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CompletableFuture<byte[]> getKey(KeySelector selector) {
        return getKey_internal(selector, false);
    }

    private CompletableFuture<byte[]> getKey_internal(KeySelector selector, boolean isSnapshot) {
        pointerReadLock.lock();
        try {
            return new FutureKey(Transaction_getKey(getPtr(),
                    selector.getKey(), selector.orEqual(), selector.getOffset(), isSnapshot));
        } finally {
            pointerReadLock.unlock();
        }
    }

    ///////////////////
    //  getRange -> KeySelectors
    ///////////////////
    @Override
    public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
            int limit, boolean reverse, StreamingMode mode) {
        return new RangeQuery(this, false, begin, end, limit, reverse, mode);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
            int limit, boolean reverse) {
        return getRange(begin, end, limit, reverse, StreamingMode.ITERATOR);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
            int limit) {
        return getRange(begin, end, limit, false);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end) {
        return getRange(begin, end, ReadTransaction.ROW_LIMIT_UNLIMITED);
    }

    ///////////////////
    //  getRange -> byte[]s
    ///////////////////
    @Override
    public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
            int limit, boolean reverse, StreamingMode mode) {
        return getRange(KeySelector.firstGreaterOrEqual(begin),
                KeySelector.firstGreaterOrEqual(end),
                limit, reverse, mode);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
            int limit, boolean reverse) {
        return getRange(begin, end, limit, reverse, StreamingMode.ITERATOR);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
            int limit) {
        return getRange(begin, end, limit, false);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end) {
        return getRange(begin, end, ReadTransaction.ROW_LIMIT_UNLIMITED);
    }

    ///////////////////
    //  getRange (Range)
    ///////////////////
    @Override
    public AsyncIterable<KeyValue> getRange(Range range,
            int limit, boolean reverse, StreamingMode mode) {
        return getRange(range.begin, range.end, limit, reverse, mode);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(Range range,
            int limit, boolean reverse) {
        return getRange(range, limit, reverse, StreamingMode.ITERATOR);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(Range range,
            int limit) {
        return getRange(range, limit, false);
    }
    @Override
    public AsyncIterable<KeyValue> getRange(Range range) {
        return getRange(range, ReadTransaction.ROW_LIMIT_UNLIMITED);
    }

    @Override
    public Database getDatabase() {
        return database;
    }

    protected FutureResults getRange_internal(
            KeySelector begin, KeySelector end,
            int rowLimit, int targetBytes, int streamingMode,
            int iteration, boolean isSnapshot, boolean reverse) {
        pointerReadLock.lock();
        try {
            /*System.out.println(String.format(
                    " -- range get: (%s, %s) limit: %d, bytes: %d, mode: %d, iteration: %d, snap: %s, reverse %s",
                    begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
                    iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
            return new FutureResults(Transaction_getRange(
                    getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
                    end.getKey(), end.orEqual(), end.getOffset(), rowLimit, targetBytes,
                    streamingMode, iteration, isSnapshot, reverse));
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public void addReadConflictRange(byte[] keyBegin, byte[] keyEnd) {
        addConflictRange(keyBegin, keyEnd, ConflictRangeType.READ);
    }

    @Override
    public void addReadConflictKey(byte[] key) {
        addConflictRange(key, ByteArrayUtil.join(key, new byte[]{(byte) 0}), ConflictRangeType.READ);
    }

    @Override
    public void addWriteConflictRange(byte[] keyBegin, byte[] keyEnd) {
        addConflictRange(keyBegin, keyEnd, ConflictRangeType.WRITE);
    }

    @Override
    public void addWriteConflictKey(byte[] key) {
        addConflictRange(key, ByteArrayUtil.join(key, new byte[]{(byte) 0}), ConflictRangeType.WRITE);
    }

    private void addConflictRange(byte[] keyBegin, byte[] keyEnd,
            ConflictRangeType type) {
        pointerReadLock.lock();
        try {
            Transaction_addConflictRange(getPtr(), keyBegin, keyEnd, type.code());
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public <T> T run(Function<? super Transaction, T> retryable) {
        return retryable.apply(this);
    }

    @Override
    public <T> CompletableFuture<T> runAsync(
            Function<? super Transaction, CompletableFuture<T>> retryable) {
        return AsyncUtil.applySafely(retryable, this);
    }

    @Override
    public <T> T read(Function<? super ReadTransaction, T> retryable) {
        return retryable.apply(this);
    }

    @Override
    public <T> CompletableFuture<T> readAsync(
            Function<? super ReadTransaction, CompletableFuture<T>> retryable) {
        return AsyncUtil.applySafely(retryable, this);
    }

    @Override
    public void set(byte[] key, byte[] value) {
        if(key == null || value == null)
            throw new IllegalArgumentException("Keys/Values must be non-null");
        pointerReadLock.lock();
        try {
            Transaction_set(getPtr(), key, value);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public void clear(byte[] key) {
        if(key == null)
            throw new IllegalArgumentException("Key cannot be null");
        pointerReadLock.lock();
        try {
            Transaction_clear(getPtr(), key);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public void clear(byte[] beginKey, byte[] endKey) {
        if(beginKey == null || endKey == null)
            throw new IllegalArgumentException("Keys cannot be null");
        pointerReadLock.lock();
        try {
            Transaction_clear(getPtr(), beginKey, endKey);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    @Deprecated
    public void clearRangeStartsWith(byte[] prefix) {
        clear(Range.startsWith(prefix));
    }

    @Override
    public void clear(Range range) {
        clear(range.begin, range.end);
    }

    @Override
    public void mutate(MutationType optype, byte[] key, byte[] value) {
        pointerReadLock.lock();
        try {
            Transaction_mutate(getPtr(), optype.code(), key, value);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public void setOption(int code, byte[] param) {
        pointerReadLock.lock();
        try {
            Transaction_setOption(getPtr(), code, param);
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public CompletableFuture<Void> commit() {
        pointerReadLock.lock();
        try {
            return new FutureVoid(Transaction_commit(getPtr()));
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public Long getCommittedVersion() {
        pointerReadLock.lock();
        try {
            return Transaction_getCommittedVersion(getPtr());
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public CompletableFuture<byte[]> getVersionstamp() {
        pointerReadLock.lock();
        try {
            return new FutureKey(Transaction_getVersionstamp(getPtr()));
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public CompletableFuture<Void> watch(byte[] key) throws FDBException {
        pointerReadLock.lock();
        try {
            return new FutureVoid(Transaction_watch(getPtr(), key));
        } finally {
            pointerReadLock.unlock();
        }
    }

    @Override
    public CompletableFuture<Transaction> onError(Throwable e) {
        if((e instanceof CompletionException || e instanceof ExecutionException) && e.getCause() != null) {
            e = e.getCause();
        }
        if(!(e instanceof FDBException)) {
            // Non-FDB errors are never retryable; fail the returned future immediately
            CompletableFuture<Transaction> future = new CompletableFuture<>();
            future.completeExceptionally(e);
            return future;
        }
        pointerReadLock.lock();
        try {
            CompletableFuture<Void> f = new FutureVoid(Transaction_onError(getPtr(), ((FDBException)e).getCode()));
            final Transaction tr = transfer();
            return f.thenApply(v -> tr)
                    .whenComplete((v, t) -> {
                        if(t != null) {
                            tr.dispose();
                        }
                    });
        } finally {
            pointerReadLock.unlock();
            if(!transactionOwner) {
                dispose();
            }
        }
    }

    @Override
    public Transaction reset() {
        pointerReadLock.lock();
        try {
            Transaction_reset(getPtr());
            return transfer();
        } finally {
            pointerReadLock.unlock();
            if(!transactionOwner) {
                dispose();
            }
        }
    }

    @Override
    public void cancel() {
        pointerReadLock.lock();
        try {
            Transaction_cancel(getPtr());
        } finally {
            pointerReadLock.unlock();
        }
    }

    public CompletableFuture<String[]> getAddressesForKey(byte[] key) {
        pointerReadLock.lock();
        try {
            return new FutureStrings(Transaction_getKeyLocations(getPtr(), key));
        } finally {
            pointerReadLock.unlock();
        }
    }

    // Must hold pointerReadLock when calling
    private FDBTransaction transfer() {
        FDBTransaction tr = new FDBTransaction(getPtr(), database, executor);
        tr.options().setUsedDuringCommitProtectionDisable();
        transactionOwner = false;
        return tr;
    }

    @Override
    protected long getPtr() {
        if(!transactionOwner) {
            throw new IllegalStateException("Transaction has been invalidated by reset");
        }
        else {
            return super.getPtr();
        }
    }

    @Override
    protected void finalize() throws Throwable {
        dispose();
        super.finalize();
    }

    @Override
    protected void disposeInternal(long cPtr) {
        if(transactionOwner) {
            Transaction_dispose(cPtr);
        }
    }

    @Override
    public Executor getExecutor() {
        return executor;
    }

    private native long Transaction_getReadVersion(long cPtr);
    private native void Transaction_setVersion(long cPtr, long version);
    private native long Transaction_get(long cPtr, byte[] key, boolean isSnapshot);
    private native long Transaction_getKey(long cPtr, byte[] key, boolean orEqual,
            int offset, boolean isSnapshot);
    private native long Transaction_getRange(long cPtr,
            byte[] keyBegin, boolean orEqualBegin, int offsetBegin,
            byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
            int rowLimit, int targetBytes, int streamingMode, int iteration,
            boolean isSnapshot, boolean reverse);
    private native void Transaction_addConflictRange(long cPtr,
            byte[] keyBegin, byte[] keyEnd, int conflictRangeType);
    private native void Transaction_set(long cPtr, byte[] key, byte[] value);
    private native void Transaction_clear(long cPtr, byte[] key);
    private native void Transaction_clear(long cPtr, byte[] beginKey, byte[] endKey);
    private native void Transaction_mutate(long ptr, int code, byte[] key, byte[] value);
    private native void Transaction_setOption(long cPtr, int code, byte[] value) throws FDBException;
    private native long Transaction_commit(long cPtr);
    private native long Transaction_getCommittedVersion(long cPtr);
    private native long Transaction_getVersionstamp(long cPtr);
    private native long Transaction_onError(long cPtr, int errorCode);
    private native void Transaction_dispose(long cPtr);
    private native void Transaction_reset(long cPtr);
    private native long Transaction_watch(long ptr, byte[] key) throws FDBException;
    private native void Transaction_cancel(long cPtr);
    private native long Transaction_getKeyLocations(long cPtr, byte[] key);
}
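
A sketch of how the ReadSnapshot inner class above changes conflict behavior, assuming only the API in this commit: snapshot reads are routed through get_internal(key, true) and record no read conflict range, while addReadConflictKey() opts a single key back in. Key names are illustrative.

import java.nio.charset.StandardCharsets;

import com.apple.cie.foundationdb.Database;
import com.apple.cie.foundationdb.FDB;

public class SnapshotReadExample {
    public static void main(String[] args) throws Exception {
        Database db = FDB.selectAPIVersion(500).open();
        try {
            byte[] hot = "hot-key".getBytes(StandardCharsets.UTF_8);
            db.run(tr -> {
                // Snapshot read: no read conflict range is recorded, so other
                // writers to this key will not force a retry of this transaction.
                byte[] seen = tr.snapshot().get(hot).join();
                // Selectively restore conflict checking for just this key.
                tr.addReadConflictKey(hot);
                tr.set(hot, seen == null ? new byte[]{1} : seen);
                return null;
            });
        } finally {
            db.dispose();
        }
    }
}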

@ -0,0 +1,40 @@

/*
 * FutureCluster.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.Executor;

class FutureCluster extends NativeFuture<Cluster> {
    private final Executor executor;

    protected FutureCluster(long cPtr, Executor executor) {
        super(cPtr);
        this.executor = executor;
        registerMarshalCallback();
    }

    @Override
    public Cluster getIfDone_internal() throws FDBException {
        return new Cluster(FutureCluster_get(cPtr), executor);
    }

    private native long FutureCluster_get(long cPtr) throws FDBException;
}

@ -0,0 +1,40 @@

/*
 * FutureDatabase.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

import java.util.concurrent.Executor;

class FutureDatabase extends NativeFuture<Database> {
    private final Executor executor;

    FutureDatabase(long cPtr, Executor executor) {
        super(cPtr);
        this.executor = executor;
        registerMarshalCallback();
    }

    @Override
    public Database getIfDone_internal() throws FDBException {
        return new FDBDatabase(FutureDatabase_get(cPtr), executor);
    }

    private native long FutureDatabase_get(long cPtr) throws FDBException;
}

@ -0,0 +1,37 @@

/*
 * FutureKey.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

class FutureKey extends NativeFuture<byte[]> {
    FutureKey(long cPtr) {
        super(cPtr);
        registerMarshalCallback();
    }

    @Override
    public byte[] getIfDone_internal() throws FDBException {
        return FutureKey_get(cPtr);
    }

    private native byte[] FutureKey_get(long cPtr) throws FDBException;
}
|
|
@ -0,0 +1,36 @@
/*
 * FutureResult.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

class FutureResult extends NativeFuture<byte[]> {
	FutureResult(long cPtr) {
		super(cPtr);
		registerMarshalCallback();
	}

	@Override
	public byte[] getIfDone_internal() throws FDBException {
		return FutureResult_get(cPtr);
	}

	private native byte[] FutureResult_get(long cPtr) throws FDBException;
}

@ -0,0 +1,56 @@
/*
 * FutureResults.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.cie.foundationdb;

class FutureResults extends NativeFuture<RangeResultInfo> {
	FutureResults(long cPtr) {
		super(cPtr);
		registerMarshalCallback();
	}

	@Override
	protected void postMarshal() {
		// We can't dispose because this class actually marshals on-demand:
		// getSummary() and getResults() still need the live C future after
		// the ready callback has fired.
	}

	@Override
	public RangeResultInfo getIfDone_internal() throws FDBException {
		FDBException err = Future_getError(cPtr);

		if(!err.isSuccess()) {
			throw err;
		}

		return new RangeResultInfo(this);
	}

	public RangeResultSummary getSummary() {
		return FutureResults_getSummary(cPtr);
	}

	public RangeResult getResults() {
		return FutureResults_get(cPtr);
	}

	private native RangeResultSummary FutureResults_getSummary(long ptr) throws FDBException;
	private native RangeResult FutureResults_get(long cPtr) throws FDBException;
}

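Unlike the wrappers above, FutureResults keeps the C future alive after the ready callback and marshals lazily: the summary or the full RangeResult crosses JNI only when a caller asks for it. A rough consumer sketch, assuming NativeFuture provides a blocking wait (blockUntilReady() is an assumed name) and a dispose() for releasing the C future, as the postMarshal() comment implies:

// Sketch only: blockUntilReady() and dispose() on NativeFuture are assumptions here.
static RangeResult readRange(FutureResults f) throws FDBException {
	f.blockUntilReady();            // wait for the C future to resolve
	RangeResult r = f.getResults(); // key/value data crosses JNI only now, on demand
	f.dispose();                    // safe to release the C future after marshaling
	return r;
}
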
Some files were not shown because too many files have changed in this diff.