// foundationdb/flow/Arena.h
/*
 * Arena.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FLOW_ARENA_H
#define FLOW_ARENA_H

#include <array>
#include <iterator>

#pragma once
#include "flow/FastAlloc.h"
#include "flow/FastRef.h"
#include "flow/Error.h"
#include "flow/Trace.h"
#include "flow/ObjectSerializerTraits.h"
#include "flow/FileIdentifier.h"

#include <algorithm>
#include <stdint.h>
#include <string>
#include <cstring>
#include <limits>
#include <optional>
#include <set>
#include <type_traits>
#include <sstream>
// TrackIt is a zero-size class for tracking constructions, destructions, and assignments of instances
// of a class. Just inherit TrackIt<T> from T to enable tracking of construction and destruction of
// T, and use the TRACKIT_ASSIGN(rhs) macro in any operator= definitions to enable assignment tracking.
//
// TrackIt writes to standard output because the trace log isn't available early in execution
// so applying TrackIt to StringRef or VectorRef, for example, would a segfault using the trace log.
//
// The template parameter enables TrackIt to be inherited multiple times in the ancestry
// of a class without producing an "inaccessible due to ambiguity" error.
template <class T>
2017-05-26 04:48:44 +08:00
struct TrackIt {
typedef TrackIt<T> TrackItType;
// Put TRACKIT_ASSIGN into any operator= functions for which you want assignments tracked
#define TRACKIT_ASSIGN(o) *(TrackItType*)this = *(TrackItType*)&(o)
2017-05-26 04:48:44 +08:00
// The type name T is in the TrackIt output so that objects that inherit TrackIt multiple times
// can be tracked propertly, otherwise the create and delete addresses appear duplicative.
// This function returns just the string "T]" parsed from the __PRETTY_FUNCTION__ macro. There
// doesn't seem to be a better portable way to do this.
static const char* __trackit__type() {
const char* s = __PRETTY_FUNCTION__ + sizeof(__PRETTY_FUNCTION__);
while (*--s != '=')
;
2017-05-26 04:48:44 +08:00
return s + 2;
}
TrackIt() { printf("TrackItCreate\t%s\t%p\t%s\n", __trackit__type(), this, platform::get_backtrace().c_str()); }
TrackIt(const TrackIt& o) : TrackIt() {}
TrackIt(const TrackIt&& o) : TrackIt() {}
TrackIt& operator=(const TrackIt& o) {
2017-05-26 04:48:44 +08:00
printf("TrackItAssigned\t%s\t%p<%p\t%s\n", __trackit__type(), this, &o, platform::get_backtrace().c_str());
return *this;
}
TrackIt& operator=(const TrackIt&& o) { return *this = (const TrackIt&)o; }
~TrackIt() { printf("TrackItDestroy\t%s\t%p\n", __trackit__type(), this); }
2017-05-26 04:48:44 +08:00
};
// Base-class mixin that disables copying of the derived class while leaving it movable.
// All members are protected and the destructor is non-virtual: this type is only meant
// to be inherited from, never deleted through a base pointer.
class NonCopyable {
protected:
    NonCopyable(const NonCopyable&) = delete;
    NonCopyable& operator=(const NonCopyable&) = delete;

    NonCopyable() = default;
    NonCopyable(NonCopyable&&) = default;
    NonCopyable& operator=(NonCopyable&&) = default;
    ~NonCopyable() = default; /// Protected non-virtual destructor
};
2020-06-11 23:43:31 +08:00
// An Arena is a custom allocator that consists of a set of ArenaBlocks. Allocation is performed by bumping a pointer
// on the most recent ArenaBlock until the block is unable to service the next allocation request. When the current
// ArenaBlock is full, a new (larger) one is added to the Arena. Deallocation is not directly supported. Instead,
// memory is freed by deleting the entire Arena at once. See flow/README.md for details on using Arenas.
2017-05-26 04:48:44 +08:00
class Arena {
public:
2020-02-29 06:21:38 +08:00
Arena();
explicit Arena(size_t reservedSize);
2017-05-26 04:48:44 +08:00
//~Arena();
Arena(const Arena&);
2020-06-10 08:33:41 +08:00
Arena(Arena&& r) noexcept;
2017-05-26 04:48:44 +08:00
Arena& operator=(const Arena&);
2020-06-10 08:33:41 +08:00
Arena& operator=(Arena&&) noexcept;
2017-05-26 04:48:44 +08:00
2020-02-29 06:21:38 +08:00
void dependsOn(const Arena& p);
size_t getSize() const;
2017-05-26 04:48:44 +08:00
2020-02-29 06:21:38 +08:00
bool hasFree(size_t size, const void* address);
2017-05-26 04:48:44 +08:00
friend void* operator new(size_t size, Arena& p);
friend void* operator new[](size_t size, Arena& p);
2020-02-29 06:21:38 +08:00
bool sameArena(const Arena& other) const { return impl.getPtr() == other.impl.getPtr(); }
2020-07-14 12:10:34 +08:00
private:
2017-05-26 04:48:44 +08:00
Reference<struct ArenaBlock> impl;
};
// Flatbuffers serialization for Arena: an Arena occupies zero bytes on the wire (size == 0 and
// save writes nothing). Loading registers the destination arena with the deserialization
// context (context.addArena) rather than reading any bytes.
template <>
struct scalar_traits<Arena> : std::true_type {
    constexpr static size_t size = 0;
    template <class Context>
    static void save(uint8_t*, const Arena&, Context&) {}
    // Context is an arbitrary type that is plumbed by reference throughout
    // the load call tree.
    template <class Context>
    static void load(const uint8_t*, Arena& arena, Context& context) {
        context.addArena(arena);
    }
};
// A reference from one ArenaBlock to another, used to keep a dependent arena's blocks alive
// (see ArenaBlock::makeReference / dependOn, declared below and defined elsewhere).
struct ArenaBlockRef {
    ArenaBlock* next; // the referenced block
    // NOTE(review): presumably the offset of the next ArenaBlockRef within the owning block,
    // forming an intrusive list -- confirm against ArenaBlock's implementation in Arena.cpp.
    uint32_t nextBlockOffset;
};
// A single reference-counted block of arena memory. Blocks use a compact header: tiny blocks
// track size/used in single bytes, larger blocks in the 32-bit fields. Member order is part of
// the in-memory layout and must not change.
struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted<ArenaBlock> {
    enum {
        SMALL = 64,
        LARGE = 8193 // If size == used == LARGE, then use hugeSize, hugeUsed
    };

    enum { NOT_TINY = 255, TINY_HEADER = 6 };

    // int32_t referenceCount; // 4 bytes (in ThreadSafeReferenceCounted)
    uint8_t tinySize, tinyUsed; // If these == NOT_TINY, use bigSize, bigUsed instead

    // if tinySize != NOT_TINY, following variables aren't used
    uint32_t bigSize, bigUsed; // include block header
    uint32_t nextBlockOffset;

    void addref();
    void delref();
    bool isTiny() const;
    int size() const;
    int used() const;
    int unused() const;
    const void* getData() const;
    const void* getNextData() const;
    size_t totalSize();
    // just for debugging:
    void getUniqueBlocks(std::set<ArenaBlock*>& a);
    int addUsed(int bytes);
    void makeReference(ArenaBlock* next);
    static void dependOn(Reference<ArenaBlock>& self, ArenaBlock* other);
    static void* allocate(Reference<ArenaBlock>& self, int bytes);
    // Return an appropriately-sized ArenaBlock to store the given data
    static ArenaBlock* create(int dataSize, Reference<ArenaBlock>& next);
    void destroy();
    void destroyLeaf();
    // Blocks are created only via create(); plain new is forbidden.
    static void* operator new(size_t s) = delete;
};
// Placement new/new[] that allocate from an Arena. The matching placement deletes are
// intentionally no-ops -- arena memory is reclaimed only when the whole Arena dies -- but they
// must exist so the compiler can call them if a constructor throws during placement new.
inline void* operator new(size_t size, Arena& p) {
    UNSTOPPABLE_ASSERT(size < std::numeric_limits<int>::max());
    return ArenaBlock::allocate(p.impl, (int)size);
}
inline void operator delete(void*, Arena& p) {}
inline void* operator new[](size_t size, Arena& p) {
    UNSTOPPABLE_ASSERT(size < std::numeric_limits<int>::max());
    return ArenaBlock::allocate(p.impl, (int)size);
}
inline void operator delete[](void*, Arena& p) {}
// Binary (Archive) serialization for Arena: saving writes nothing; loading replaces the Arena
// with the archive's own arena so that memory handed out by the archive stays alive.
template <class Archive>
inline void load(Archive& ar, Arena& p) {
    p = ar.arena();
}
template <class Archive>
inline void save(Archive& ar, const Arena& p) {
    // No action required
}
// Optional is a wrapper for std::optional. There
// are two primary reasons to use this wrapper instead
// of using std::optional directly:
//
// 1) Legacy: A lot of code was written using Optional before
// std::optional was available.
// 2) When you call get but no value is present Optional gives an
// assertion failure. std::optional, on the other hand, would
2020-07-07 10:24:28 +08:00
// throw std::bad_optional_access. It is easier to debug assertion
// failures, and FDB generally does not handle std exceptions, so
// assertion failures are preferable. This is the main reason we
// don't intend to use std::optional directly.
template <class T>
class Optional : public ComposedIdentifier<T, 4> {
public:
2020-07-04 06:45:18 +08:00
Optional() = default;
template <class U>
2020-07-07 10:24:28 +08:00
Optional(const U& t) : impl(std::in_place, t) {}
/* This conversion constructor was nice, but combined with the prior constructor it means that Optional<int> can be
converted to Optional<Optional<int>> in the wrong way (a non-present Optional<int> converts to a non-present
Optional<Optional<int>>). Use .castTo<>() instead. template <class S> Optional(const Optional<S>& o) :
valid(o.present()) { if (valid) new (&value) T(o.get()); } */
2020-07-04 06:45:18 +08:00
Optional(Arena& a, const Optional<T>& o) {
if (o.present())
impl = std::make_optional<T>(a, o.get());
}
2020-07-04 06:45:18 +08:00
int expectedSize() const { return present() ? get().expectedSize() : 0; }
template <class R>
Optional<R> castTo() const {
return map<R>([](const T& v) { return (R)v; });
}
template <class R>
Optional<R> map(std::function<R(T)> f) const {
if (present()) {
return Optional<R>(f(get()));
} else {
return Optional<R>();
}
}
2020-07-04 06:45:18 +08:00
bool present() const { return impl.has_value(); }
T& get() & {
2020-07-04 06:45:18 +08:00
UNSTOPPABLE_ASSERT(impl.has_value());
return impl.value();
}
T const& get() const& {
2020-07-04 06:45:18 +08:00
UNSTOPPABLE_ASSERT(impl.has_value());
return impl.value();
}
T&& get() && {
UNSTOPPABLE_ASSERT(impl.has_value());
return std::move(impl.value());
}
2020-07-04 06:45:18 +08:00
T orDefault(T const& default_value) const { return impl.value_or(default_value); }
// Spaceship operator. Treats not-present as less-than present.
int compare(Optional const& rhs) const {
if (present() == rhs.present()) {
return present() ? get().compare(rhs.get()) : 0;
}
return present() ? 1 : -1;
}
2020-07-04 06:45:18 +08:00
bool operator==(Optional const& o) const { return impl == o.impl; }
bool operator!=(Optional const& o) const { return !(*this == o); }
// Ordering: If T is ordered, then Optional() < Optional(t) and (Optional(u)<Optional(v))==(u<v)
2020-07-04 06:45:18 +08:00
bool operator<(Optional const& o) const { return impl < o.impl; }
void reset() { impl.reset(); }
private:
2020-07-04 06:45:18 +08:00
std::optional<T> impl;
};
2020-07-04 06:45:18 +08:00
// Binary deserialization of Optional<T>: reads a bool presence flag, then the value when the
// flag is true. Note that T must be default-constructible for the temporary below.
template <class Archive, class T>
inline void load(Archive& ar, Optional<T>& value) {
    bool valid;
    ar >> valid;
    if (valid) {
        T t;
        ar >> t;
        value = Optional<T>(t);
    } else {
        value.reset();
    }
}
// Binary serialization of Optional<T>: writes a bool presence flag, then the value only when
// present. The format must mirror load(Archive&, Optional<T>&).
template <class Archive, class T>
inline void save(Archive& ar, const Optional<T>& value) {
    ar << value.present();
    if (value.present()) {
        ar << value.get();
    }
}
template <class T>
2019-04-05 00:59:08 +08:00
struct Traceable<Optional<T>> : std::conditional<Traceable<T>::value, std::true_type, std::false_type>::type {
static std::string toString(const Optional<T>& value) {
return value.present() ? Traceable<T>::toString(value.get()) : "[not set]";
2019-04-14 01:05:59 +08:00
}
2019-04-10 05:29:21 +08:00
};
template <class T>
2019-04-10 05:29:21 +08:00
struct union_like_traits<Optional<T>> : std::true_type {
using Member = Optional<T>;
using alternatives = pack<T>;
template <class Context>
static uint8_t index(const Member& variant, Context&) {
return 0;
}
template <class Context>
static bool empty(const Member& variant, Context&) {
return !variant.present();
}
template <int i, class Context>
static const T& get(const Member& variant, Context&) {
2019-04-10 05:29:21 +08:00
static_assert(i == 0);
return variant.get();
}
template <size_t i, class U, class Context>
static void assign(Member& member, const U& t, Context&) {
2019-04-10 05:29:21 +08:00
member = t;
2019-04-05 00:59:08 +08:00
}
};
2017-05-26 04:48:44 +08:00
//#define STANDALONE_ALWAYS_COPY
template <class T>
class Standalone : private Arena, public T {
public:
// T must have no destructor
Arena& arena() { return *(Arena*)this; }
const Arena& arena() const { return *(const Arena*)this; }
T& contents() { return *(T*)this; }
T const& contents() const { return *(T const*)this; }
Standalone() {}
Standalone(const T& t) : Arena(t.expectedSize()), T(arena(), t) {}
Standalone<T>& operator=(const T& t) {
Arena old = std::move(arena()); // We want to defer the destruction of the arena until after we have copied t,
// in case it cross-references our previous value
2017-05-26 04:48:44 +08:00
*(Arena*)this = Arena(t.expectedSize());
*(T*)this = T(arena(), t);
2017-05-26 04:48:44 +08:00
return *this;
}
// Always-copy mode was meant to make alloc instrumentation more useful by making allocations occur at the final resting
// place of objects leaked It doesn't actually work because some uses of Standalone things assume the object's memory
// will not change on copy or assignment
2017-05-26 04:48:44 +08:00
#ifdef STANDALONE_ALWAYS_COPY
// Treat Standalone<T>'s as T's in construction and assignment so the memory is copied
Standalone(const T& t, const Arena& arena) : Standalone(t) {}
Standalone(const Standalone<T>& t) : Standalone((T const&)t) {}
Standalone(const Standalone<T>&& t) : Standalone((T const&)t) {}
Standalone<T>& operator=(const Standalone<T>&& t) {
2017-05-26 04:48:44 +08:00
*this = (T const&)t;
return *this;
}
Standalone<T>& operator=(const Standalone<T>& t) {
2017-05-26 04:48:44 +08:00
*this = (T const&)t;
return *this;
}
#else
Standalone(const T& t, const Arena& arena) : Arena(arena), T(t) {}
Standalone(const Standalone<T>&) = default;
Standalone<T>& operator=(const Standalone<T>&) = default;
Standalone(Standalone<T>&&) = default;
Standalone<T>& operator=(Standalone<T>&&) = default;
~Standalone() = default;
2017-05-26 04:48:44 +08:00
#endif
template <class U>
Standalone<U> castTo() const {
return Standalone<U>(*this, arena());
}
2017-05-26 04:48:44 +08:00
template <class Archive>
void serialize(Archive& ar) {
// FIXME: something like BinaryReader(ar) >> arena >> *(T*)this; to guarantee standalone arena???
// T tmp;
// ar >> tmp;
2017-05-26 04:48:44 +08:00
//*this = tmp;
serializer(ar, (*(T*)this), arena());
2017-05-26 04:48:44 +08:00
}
/*static Standalone<T> fakeStandalone( const T& t ) {
Standalone<T> x;
*(T*)&x = t;
return x;
2017-05-26 04:48:44 +08:00
}*/
private:
template <class U>
Standalone(Standalone<U> const&); // unimplemented
template <class U>
Standalone<T> const& operator=(Standalone<U> const&); // unimplemented
2017-05-26 04:48:44 +08:00
};
extern std::string format(const char* form, ...);
#pragma pack(push, 4)
2017-05-26 04:48:44 +08:00
class StringRef {
public:
constexpr static FileIdentifier file_identifier = 13300811;
2017-05-26 04:48:44 +08:00
StringRef() : data(0), length(0) {}
StringRef(Arena& p, const StringRef& toCopy) : data(new (p) uint8_t[toCopy.size()]), length(toCopy.size()) {
if (length > 0) {
memcpy((void*)data, toCopy.data, length);
}
2017-05-26 04:48:44 +08:00
}
StringRef(Arena& p, const std::string& toCopy) : length((int)toCopy.size()) {
UNSTOPPABLE_ASSERT(toCopy.size() <= std::numeric_limits<int>::max());
2017-05-26 04:48:44 +08:00
data = new (p) uint8_t[toCopy.size()];
if (length)
memcpy((void*)data, &toCopy[0], length);
2017-05-26 04:48:44 +08:00
}
StringRef(Arena& p, const uint8_t* toCopy, int length) : data(new (p) uint8_t[length]), length(length) {
if (length > 0) {
memcpy((void*)data, toCopy, length);
}
2017-05-26 04:48:44 +08:00
}
StringRef(const uint8_t* data, int length) : data(data), length(length) {}
StringRef(const std::string& s) : data((const uint8_t*)s.c_str()), length((int)s.size()) {
if (s.size() > std::numeric_limits<int>::max())
abort();
2017-05-26 04:48:44 +08:00
}
// StringRef( const StringRef& p );
2017-05-26 04:48:44 +08:00
const uint8_t* begin() const { return data; }
const uint8_t* end() const { return data + length; }
int size() const { return length; }
uint8_t operator[](int i) const { return data[i]; }
StringRef substr(int start) const { return StringRef(data + start, length - start); }
StringRef substr(int start, int size) const { return StringRef(data + start, size); }
bool startsWith(const StringRef& s) const { return size() >= s.size() && !memcmp(begin(), s.begin(), s.size()); }
bool endsWith(const StringRef& s) const {
return size() >= s.size() && !memcmp(end() - s.size(), s.begin(), s.size());
}
2017-05-26 04:48:44 +08:00
StringRef withPrefix(const StringRef& prefix, Arena& arena) const {
uint8_t* s = new (arena) uint8_t[prefix.size() + size()];
if (prefix.size() > 0) {
memcpy(s, prefix.begin(), prefix.size());
}
if (size() > 0) {
memcpy(s + prefix.size(), begin(), size());
}
return StringRef(s, prefix.size() + size());
2017-05-26 04:48:44 +08:00
}
StringRef withSuffix(const StringRef& suffix, Arena& arena) const {
uint8_t* s = new (arena) uint8_t[suffix.size() + size()];
if (size() > 0) {
memcpy(s, begin(), size());
}
if (suffix.size() > 0) {
memcpy(s + size(), suffix.begin(), suffix.size());
}
return StringRef(s, suffix.size() + size());
}
Standalone<StringRef> withPrefix(const StringRef& prefix) const {
2017-05-26 04:48:44 +08:00
Standalone<StringRef> r;
r.contents() = withPrefix(prefix, r.arena());
2017-05-26 04:48:44 +08:00
return r;
}
Standalone<StringRef> withSuffix(const StringRef& suffix) const {
Standalone<StringRef> r;
r.contents() = withSuffix(suffix, r.arena());
2017-05-26 04:48:44 +08:00
return r;
}
StringRef removePrefix(const StringRef& s) const {
2017-05-26 04:48:44 +08:00
// pre: startsWith(s)
UNSTOPPABLE_ASSERT(s.size() <= size()); //< In debug mode, we could check startsWith()
return substr(s.size());
2017-05-26 04:48:44 +08:00
}
StringRef removeSuffix(const StringRef& s) const {
// pre: endsWith(s)
UNSTOPPABLE_ASSERT(s.size() <= size()); //< In debug mode, we could check endsWith()
return substr(0, size() - s.size());
}
2020-04-02 12:27:49 +08:00
std::string toString() const { return std::string((const char*)data, length); }
static bool isPrintable(char c) { return c > 32 && c < 127; }
2019-04-06 04:11:50 +08:00
inline std::string printable() const;
2017-05-26 04:48:44 +08:00
std::string toHexString(int limit = -1) const {
if (limit < 0)
limit = length;
if (length > limit) {
// If limit is high enough split it so that 2/3 of limit is used to show prefix bytes and the rest is used
// for suffix bytes
if (limit >= 9) {
int suffix = limit / 3;
return substr(0, limit - suffix).toHexString() + "..." + substr(length - suffix, suffix).toHexString() +
format(" [%d bytes]", length);
}
return substr(0, limit).toHexString() + format("...[%d]", length);
}
std::string s;
s.reserve(length * 7);
for (int i = 0; i < length; i++) {
uint8_t b = (*this)[i];
if (isalnum(b))
s.append(format("%02x (%c) ", b, b));
else
s.append(format("%02x ", b));
}
if (s.size() > 0)
s.resize(s.size() - 1);
return s;
}
2017-05-26 04:48:44 +08:00
int expectedSize() const { return size(); }
int compare(StringRef const& other) const {
size_t minSize = std::min(size(), other.size());
if (minSize != 0) {
int c = memcmp(begin(), other.begin(), minSize);
if (c != 0)
return c;
}
return ::compare(size(), other.size());
2017-05-26 04:48:44 +08:00
}
// Removes bytes from begin up to and including the sep string, returns StringRef of the part before sep
StringRef eat(StringRef sep) {
for (int i = 0, iend = size() - sep.size(); i <= iend; ++i) {
if (sep.compare(substr(i, sep.size())) == 0) {
StringRef token = substr(0, i);
*this = substr(i + sep.size());
return token;
}
}
return eat();
}
StringRef eat() {
StringRef r = *this;
*this = StringRef();
return r;
}
StringRef eat(const char* sep) { return eat(StringRef((const uint8_t*)sep, (int)strlen(sep))); }
// Return StringRef of bytes from begin() up to but not including the first byte matching any byte in sep,
// and remove that sequence (including the sep byte) from *this
// Returns and removes all bytes from *this if no bytes within sep were found
StringRef eatAny(StringRef sep, uint8_t* foundSeparator) {
auto iSep = std::find_first_of(begin(), end(), sep.begin(), sep.end());
if (iSep != end()) {
if (foundSeparator != nullptr) {
*foundSeparator = *iSep;
}
const int i = iSep - begin();
StringRef token = substr(0, i);
*this = substr(i + 1);
return token;
}
return eat();
}
StringRef eatAny(const char* sep, uint8_t* foundSeparator) {
return eatAny(StringRef((const uint8_t*)sep, strlen(sep)), foundSeparator);
}
// Copies string contents to dst and returns a pointer to the next byte after
uint8_t* copyTo(uint8_t* dst) const {
memcpy(dst, data, length);
return dst + length;
}
std::vector<StringRef> splitAny(StringRef sep) const {
StringRef r = *this;
std::vector<StringRef> tokens;
while (r.size()) {
tokens.push_back(r.eatAny(sep, nullptr));
}
return tokens;
}
2017-05-26 04:48:44 +08:00
private:
// Unimplemented; blocks conversion through std::string
StringRef(char*);
2017-05-26 04:48:44 +08:00
const uint8_t* data;
int length;
};
#pragma pack(pop)
2017-05-26 04:48:44 +08:00
namespace std {
// Enables StringRef keys in std::unordered_map/set; hashes the raw bytes with the same
// algorithm as std::hash<std::string_view>.
template <>
struct hash<StringRef> {
    static constexpr std::hash<std::string_view> hashFunc{};
    std::size_t operator()(StringRef const& tag) const {
        return hashFunc(std::string_view((const char*)tag.begin(), tag.size()));
    }
};
} // namespace std
template <>
2019-04-06 04:11:50 +08:00
struct TraceableString<StringRef> {
static const char* begin(StringRef value) { return reinterpret_cast<const char*>(value.begin()); }
2019-04-06 04:11:50 +08:00
static bool atEnd(const StringRef& value, const char* iter) {
return iter == reinterpret_cast<const char*>(value.end());
}
static std::string toString(const StringRef& value) { return value.toString(); }
};
template <>
2019-04-06 04:11:50 +08:00
struct Traceable<StringRef> : TraceableStringImpl<StringRef> {};
inline std::string StringRef::printable() const {
return Traceable<StringRef>::toString(*this);
}
// A Standalone<T> traces exactly like the T it wraps (and only when T is traceable at all).
template <class T>
struct Traceable<Standalone<T>> : std::conditional<Traceable<T>::value, std::true_type, std::false_type>::type {
    static std::string toString(const Standalone<T>& value) { return Traceable<T>::toString(value); }
};
// Builds a StringRef from a string literal without a strlen call (sizeof includes the
// terminating NUL, hence the -1).
#define LiteralStringRef(str) StringRef((const uint8_t*)(str), sizeof((str)) - 1)
// User-defined-literal equivalent: "foo"_sr.
inline StringRef operator"" _sr(const char* str, size_t size) {
    return StringRef(reinterpret_cast<const uint8_t*>(str), size);
}
2017-05-26 04:48:44 +08:00
// makeString is used to allocate a Standalone<StringRef> of a known length for later
// mutation (via mutateString). If you need to append to a string of unknown length,
// consider factoring StringBuffer from DiskQueue.actor.cpp.
inline static Standalone<StringRef> makeString(int length) {
2017-05-26 04:48:44 +08:00
Standalone<StringRef> returnString;
uint8_t* outData = new (returnString.arena()) uint8_t[length];
2017-05-26 04:48:44 +08:00
((StringRef&)returnString) = StringRef(outData, length);
return returnString;
}
// Like makeString, but the returned data pointer is aligned to 'alignment' bytes: it
// over-allocates by 'alignment' bytes and rounds the pointer up, so the skipped leading
// bytes are simply unused arena space.
inline static Standalone<StringRef> makeAlignedString(int alignment, int length) {
    Standalone<StringRef> returnString;
    uint8_t* outData = new (returnString.arena()) uint8_t[alignment + length];
    // Round up to the next multiple of alignment (no-op when already aligned).
    outData = (uint8_t*)((((uintptr_t)outData + (alignment - 1)) / alignment) * alignment);
    ((StringRef&)returnString) = StringRef(outData, length);
    return returnString;
}
// Arena-backed variant of makeString: allocates 'length' uninitialized bytes from 'arena'.
inline static StringRef makeString(int length, Arena& arena) {
    uint8_t* buffer = new (arena) uint8_t[length];
    return StringRef(buffer, length);
}
// mutateString() simply casts away const and returns a pointer that can be used to mutate the
// contents of the given StringRef (it will also accept Standalone<StringRef>). Obviously this
// is only legitimate if you know where the StringRef's memory came from and that it is not shared!
// Typically paired with makeString(), which allocates fresh writable bytes.
inline static uint8_t* mutateString(StringRef& s) {
    return const_cast<uint8_t*>(s.begin());
}
2017-05-26 04:48:44 +08:00
template <class Archive>
inline void load(Archive& ar, StringRef& value) {
2017-05-26 04:48:44 +08:00
uint32_t length;
ar >> length;
value = StringRef(ar.arenaRead(length), length);
}
template <class Archive>
inline void save(Archive& ar, const StringRef& value) {
2017-05-26 04:48:44 +08:00
ar << (uint32_t)value.size();
ar.serializeBytes(value.begin(), value.size());
2017-05-26 04:48:44 +08:00
}
2019-07-02 07:32:25 +08:00
// Flatbuffers dynamic-size serialization for StringRef: the payload is just the raw bytes.
template <>
struct dynamic_size_traits<StringRef> : std::true_type {
    template <class Context>
    static size_t size(const StringRef& t, Context&) {
        return t.size();
    }
    template <class Context>
    static void save(uint8_t* out, const StringRef& t, Context&) {
        std::copy(t.begin(), t.end(), out);
    }
    // The loaded StringRef is built from context.tryReadZeroCopy -- the name suggests it aliases
    // the serialized buffer when possible rather than copying; confirm in the context type.
    template <class Context>
    static void load(const uint8_t* ptr, size_t sz, StringRef& str, Context& context) {
        str = StringRef(context.tryReadZeroCopy(ptr, sz), sz);
    }
};
inline bool operator==(const StringRef& lhs, const StringRef& rhs) {
    // Two empty strings are equal even though their data pointers may be null.
    if (lhs.size() == 0 && rhs.size() == 0)
        return true;
    ASSERT(lhs.size() >= 0);
    if (lhs.size() != rhs.size())
        return false;
    return memcmp(lhs.begin(), rhs.begin(), static_cast<unsigned int>(lhs.size())) == 0;
}
// Lexicographic byte-wise ordering; when one string is a prefix of the other, the shorter
// string orders first.
inline bool operator<(const StringRef& lhs, const StringRef& rhs) {
    const int prefixLen = std::min(lhs.size(), rhs.size());
    if (prefixLen > 0) {
        const int c = memcmp(lhs.begin(), rhs.begin(), prefixLen);
        if (c != 0)
            return c < 0;
    }
    return lhs.size() < rhs.size();
}
inline bool operator>(const StringRef& lhs, const StringRef& rhs) {
    const int prefixLen = std::min(lhs.size(), rhs.size());
    if (prefixLen > 0) {
        const int c = memcmp(lhs.begin(), rhs.begin(), prefixLen);
        if (c != 0)
            return c > 0;
    }
    return lhs.size() > rhs.size();
}
// The remaining comparisons are derived from ==, < and >.
inline bool operator!=(const StringRef& lhs, const StringRef& rhs) {
    return !(lhs == rhs);
}
inline bool operator<=(const StringRef& lhs, const StringRef& rhs) {
    return !(lhs > rhs);
}
inline bool operator>=(const StringRef& lhs, const StringRef& rhs) {
    return !(lhs < rhs);
}
// This trait is used by VectorRef to determine if deep copy constructor should recursively
// call deep copies of each element.
//
// TODO: There should be an easier way to identify the difference between flow_ref and non-flow_ref types.
// std::is_trivially_copyable does not work because some flow_ref types are trivially copyable
// and some non-flow_ref types are not trivially copyable.
2017-05-26 04:48:44 +08:00
template <typename T>
struct flow_ref : std::integral_constant<bool, !std::is_fundamental_v<T>> {};
2017-05-26 04:48:44 +08:00
template <>
struct flow_ref<UID> : std::integral_constant<bool, false> {};
template <class A, class B>
struct flow_ref<std::pair<A, B>> : std::integral_constant<bool, false> {};
2017-05-26 04:48:44 +08:00
// Opt-in trait for types usable with the "String" vector serialization strategy (see
// VecSerStrategy below). Specializations derive from std::true_type and implement
// getSize/save/load for a single element; this unspecialized default marks a type as
// unsupported, and its stub methods are never meaningfully called.
template <class T>
struct string_serialized_traits : std::false_type {
    int32_t getSize(const T& item) const { return 0; }
    uint32_t save(uint8_t* out, const T& t) const { return 0; }
    template <class Context>
    uint32_t load(const uint8_t* data, T& t, Context& context) {
        return 0;
    }
};
// How a VectorRef serializes: via FlatBuffers, or as a string of elements (the latter requires
// string_serialized_traits<T> support).
enum class VecSerStrategy { FlatBuffers, String };

// Size-cache hooks mixed into VectorRef. This primary template (used by the FlatBuffers
// strategy) caches nothing, so every hook is a no-op; conversions from either strategy are
// accepted and also do nothing.
template <class T, VecSerStrategy>
struct VectorRefPreserializer {
    VectorRefPreserializer() {}
    VectorRefPreserializer(const VectorRefPreserializer<T, VecSerStrategy::FlatBuffers>&) {}
    VectorRefPreserializer& operator=(const VectorRefPreserializer<T, VecSerStrategy::FlatBuffers>&) { return *this; }
    VectorRefPreserializer(const VectorRefPreserializer<T, VecSerStrategy::String>&) {}
    VectorRefPreserializer& operator=(const VectorRefPreserializer<T, VecSerStrategy::String>&) { return *this; }
    void invalidate() {}
    void add(const T& item) {}
    void remove(const T& item) {}
};
// "String" strategy preserializer: caches the summed string-serialized size of all elements so
// VectorRef::serializedSize() can answer without rescanning. A cached value of -1 means unknown.
template <class T>
struct VectorRefPreserializer<T, VecSerStrategy::String> {
    mutable int32_t _cached_size; // -1 means unknown
    string_serialized_traits<T> _string_traits;
    VectorRefPreserializer() : _cached_size(0) {}
    VectorRefPreserializer(const VectorRefPreserializer<T, VecSerStrategy::String>& other)
        : _cached_size(other._cached_size) {}
    VectorRefPreserializer& operator=(const VectorRefPreserializer<T, VecSerStrategy::String>& other) {
        _cached_size = other._cached_size;
        return *this;
    }
    // Converting from the FlatBuffers strategy: no cached size information is available.
    VectorRefPreserializer(const VectorRefPreserializer<T, VecSerStrategy::FlatBuffers>&) : _cached_size(-1) {}
    VectorRefPreserializer& operator=(const VectorRefPreserializer<T, VecSerStrategy::FlatBuffers>&) {
        _cached_size = -1;
        return *this;
    }
    void invalidate() { _cached_size = -1; }
    // NOTE(review): add/remove only maintain the cache when it is strictly positive, so starting
    // from the initial 0 (empty vector) the cache is NOT updated by add() -- confirm whether the
    // mutating paths always invalidate() first, otherwise serializedSize() could under-report.
    void add(const T& item) {
        if (_cached_size > 0) {
            _cached_size += _string_traits.getSize(item);
        }
    }
    void remove(const T& item) {
        if (_cached_size > 0) {
            _cached_size -= _string_traits.getSize(item);
        }
    }
};
template <class T, VecSerStrategy SerStrategy = VecSerStrategy::FlatBuffers>
class VectorRef : public ComposedIdentifier<T, 3>, public VectorRefPreserializer<T, SerStrategy> {
using VPS = VectorRefPreserializer<T, SerStrategy>;
friend class VectorRef<T,
SerStrategy == VecSerStrategy::FlatBuffers ? VecSerStrategy::String
: VecSerStrategy::FlatBuffers>;
2017-05-26 04:48:44 +08:00
public:
using value_type = T;
static_assert(SerStrategy == VecSerStrategy::FlatBuffers || string_serialized_traits<T>::value);
// T must be trivially copyable!
// T must be trivially destructible, because ~T is never called
2020-07-04 08:55:57 +08:00
static_assert(std::is_trivially_destructible_v<T>);
2017-05-26 04:48:44 +08:00
VectorRef() : data(0), m_size(0), m_capacity(0) {}
template <VecSerStrategy S>
VectorRef(const VectorRef<T, S>& other)
: VPS(other), data(other.data), m_size(other.m_size), m_capacity(other.m_capacity) {}
template <VecSerStrategy S>
VectorRef& operator=(const VectorRef<T, S>& other) {
*static_cast<VPS*>(this) = other;
data = other.data;
m_size = other.m_size;
m_capacity = other.m_capacity;
return *this;
}
// Arena constructor for non-Ref types, identified by !flow_ref
template <class T2 = T, VecSerStrategy S>
VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<!flow_ref<T2>::value, int>::type = 0)
: VPS(toCopy), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()),
m_capacity(toCopy.size()) {
if (m_size > 0) {
std::copy(toCopy.data, toCopy.data + m_size, data);
}
2017-05-26 04:48:44 +08:00
}
// Arena constructor for Ref types, which must have an Arena constructor
template <class T2 = T, VecSerStrategy S>
VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<flow_ref<T2>::value, int>::type = 0)
: VPS(), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), m_capacity(toCopy.size()) {
for (int i = 0; i < m_size; i++) {
auto ptr = new (&data[i]) T(p, toCopy[i]);
VPS::add(*ptr);
}
2017-05-26 04:48:44 +08:00
}
2020-07-15 05:33:30 +08:00
template <class It>
VectorRef(Arena& arena, It first, It last) {
if constexpr (flow_ref<T>::value) {
append_deep(arena, first, std::distance(first, last));
} else {
append(arena, first, std::distance(first, last));
}
}
VectorRef(T* data, int size) : data(data), m_size(size), m_capacity(size) {}
VectorRef(T* data, int size, int capacity) : data(data), m_size(size), m_capacity(capacity) {}
// VectorRef( const VectorRef<T>& toCopy ) : data( toCopy.data ), m_size( toCopy.m_size ), m_capacity(
// toCopy.m_capacity ) {} VectorRef<T>& operator=( const VectorRef<T>& );
2017-05-26 04:48:44 +08:00
template <VecSerStrategy S = SerStrategy>
typename std::enable_if<S == VecSerStrategy::String, uint32_t>::type serializedSize() const {
uint32_t result = sizeof(uint32_t);
string_serialized_traits<T> t;
if (VPS::_cached_size >= 0) {
return result + VPS::_cached_size;
}
for (const auto& v : *this) {
result += t.getSize(v);
}
VPS::_cached_size = result - sizeof(uint32_t);
return result;
}
2017-05-26 04:48:44 +08:00
	// Read-only accessors; these do not touch the cached serialized size.
	const T* begin() const { return data; }
	const T* end() const { return data + m_size; }
	T const& front() const { return *begin(); }
	T const& back() const { return end()[-1]; }
	int size() const { return m_size; }
	bool empty() const { return m_size == 0; }
	// No bounds checking is performed.
	const T& operator[](int i) const { return data[i]; }

	// const versions of some VectorRef operators
	const T* cbegin() const { return data; }
	const T* cend() const { return data + m_size; }
	T const& cfront() const { return *begin(); }
	T const& cback() const { return end()[-1]; }
	std::reverse_iterator<const T*> rbegin() const { return std::reverse_iterator<const T*>(end()); }
	std::reverse_iterator<const T*> rend() const { return std::reverse_iterator<const T*>(begin()); }
2017-05-26 04:48:44 +08:00
	// Returns a shallow sub-view [begin, end) sharing this vector's storage.
	// Indices are not bounds-checked. Only available for the FlatBuffers
	// strategy (the String strategy's cached size would not carry over).
	template <VecSerStrategy S = SerStrategy>
	typename std::enable_if<S == VecSerStrategy::FlatBuffers, VectorRef>::type slice(int begin, int end) const {
		return VectorRef(data + begin, end - begin);
	}
2017-05-26 04:48:44 +08:00
template <VecSerStrategy S>
bool operator==(VectorRef<T, S> const& rhs) const {
if (size() != rhs.size())
return false;
for (int i = 0; i < m_size; i++)
if ((*this)[i] != rhs[i])
return false;
2017-05-26 04:48:44 +08:00
return true;
}
template <VecSerStrategy S>
bool operator!=(VectorRef<T, S> const& rhs) const {
return !(*this == rhs);
}
2017-05-26 04:48:44 +08:00
	// Warning: Do not mutate a VectorRef that has previously been copy constructed or assigned,
	// since copies will share data
	// Each non-const accessor invalidates the cached serialized size, since the
	// caller may mutate the element it obtains.
	T* begin() {
		VPS::invalidate();
		return data;
	}
	T* end() {
		VPS::invalidate();
		return data + m_size;
	}
	T& front() {
		VPS::invalidate();
		return *begin();
	}
	T& back() {
		VPS::invalidate();
		return end()[-1];
	}
	// No bounds checking is performed.
	T& operator[](int i) {
		VPS::invalidate();
		return data[i];
	}
void push_back(Arena& p, const T& value) {
if (m_size + 1 > m_capacity)
reallocate(p, m_size + 1);
auto ptr = new (&data[m_size]) T(value);
VPS::add(*ptr);
2017-05-26 04:48:44 +08:00
m_size++;
}
template <class... Us>
T& emplace_back(Arena& p, Us&&... args) {
if (m_size + 1 > m_capacity)
reallocate(p, m_size + 1);
auto ptr = new (&data[m_size]) T(std::forward<Us>(args)...);
VPS::add(*ptr);
m_size++;
return *ptr;
}
2017-05-26 04:48:44 +08:00
// invokes the "Deep copy constructor" T(Arena&, const T&) moving T entirely into arena
void push_back_deep(Arena& p, const T& value) {
if (m_size + 1 > m_capacity)
reallocate(p, m_size + 1);
auto ptr = new (&data[m_size]) T(p, value);
VPS::add(*ptr);
2017-05-26 04:48:44 +08:00
m_size++;
}
// invokes the "Deep copy constructor" T(Arena&, U&&) moving T entirely into arena
template <class... Us>
T& emplace_back_deep(Arena& p, Us&&... args) {
if (m_size + 1 > m_capacity)
reallocate(p, m_size + 1);
auto ptr = new (&data[m_size]) T(p, std::forward<Us>(args)...);
VPS::add(*ptr);
m_size++;
return *ptr;
}
2020-07-15 01:44:16 +08:00
template <class It>
void append(Arena& p, It begin, int count) {
if (m_size + count > m_capacity)
reallocate(p, m_size + count);
VPS::invalidate();
if (count > 0) {
std::copy(begin, begin + count, data + m_size);
}
2017-05-26 04:48:44 +08:00
m_size += count;
}
template <class It>
void append_deep(Arena& p, It begin, int count) {
if (m_size + count > m_capacity)
reallocate(p, m_size + count);
for (int i = 0; i < count; i++) {
auto ptr = new (&data[m_size + i]) T(p, *begin++);
VPS::add(*ptr);
}
2017-05-26 04:48:44 +08:00
m_size += count;
}
	void pop_back() {
		// back() is the non-const overload, so this also invalidates the
		// cached serialized size before unregistering the element.
		VPS::remove(back());
		m_size--;
	}

	// Drops up to `count` elements from the front by advancing the data
	// pointer; the dropped prefix stays allocated in the arena.
	void pop_front(int count) {
		VPS::invalidate();
		count = std::min(m_size, count);
		data += count;
		m_size -= count;
		m_capacity -= count;
	}
	// Grows to `size`, default-constructing the new tail elements, or shrinks
	// by simply lowering m_size.
	// NOTE(review): the shrink path neither invalidates nor removes from the
	// cached serialized size, so a String-strategy vector shrunk this way may
	// report a stale serializedSize() — confirm callers only shrink via
	// pop_back/pop_front or re-populate afterwards.
	void resize(Arena& p, int size) {
		if (size > m_capacity)
			reallocate(p, size);
		for (int i = m_size; i < size; i++) {
			auto ptr = new (&data[i]) T();
			VPS::add(*ptr);
		}
		m_size = size;
	}
	// Ensures capacity for at least `size` elements; never changes m_size.
	void reserve(Arena& p, int size) {
		if (size > m_capacity)
			reallocate(p, size);
	}
	// expectedSize() for non-Ref types, identified by !flow_ref
	// Returns the flat byte footprint of the element array.
	template <class T2 = T>
	typename std::enable_if<!flow_ref<T2>::value, size_t>::type expectedSize() const {
		return sizeof(T) * m_size;
	}
	// expectedSize() for Ref types, which must in turn have expectedSize() implemented.
	// Adds each element's own expected size on top of the array footprint.
	template <class T2 = T>
	typename std::enable_if<flow_ref<T2>::value, size_t>::type expectedSize() const {
		size_t t = sizeof(T) * m_size;
		for (int i = 0; i < m_size; i++)
			t += data[i].expectedSize();
		return t;
	}
	int capacity() const { return m_capacity; }

	// Grows m_size without reallocating or constructing the new elements.
	// Caller must have reserved enough capacity and must initialize the new
	// slots itself before they are read.
	void extendUnsafeNoReallocNoInit(int amount) { m_size += amount; }
2020-07-10 01:49:33 +08:00
protected:
	T* data;
	int m_size, m_capacity;

	// Grows the backing storage to at least requiredCapacity, doubling at
	// minimum to keep push_back amortized O(1). Elements are moved into the
	// new buffer; the old buffer is simply abandoned in the arena (arena
	// memory is reclaimed all at once when the Arena dies).
	void reallocate(Arena& p, int requiredCapacity) {
		requiredCapacity = std::max(m_capacity * 2, requiredCapacity);
		// SOMEDAY: Maybe we are right at the end of the arena and can expand cheaply
		T* newData = new (p) T[requiredCapacity];
		if (m_size > 0) {
			std::move(data, data + m_size, newData);
		}
		data = newData;
		m_capacity = requiredCapacity;
	}
};
2020-07-10 01:49:33 +08:00
// This is a VectorRef that optimizes for tiny to small sizes.
// It keeps the first #InlineMembers on the stack - which means
// that all of them are always copied. This should be faster
// when you expect the vector to be usually very small as it
// won't need allocations in these cases.
template <class T, int InlineMembers = 1>
class SmallVectorRef {
2020-07-15 01:44:16 +08:00
static_assert(InlineMembers >= 0);
2020-07-10 01:49:33 +08:00
public:
// types
template <bool isConst>
class iterator_impl {
using self_t = iterator_impl<isConst>;
using VecType = SmallVectorRef<T, InlineMembers>;
std::conditional_t<isConst, const VecType*, VecType*> vec = nullptr;
int idx = 0;
public:
2020-07-15 01:44:16 +08:00
using iterator_category = std::random_access_iterator_tag;
2020-07-10 01:49:33 +08:00
using value_type = std::conditional_t<isConst, const T, T>;
using difference_type = int;
using pointer = value_type*;
using reference = value_type&;
friend class SmallVectorRef<T, InlineMembers>;
2020-07-15 05:33:30 +08:00
friend bool operator<(const self_t& lhs, const self_t& rhs) {
ASSERT(lhs.vec == rhs.vec);
return lhs.idx < rhs.idx;
}
friend bool operator>(const self_t& lhs, const self_t& rhs) {
ASSERT(lhs.vec == rhs.vec);
return lhs.idx > rhs.idx;
}
friend bool operator<=(const self_t& lhs, const self_t& rhs) {
ASSERT(lhs.vec == rhs.vec);
return lhs.idx <= rhs.idx;
}
friend bool operator>=(const self_t& lhs, const self_t& rhs) {
ASSERT(lhs.vec == rhs.vec);
return lhs.idx >= rhs.idx;
}
friend self_t operator+(const self_t& lhs, difference_type diff) {
auto res = lhs;
res.idx += diff;
return res;
}
friend self_t operator+(difference_type diff, const self_t& lhs) {
auto res = lhs;
res.idx += diff;
return res;
}
friend self_t operator-(const self_t& lhs, difference_type diff) {
auto res = lhs;
res.idx -= diff;
return res;
}
friend difference_type operator-(const self_t& lhs, const self_t& rhs) {
ASSERT(lhs.vec == rhs.vec);
return lhs.idx - rhs.idx;
}
2020-07-10 01:49:33 +08:00
self_t& operator++() {
++idx;
return *this;
}
self_t operator++(int) {
auto res = *this;
++(*this);
2020-07-10 01:49:33 +08:00
return res;
}
self_t& operator--() {
--idx;
return *this;
}
self_t operator--(int) {
auto res = *this;
--(*this);
2020-07-10 01:49:33 +08:00
return res;
}
self_t& operator+=(difference_type diff) {
idx += diff;
return *this;
}
self_t& operator-=(difference_type diff) {
idx -= diff;
return *this;
}
bool operator!=(self_t const& o) const { return vec != o.vec || idx != o.idx; }
bool operator==(self_t const& o) const { return vec == o.vec && idx == o.idx; }
reference operator[](difference_type i) const { return get(idx + i); }
reference& get(int i) const {
if (i < InlineMembers) {
return vec->arr[i];
} else {
return vec->data[i - InlineMembers];
2020-07-10 01:49:33 +08:00
}
}
reference get() const { return get(idx); }
reference operator*() const { return get(); }
pointer operator->() const { return &get(); }
};
using const_iterator = iterator_impl<true>;
using iterator = iterator_impl<false>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
public: // Construction
2020-07-10 07:20:55 +08:00
static_assert(std::is_trivially_destructible_v<T>);
2020-07-10 01:49:33 +08:00
SmallVectorRef() {}
SmallVectorRef(const SmallVectorRef<T, InlineMembers>& other)
: m_size(other.m_size), arr(other.arr), data(other.data) {}
2020-07-10 01:49:33 +08:00
SmallVectorRef& operator=(const SmallVectorRef<T, InlineMembers>& other) {
m_size = other.m_size;
arr = other.arr;
data = other.data;
return *this;
}
template <class T2 = T, int IM = InlineMembers>
SmallVectorRef(Arena& arena,
const SmallVectorRef<T, IM>& toCopy,
2020-07-15 05:33:30 +08:00
typename std::enable_if<!flow_ref<T2>::value, int>::type = 0)
: m_size(0) {
append(arena, toCopy.begin(), toCopy.size());
2020-07-10 01:49:33 +08:00
}
template <class T2 = T, int IM = InlineMembers>
SmallVectorRef(Arena& arena,
const SmallVectorRef<T2, IM>& toCopy,
2020-07-10 01:49:33 +08:00
typename std::enable_if<flow_ref<T2>::value, int>::type = 0)
2020-07-15 05:33:30 +08:00
: m_size(0) {
append_deep(arena, toCopy.begin(), toCopy.size());
2020-07-10 01:49:33 +08:00
}
template <class It>
2020-07-15 05:33:30 +08:00
SmallVectorRef(Arena& arena, It first, It last) : m_size(0) {
if constexpr (flow_ref<T>::value) {
append_deep(arena, first, std::distance(first, last));
} else {
append(arena, first, std::distance(first, last));
2020-07-10 01:49:33 +08:00
}
}
public: // information
int size() const { return m_size; }
int capacity() const { return InlineMembers + data.capacity(); }
2020-07-10 01:49:33 +08:00
bool empty() const { return m_size == 0; }
public: // element access
T const& front() const { return *cbegin(); }
T const& back() const { return *crbegin(); }
T& front() { return *begin(); }
T& back() { return *rbegin(); }
T const& operator[](int i) const {
if (i < InlineMembers) {
return arr[i];
} else {
return data[i - InlineMembers];
}
}
public: // Modification
void push_back(Arena& arena, T const& value) {
if (m_size < InlineMembers) {
new (&arr[m_size++]) T(value);
return;
}
++m_size;
data.push_back(arena, value);
2020-07-10 01:49:33 +08:00
}
void push_back_deep(Arena& arena, T const& value) {
if (m_size < InlineMembers) {
new (&arr[m_size++]) T(arena, value);
return;
}
++m_size;
data.push_back_deep(arena, value);
2020-07-10 01:49:33 +08:00
}
void pop_back() { --m_size; }
2020-07-10 01:49:33 +08:00
template <class It>
2020-07-15 01:44:16 +08:00
void append(Arena& arena, It first, int count) {
ASSERT(count >= 0);
while (count > 0 && m_size < InlineMembers) {
2020-07-10 01:49:33 +08:00
new (&(arr[m_size++])) T(*(first++));
2020-07-15 01:44:16 +08:00
--count;
2020-07-10 01:49:33 +08:00
}
2020-07-15 01:44:16 +08:00
data.append(arena, first, count);
m_size += count;
2020-07-10 01:49:33 +08:00
}
template <class It>
2020-07-15 01:44:16 +08:00
void append_deep(Arena& arena, It first, int count) {
ASSERT(count >= 0);
while (count > 0 && m_size < InlineMembers) {
2020-07-28 05:11:50 +08:00
new (&(arr[m_size++])) T(arena, *(first++));
2020-07-15 01:44:16 +08:00
--count;
2020-07-10 01:49:33 +08:00
}
2020-07-15 01:44:16 +08:00
data.append_deep(arena, first, count);
m_size += count;
2020-07-10 01:49:33 +08:00
}
public: // iterator access
iterator begin() {
iterator res;
res.vec = this;
res.idx = 0;
return res;
}
const_iterator cbegin() const {
const_iterator res;
res.vec = this;
res.idx = 0;
return res;
}
const_iterator begin() const { return cbegin(); }
iterator end() {
iterator res;
res.vec = this;
res.idx = m_size;
return res;
}
const_iterator cend() const {
const_iterator res;
res.vec = this;
res.idx = m_size;
return res;
}
const_iterator end() const { return cend(); }
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
const_reverse_iterator rbegin() const { return crbegin(); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator crend() const { return const_reverse_iterator(begin()); }
const_reverse_iterator rend() const { return crend(); }
private:
int m_size = 0;
2020-07-10 01:49:33 +08:00
std::array<T, InlineMembers> arr;
VectorRef<T> data;
2020-07-10 01:49:33 +08:00
};
// Trace-log formatting for VectorRef: elements rendered via Traceable<T>,
// separated by single spaces.
template <class T>
struct Traceable<VectorRef<T>> {
	constexpr static bool value = Traceable<T>::value;
	static std::string toString(const VectorRef<T>& value) {
		std::stringstream ss;
		const char* separator = "";
		for (const auto& item : value) {
			ss << separator << Traceable<T>::toString(item);
			separator = " ";
		}
		return ss.str();
	}
};
// Deserializes a length-prefixed VectorRef from `ar`, allocating element
// storage out of the archive's arena.
template <class Archive, class T, VecSerStrategy S>
inline void load(Archive& ar, VectorRef<T, S>& value) {
	// FIXME: range checking for length, here and in other serialize code
	uint32_t length;
	ar >> length;
	// Sanity bound (~100MB of elements) against corrupt or hostile prefixes.
	UNSTOPPABLE_ASSERT(length * sizeof(T) < (100 << 20));
	// SOMEDAY: Can we avoid running constructors for all the values?
	value.resize(ar.arena(), length);
	for (uint32_t i = 0; i < length; i++)
		ar >> value[i];
}
// Serializes a VectorRef to `ar` as a uint32_t element count followed by the
// elements in order (mirrors load() above).
template <class Archive, class T, VecSerStrategy S>
inline void save(Archive& ar, const VectorRef<T, S>& value) {
	uint32_t length = value.size();
	ar << length;
	for (uint32_t i = 0; i < length; i++)
		ar << value[i];
}
// FlatBuffers serialization adapter: exposes VectorRef as a vector-like type
// to the object serializer.
template <class T>
struct vector_like_traits<VectorRef<T, VecSerStrategy::FlatBuffers>> : std::true_type {
	using Vec = VectorRef<T>;
	using value_type = typename Vec::value_type;
	using iterator = const T*;
	using insert_iterator = T*;
	template <class Context>
	static size_t num_entries(const VectorRef<T>& v, Context&) {
		return v.size();
	}
	// Allocates (default-constructed) storage for `s` elements out of the
	// context's arena; insert() then overwrites them in place.
	template <class Context>
	static void reserve(VectorRef<T>& v, size_t s, Context& context) {
		v.resize(context.arena(), s);
	}
	template <class Context>
	static insert_iterator insert(Vec& v, Context&) {
		return v.begin();
	}
	template <class Context>
	static iterator begin(const Vec& v, Context&) {
		return v.begin();
	}
};
template <class V>
struct dynamic_size_traits<VectorRef<V, VecSerStrategy::String>> : std::true_type {
using T = VectorRef<V, VecSerStrategy::String>;
// May be called multiple times during one serialization
template <class Context>
static size_t size(const T& t, Context&) {
return t.serializedSize();
}
// Guaranteed to be called only once during serialization
template <class Context>
static void save(uint8_t* out, const T& t, Context&) {
string_serialized_traits<V> traits;
auto* p = out;
uint32_t length = t.size();
*reinterpret_cast<decltype(length)*>(out) = length;
out += sizeof(length);
for (const auto& item : t) {
out += traits.save(out, item);
}
2019-07-11 02:35:52 +08:00
ASSERT(out - p == t._cached_size + sizeof(uint32_t));
}
// Context is an arbitrary type that is plumbed by reference throughout the
// load call tree.
template <class Context>
static void load(const uint8_t* data, size_t size, T& t, Context& context) {
string_serialized_traits<V> traits;
auto* p = data;
uint32_t num_elements;
memcpy(&num_elements, data, sizeof(num_elements));
data += sizeof(num_elements);
t.resize(context.arena(), num_elements);
2019-08-01 08:01:23 +08:00
for (unsigned i = 0; i < num_elements; ++i) {
data += traits.load(data, t[i], context);
}
ASSERT(data - p == size);
t._cached_size = size - sizeof(uint32_t);
}
};
2017-05-26 04:48:44 +08:00
#endif