Unrevert "Enable -Wclass-memaccess and fix warnings"
parent 939a62449f
commit 88fa9bdb63
@@ -243,6 +243,7 @@ else()
       -fvisibility=hidden
       -Wreturn-type
       -fPIC)
+    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wclass-memaccess>)
     if (GPERFTOOLS_FOUND AND GCC)
       add_compile_options(
         -fno-builtin-malloc
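The $<COMPILE_LANGUAGE:CXX> generator expression limits the new flag to C++ compiles, since GCC accepts -Wclass-memaccess only for C++ (passing it to C sources would itself produce a warning). For reference, a minimal, hypothetical example of the kind of call the warning is meant to catch; the Widget type is invented here and is not from this codebase:

    // Hypothetical illustration; build with: g++ -Wclass-memaccess -c warn.cpp (GCC 8 or later)
    #include <cstring>
    #include <string>

    struct Widget {
        int id = 0;
        std::string name;  // non-trivial member makes Widget a non-trivial class
    };

    void clear(Widget& w) {
        // warning: clearing an object of non-trivial type 'struct Widget';
        //          use assignment or value-initialization instead [-Wclass-memaccess]
        // (and at runtime this would corrupt the std::string's internal state)
        std::memset(&w, 0, sizeof(Widget));
    }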
@@ -26,6 +26,7 @@
 #include <string>
 #include <vector>
 
+#include "flow/Arena.h"
 #include "flow/flow.h"
 #include "fdbclient/Knobs.h"
 
@@ -77,6 +78,10 @@ struct Tag {
         serializer(ar, locality, id);
     }
 };
+
+template <>
+struct non_flow_ref<Tag> : std::integral_constant<bool, true> {};
+
 #pragma pack(pop)
 
 template <class Ar> void load( Ar& ar, Tag& tag ) { tag.serialize_unversioned(ar); }
@@ -1013,7 +1013,7 @@ private:
     ASSERT( nextPageSeq%sizeof(Page)==0 );
 
     auto& p = backPage();
-    memset(&p, 0, sizeof(Page)); // FIXME: unnecessary?
+    memset(static_cast<void*>(&p), 0, sizeof(Page)); // FIXME: unnecessary?
     p.magic = 0xFDB;
     switch (diskQueueVersion) {
     case DiskQueueVersion::V0:
@@ -531,7 +531,7 @@ static int asyncOpen(
     if (flags & SQLITE_OPEN_WAL) oflags |= IAsyncFile::OPEN_LARGE_PAGES;
     oflags |= IAsyncFile::OPEN_LOCK;
 
-    memset(p, 0, sizeof(VFSAsyncFile));
+    memset(static_cast<void*>(p), 0, sizeof(VFSAsyncFile));
     new (p) VFSAsyncFile(zName, flags);
     try {
         // Note that SQLiteDB::open also opens the db file, so its flags and modes are important, too
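Both memset call sites above follow the same shape: the bytes are deliberately treated as raw storage and the object is rebuilt immediately afterwards (field assignments in DiskQueue, placement new in asyncOpen), so the fix routes the pointer through void* rather than dropping the memset. A hedged sketch of that idiom with an invented Record type, not code from this repository:

    #include <cstring>
    #include <new>
    #include <string>

    struct Record {
        int key = 0;
        std::string value;  // non-trivial member, so -Wclass-memaccess applies
    };

    Record* recycle(Record* slot) {
        slot->~Record();  // end the old object's lifetime before touching raw bytes

        // Without the cast, GCC 8+ warns: "clearing an object of non-trivial type".
        // static_cast<void*> documents that zeroing raw storage is deliberate here.
        std::memset(static_cast<void*>(slot), 0, sizeof(Record));

        return new (slot) Record();  // reconstruct the object in place
    }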
flow/Arena.h
@@ -699,15 +699,18 @@ inline bool operator != (const StringRef& lhs, const StringRef& rhs ) { return !(lhs == rhs); }
 inline bool operator <= ( const StringRef& lhs, const StringRef& rhs ) { return !(lhs>rhs); }
 inline bool operator >= ( const StringRef& lhs, const StringRef& rhs ) { return !(lhs<rhs); }
 
-// This trait is used by VectorRef to determine if it should just memcpy the vector contents.
-// FIXME: VectorRef really should use std::is_trivially_copyable for this BUT that is not implemented
-// in gcc c++0x so instead we will use this custom trait which defaults to std::is_trivial, which
-// handles most situations but others will have to be specialized.
+// This trait is used by VectorRef to determine if deep copy constructor should recursively
+// call deep copies of each element.
+// TODO: There should be an easier way to identify the difference between
+// flow_ref and non-flow_ref types.
 template <typename T>
-struct memcpy_able : std::is_trivial<T> {};
+struct non_flow_ref : std::is_fundamental<T> {};
 
 template <>
-struct memcpy_able<UID> : std::integral_constant<bool, true> {};
+struct non_flow_ref<UID> : std::integral_constant<bool, true> {};
 
+template <class A, class B>
+struct non_flow_ref<std::pair<A, B>> : std::integral_constant<bool, true> {};
+
 template<class T>
 struct string_serialized_traits : std::false_type {
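The trait keeps the same mechanical role: a boolean trait, specialized per type, that VectorRef uses with std::enable_if to choose between a flat element copy and a recursive arena deep copy (see the constructors and expectedSize() overloads below), though the default is now std::is_fundamental rather than std::is_trivial. A simplified, hypothetical sketch of that dispatch pattern; the type and function names are stand-ins, not FoundationDB's actual definitions:

    #include <algorithm>
    #include <new>
    #include <type_traits>
    #include <utility>

    struct Arena {};  // stand-in for flow's Arena

    // Default: only fundamental types are assumed safe to copy flat.
    template <typename T>
    struct non_flow_ref : std::is_fundamental<T> {};

    // Flat-copyable aggregates opt in via specialization, as UID and std::pair do above.
    struct UID { unsigned long long part1, part2; };
    template <>
    struct non_flow_ref<UID> : std::integral_constant<bool, true> {};
    template <class A, class B>
    struct non_flow_ref<std::pair<A, B>> : std::integral_constant<bool, true> {};

    // Flat path: chosen when non_flow_ref<T>::value is true; no per-element arena work.
    template <class T>
    typename std::enable_if<non_flow_ref<T>::value, void>::type
    copy_elements(Arena&, const T* src, T* dst, int n) {
        std::copy(src, src + n, dst);
    }

    // Deep path: chosen otherwise; each element is rebuilt with its arena constructor.
    template <class T>
    typename std::enable_if<!non_flow_ref<T>::value, void>::type
    copy_elements(Arena& arena, const T* src, T* dst, int n) {
        for (int i = 0; i < n; i++) new (&dst[i]) T(arena, src[i]);
    }

    int main() {
        Arena arena;
        UID src[2] = { {1, 2}, {3, 4} };
        UID dst[2];
        copy_elements(arena, src, dst, 2);  // resolves to the flat std::copy overload
        return 0;
    }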
@@ -783,7 +786,7 @@ public:
     using value_type = T;
     static_assert(SerStrategy == VecSerStrategy::FlatBuffers || string_serialized_traits<T>::value);
 
-    // T must be trivially destructible (and copyable)!
+    // T must be trivially destructible!
     VectorRef() : data(0), m_size(0), m_capacity(0) {}
 
     template <VecSerStrategy S>
@@ -798,19 +801,19 @@ public:
         return *this;
     }
 
-    // Arena constructor for non-Ref types, identified by memcpy_able
+    // Arena constructor for non-Ref types, identified by non_flow_ref
     template <class T2 = T, VecSerStrategy S>
-    VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<memcpy_able<T2>::value, int>::type = 0)
+    VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<non_flow_ref<T2>::value, int>::type = 0)
       : VPS(toCopy), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()),
         m_capacity(toCopy.size()) {
         if (m_size > 0) {
-            memcpy(data, toCopy.data, m_size * sizeof(T));
+            std::copy(toCopy.data, toCopy.data + m_size, data);
         }
     }
 
     // Arena constructor for Ref types, which must have an Arena constructor
     template <class T2 = T, VecSerStrategy S>
-    VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<!memcpy_able<T2>::value, int>::type = 0)
+    VectorRef(Arena& p, const VectorRef<T, S>& toCopy, typename std::enable_if<!non_flow_ref<T2>::value, int>::type = 0)
       : VPS(), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), m_capacity(toCopy.size()) {
         for (int i = 0; i < m_size; i++) {
             auto ptr = new (&data[i]) T(p, toCopy[i]);
@@ -900,7 +903,7 @@ public:
         if (m_size + count > m_capacity) reallocate(p, m_size + count);
         VPS::invalidate();
         if (count > 0) {
-            memcpy(data + m_size, begin, sizeof(T) * count);
+            std::copy(begin, begin + count, data + m_size);
         }
         m_size += count;
     }
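Swapping memcpy for std::copy in the flat-copy paths does not give up the flat copy: mainstream standard libraries lower std::copy over trivially copyable element types to a single memmove, while the call stays well-defined element-wise assignment and never triggers -Wclass-memaccess for class types. A small self-contained illustration with an invented stand-in element type:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    struct Sample {         // invented stand-in for a flat, trivially copyable element
        int8_t locality;
        uint16_t id;
    };

    int main() {
        Sample src[3] = { {0, 1}, {1, 2}, {2, 3} };
        Sample dst[3];

        // Element-wise copy assignment; for trivially copyable types the library
        // implements this as one memmove, so it costs the same as memcpy.
        std::copy(src, src + 3, dst);

        assert(dst[2].id == 3);
        static_assert(std::is_trivially_copyable<Sample>::value, "flat copy is valid");
        return 0;
    }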
@@ -940,15 +943,15 @@ public:
         if (size > m_capacity) reallocate(p, size);
     }
 
-    // expectedSize() for non-Ref types, identified by memcpy_able
+    // expectedSize() for non-Ref types, identified by non_flow_ref
     template <class T2 = T>
-    typename std::enable_if<memcpy_able<T2>::value, size_t>::type expectedSize() const {
+    typename std::enable_if<non_flow_ref<T2>::value, size_t>::type expectedSize() const {
         return sizeof(T) * m_size;
     }
 
     // expectedSize() for Ref types, which must in turn have expectedSize() implemented.
     template <class T2 = T>
-    typename std::enable_if<!memcpy_able<T2>::value, size_t>::type expectedSize() const {
+    typename std::enable_if<!non_flow_ref<T2>::value, size_t>::type expectedSize() const {
         size_t t = sizeof(T) * m_size;
         for (int i = 0; i < m_size; i++) t += data[i].expectedSize();
         return t;
@@ -967,7 +970,7 @@ private:
         // SOMEDAY: Maybe we are right at the end of the arena and can expand cheaply
         T* newData = (T*)new (p) uint8_t[requiredCapacity * sizeof(T)];
         if (m_size > 0) {
-            memcpy(newData, data, m_size * sizeof(T));
+            std::move(data, data + m_size, newData);
         }
         data = newData;
         m_capacity = requiredCapacity;
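In reallocate the replacement is the three-iterator std::move from <algorithm> (an element-relocating copy, not the single-argument cast in <utility>). For trivially copyable element types this again reduces to a flat byte copy; for movable class types it would move-assign each element. A brief, hypothetical illustration of the algorithm itself:

    #include <algorithm>
    #include <iostream>
    #include <string>

    int main() {
        std::string src[3] = { "a", "bb", "ccc" };
        std::string dst[3];  // destinations must be live objects for move-assignment

        // Three-iterator std::move: move-assigns each element into dst, leaving the
        // sources valid but unspecified; for trivially copyable element types it
        // reduces to the same flat copy that memcpy performed.
        std::move(src, src + 3, dst);

        std::cout << dst[2] << "\n";  // prints "ccc"
        return 0;
    }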