Prevent main thread from destroying flatbuffers globals
We recently observed (using TSan) the main thread exiting without first joining the network thread, which caused data races and heap-use-after-frees. Now the lifetime of these globals is tied to the network thread itself (and, in fact, to every thread, but the instance that actually holds memory is owned by the network thread).
parent b5d7780293
commit cb6389d42d
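As a rough illustration of the pattern this change adopts (the names gScratch, swapWithThreadLocalScratch, and UsesScratch below are placeholders, not from the codebase): each thread owns its own scratch vector through thread_local, and a small swap helper lets RAII users borrow and return it, so the buffer is destroyed at that thread's exit rather than by the main thread's static destructors.

#include <thread>
#include <vector>

namespace {
// Each thread gets its own scratch buffer; it is destroyed when that
// thread exits, not during static destruction on the main thread.
thread_local std::vector<int> gScratch;
}  // namespace

// Borrow/return the calling thread's scratch memory by swapping.
void swapWithThreadLocalScratch(std::vector<int>& v) { gScratch.swap(v); }

struct UsesScratch {
  std::vector<int> offsets;
  UsesScratch() {
    swapWithThreadLocalScratch(offsets);  // take this thread's buffer
    offsets.clear();                      // reuse its capacity
  }
  ~UsesScratch() { swapWithThreadLocalScratch(offsets); }  // give it back
};

int main() {
  std::thread worker([] {
    UsesScratch u;            // operates on the worker thread's buffer
    u.offsets.push_back(42);
  });
  // Even if main reached exit without joining, the worker's buffer would
  // not be torn down by main's static destructors; here we join anyway.
  worker.join();
}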
@@ -31,10 +31,12 @@
 namespace detail {
 
 namespace {
-std::vector<int> mWriteToOffsetsMemoy;
+thread_local std::vector<int> gWriteToOffsetsMemory;
 }
 
-std::vector<int>* writeToOffsetsMemory = &mWriteToOffsetsMemoy;
+void swapWithThreadLocalGlobal(std::vector<int>& writeToOffsets) {
+  gWriteToOffsetsMemory.swap(writeToOffsets);
+}
 
 VTable generate_vtable(size_t numMembers, const std::vector<unsigned>& sizesAlignments) {
   if (numMembers == 0) {
@@ -343,15 +343,16 @@ struct _SizeOf {
   static constexpr unsigned int align = fb_align<T>;
 };
 
-extern std::vector<int>* writeToOffsetsMemory;
+// Re-use this intermediate memory to avoid frequent new/delete
+void swapWithThreadLocalGlobal(std::vector<int>& writeToOffsets);
 
 template <class Context>
 struct PrecomputeSize : Context {
   PrecomputeSize(const Context& context) : Context(context) {
-    writeToOffsets.swap(*writeToOffsetsMemory);
+    swapWithThreadLocalGlobal(writeToOffsets);
     writeToOffsets.clear();
   }
-  ~PrecomputeSize() { writeToOffsets.swap(*writeToOffsetsMemory); }
+  ~PrecomputeSize() { swapWithThreadLocalGlobal(writeToOffsets); }
   // |offset| is measured from the end of the buffer. Precondition: len <=
   // offset.
   void write(const void*, int offset, int /*len*/) { current_buffer_size = std::max(current_buffer_size, offset); }
@@ -491,7 +492,7 @@ extern VTable generate_vtable(size_t numMembers, const std::vector<unsigned>& si
 
 template <unsigned... MembersAndAlignments>
 const VTable* gen_vtable3() {
-  static VTable table =
+  static thread_local VTable table =
       generate_vtable(sizeof...(MembersAndAlignments) / 2, std::vector<unsigned>{ MembersAndAlignments... });
   return &table;
 }
@@ -619,7 +620,7 @@ VTableSet get_vtableset_impl(const Root& root, const Context& context) {
 
 template <class Root, class Context>
 const VTableSet* get_vtableset(const Root& root, const Context& context) {
-  static VTableSet result = get_vtableset_impl(root, context);
+  static thread_local VTableSet result = get_vtableset_impl(root, context);
   return &result;
 }
 
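For the vtable caches above, the same effect comes from marking the function-local statics thread_local: each thread lazily builds its own instance on first call and tears it down when that thread exits, instead of sharing one object destroyed during the main thread's static destruction. A minimal sketch of that behaviour, using placeholder names (Cache, get_cache):

#include <cstdio>
#include <thread>
#include <vector>

struct Cache {
  std::vector<int> data{1, 2, 3};
  ~Cache() { std::puts("Cache destroyed at this thread's exit"); }
};

// Placeholder analogue of gen_vtable3 / get_vtableset: one lazily
// constructed instance per thread, torn down when that thread finishes.
const Cache* get_cache() {
  static thread_local Cache cache;
  return &cache;
}

int main() {
  std::thread worker([] { std::printf("%zu\n", get_cache()->data.size()); });
  worker.join();  // the worker's Cache is already gone once the thread ends
  std::printf("%zu\n", get_cache()->data.size());  // main's own instance
}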