/*
* ThreadPrimitives.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
 *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
 *
* http://www.apache.org/licenses/LICENSE-2.0
*
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOW_THREADPRIMITIVES_H
#define FLOW_THREADPRIMITIVES_H
#pragma once
#include <atomic>
#include <array>
#include "flow/Error.h"
#include "flow/Trace.h"
#if defined(__linux__) || defined(__FreeBSD__)
#include <semaphore.h>
#endif
#ifdef __APPLE__
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/mach_error.h>
#include <mach/clock_types.h>
#endif
#if VALGRIND
#include <drd.h>
#endif
// TODO: We should make this dependent on the CPU. Maybe cmake
// can set this variable properly?
constexpr size_t MAX_CACHE_LINE_SIZE = 64;
// A busy-waiting mutual-exclusion lock. The object is aligned to (and padded
// out to) a full cache line so that two locks never share a line, which would
// cause false sharing between threads spinning on different locks.
class alignas(MAX_CACHE_LINE_SIZE) ThreadSpinLock {
public:
	ThreadSpinLock() {
#if VALGRIND
		ANNOTATE_RWLOCK_CREATE(this);
#endif
	}
	~ThreadSpinLock() {
#if VALGRIND
		ANNOTATE_RWLOCK_DESTROY(this);
#endif
	}
	// Busy-waits until the lock is acquired. test_and_set(acquire) both takes
	// the lock and synchronizes-with the releasing thread's clear(release).
	void enter() {
		while (isLocked.test_and_set(std::memory_order_acquire))
#ifndef __aarch64__
			_mm_pause(); // hint to the CPU that this is a spin-wait loop
#else
			; /* spin */
#endif
#if VALGRIND
		ANNOTATE_RWLOCK_ACQUIRED(this, true);
#endif
	}
	void leave() {
		isLocked.clear(std::memory_order_release);
#if VALGRIND
		ANNOTATE_RWLOCK_RELEASED(this, true);
#endif
	}
	// Debug check that no thread currently holds the lock. NOTE(review): it
	// briefly acquires and releases the lock, so it is a point-in-time check,
	// not a guarantee that the lock stays free afterwards.
	void assertNotEntered() {
		ASSERT(!isLocked.test_and_set(std::memory_order_acquire));
		isLocked.clear(std::memory_order_release);
	}

private:
	// Non-copyable: a lock identity must not be duplicated.
	ThreadSpinLock(const ThreadSpinLock&) = delete;
	void operator=(const ThreadSpinLock&) = delete;
	std::atomic_flag isLocked = ATOMIC_FLAG_INIT;
	// We want a spin lock to occupy a full cache line in order to
	// prevent false sharing.
	std::array<uint8_t, MAX_CACHE_LINE_SIZE - sizeof(isLocked)> padding;
};
class ThreadSpinLockHolder {
ThreadSpinLock& lock;
public:
ThreadSpinLockHolder( ThreadSpinLock& lock ) : lock(lock) { lock.enter(); }
~ThreadSpinLockHolder() { lock.leave(); }
};
class ThreadUnsafeSpinLock { public: void enter(){}; void leave(){}; void assertNotEntered(){}; };
class ThreadUnsafeSpinLockHolder { public: ThreadUnsafeSpinLockHolder(ThreadUnsafeSpinLock&){}; };
// Select the real spin lock in thread-safe builds and the no-op variant
// otherwise; callers use SpinLock / SpinLockHolder unconditionally.
#if FLOW_THREAD_SAFE
using SpinLock = ThreadSpinLock;
using SpinLockHolder = ThreadSpinLockHolder;
#else
using SpinLock = ThreadUnsafeSpinLock;
using SpinLockHolder = ThreadUnsafeSpinLockHolder;
#endif
// Cross-thread notification primitive built on each platform's native
// semaphore. From the interface, block() presumably waits until some thread
// calls set() — the member functions are only declared here; definitions
// live in the platform-specific source file (TODO confirm semantics there).
class Event {
public:
	Event();
	~Event();
	void set();
	void block();

private:
#ifdef _WIN32
	// Opaque handle, stored as void* so this header need not pull in windows.h.
	void* ev;
#elif defined(__linux__) || defined(__FreeBSD__)
	sem_t sem; // POSIX unnamed semaphore (<semaphore.h>, included above)
#elif defined(__APPLE__)
	mach_port_t self;
	semaphore_t sem; // Mach semaphore (<mach/semaphore.h>, included above)
#else
#error Port me!
#endif
};
class Mutex
{
	// A re-entrant process-local blocking lock (e.g. CRITICAL_SECTION on Windows)
	// Thread safe even if !FLOW_THREAD_SAFE
public:
	Mutex();
	~Mutex();
	void enter();
	void leave();
private:
	// Opaque pointer to the platform-specific lock object; allocated and
	// released by the constructor/destructor defined in the platform source
	// file (not visible in this header).
	void* impl;
};
class MutexHolder {
Mutex& lock;
public:
MutexHolder( Mutex& lock ) : lock(lock) { lock.enter(); }
~MutexHolder() { lock.leave(); }
};
#endif