//===- StorageUniquer.cpp - Common Storage Class Uniquer -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Support/StorageUniquer.h"

#include "mlir/Support/LLVM.h"
#include "mlir/Support/ThreadLocalCache.h"
#include "mlir/Support/TypeID.h"
#include "llvm/Support/RWMutex.h"

using namespace mlir;
using namespace mlir::detail;

namespace {
/// This class represents a uniquer for storage instances of a specific type.
/// It contains all of the necessary data to unique storage instances in a
/// thread-safe way. This allows the main uniquer to bucket each of the
/// individual sub-types, removing the need to lock the main uniquer itself.
struct InstSpecificUniquer {
  using BaseStorage = StorageUniquer::BaseStorage;
  using StorageAllocator = StorageUniquer::StorageAllocator;

  /// A lookup key for derived instances of storage objects.
  struct LookupKey {
    /// The known derived kind for the storage.
    unsigned kind;

    /// The known hash value of the key.
    unsigned hashValue;

    /// An equality function for comparing with an existing storage instance.
    function_ref<bool(const BaseStorage *)> isEqual;
  };

  /// A utility wrapper object representing a hashed storage object. This
  /// class contains a storage object and its precomputed hash value.
  struct HashedStorage {
    HashedStorage(unsigned hashValue = 0, BaseStorage *storage = nullptr)
        : hashValue(hashValue), storage(storage) {}
    unsigned hashValue;
    BaseStorage *storage;
  };

  /// DenseMap info for uniqued storage objects, supporting lookup by both
  /// HashedStorage and LookupKey.
  struct StorageKeyInfo : DenseMapInfo<HashedStorage> {
    static HashedStorage getEmptyKey() {
      return HashedStorage(0, DenseMapInfo<BaseStorage *>::getEmptyKey());
    }
    static HashedStorage getTombstoneKey() {
      return HashedStorage(0, DenseMapInfo<BaseStorage *>::getTombstoneKey());
    }

    static unsigned getHashValue(const HashedStorage &key) {
      return key.hashValue;
    }
    static unsigned getHashValue(LookupKey key) { return key.hashValue; }

    static bool isEqual(const HashedStorage &lhs, const HashedStorage &rhs) {
      return lhs.storage == rhs.storage;
    }
    static bool isEqual(const LookupKey &lhs, const HashedStorage &rhs) {
      if (isEqual(rhs, getEmptyKey()) || isEqual(rhs, getTombstoneKey()))
        return false;
      // If the lookup kind matches the kind of the storage, then invoke the
      // equality function on the lookup key.
      return lhs.kind == rhs.storage->getKind() && lhs.isEqual(rhs.storage);
    }
  };

  /// Uniqued instances with specific hashing or storage constraints.
  using StorageTypeSet = DenseSet<HashedStorage, StorageKeyInfo>;
  StorageTypeSet complexInstances;

  /// Simple instances of this storage object, keyed by kind.
  llvm::SmallDenseMap<unsigned, BaseStorage *, 1> simpleInstances;

  /// Allocator to use when constructing derived instances.
  StorageAllocator allocator;

  /// A mutex to keep storage uniquing thread-safe.
  llvm::sys::SmartRWMutex<true> mutex;
};
} // end anonymous namespace
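
// A note on the lookup scheme above: StorageKeyInfo hashes and compares both
// HashedStorage entries and LookupKey objects, which lets complexInstances be
// probed with find_as/insert_as using only a LookupKey, i.e. without first
// constructing a storage object. A minimal sketch of the idea (the `uniquer`
// and `isEqualFn` names are illustrative, not part of this file):
//
//   InstSpecificUniquer::LookupKey key{kind, hashValue, isEqualFn};
//   auto it = uniquer.complexInstances.find_as(key);
//   if (it != uniquer.complexInstances.end())
//     return it->storage; // Reuse the existing uniqued storage.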

namespace mlir {
namespace detail {
/// This is the implementation of the StorageUniquer class.
struct StorageUniquerImpl {
  using BaseStorage = StorageUniquer::BaseStorage;
  using StorageAllocator = StorageUniquer::StorageAllocator;

  /// Get or create an instance of a complex derived type.
  BaseStorage *
  getOrCreate(TypeID id, unsigned kind, unsigned hashValue,
              function_ref<bool(const BaseStorage *)> isEqual,
              function_ref<BaseStorage *(StorageAllocator &)> ctorFn) {
    assert(instUniquers.count(id) && "creating unregistered storage instance");
    InstSpecificUniquer::LookupKey lookupKey{kind, hashValue, isEqual};
    InstSpecificUniquer &storageUniquer = *instUniquers[id];
    if (!threadingIsEnabled)
      return getOrCreateUnsafe(storageUniquer, kind, lookupKey, ctorFn);

    // Check for an instance of this object in the thread-local cache.
    auto localIt = complexStorageLocalCache->insert_as(
        InstSpecificUniquer::HashedStorage(lookupKey.hashValue), lookupKey);
    BaseStorage *&localInst = localIt.first->storage;
    if (localInst)
      return localInst;

    // Check for an existing instance in read-only mode.
    {
      llvm::sys::SmartScopedReader<true> typeLock(storageUniquer.mutex);
      auto it = storageUniquer.complexInstances.find_as(lookupKey);
      if (it != storageUniquer.complexInstances.end())
        return localInst = it->storage;
    }

    // Acquire a writer-lock so that we can safely create the new instance.
    llvm::sys::SmartScopedWriter<true> typeLock(storageUniquer.mutex);
    return localInst =
               getOrCreateUnsafe(storageUniquer, kind, lookupKey, ctorFn);
  }
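
  // The lookup above is effectively a three-tier fast path: the thread-local
  // cache avoids any locking for storage this thread has already seen, the
  // reader lock allows concurrent lookups of instances created by other
  // threads, and only a miss in both tiers pays for the writer lock.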

  /// Get or create an instance of a complex derived type in a thread-unsafe
  /// fashion.
  BaseStorage *
  getOrCreateUnsafe(InstSpecificUniquer &storageUniquer, unsigned kind,
                    InstSpecificUniquer::LookupKey &key,
                    function_ref<BaseStorage *(StorageAllocator &)> ctorFn) {
    auto existing =
        storageUniquer.complexInstances.insert_as({key.hashValue}, key);
    if (!existing.second)
      return existing.first->storage;

    // Otherwise, construct and initialize the derived storage for this
    // instance.
    BaseStorage *storage =
        initializeStorage(kind, storageUniquer.allocator, ctorFn);
    return existing.first->storage = storage;
  }

  /// Get or create an instance of a simple derived type.
  BaseStorage *
  getOrCreate(TypeID id, unsigned kind,
              function_ref<BaseStorage *(StorageAllocator &)> ctorFn) {
    assert(instUniquers.count(id) && "creating unregistered storage instance");
    InstSpecificUniquer &storageUniquer = *instUniquers[id];
    if (!threadingIsEnabled)
      return getOrCreateUnsafe(storageUniquer, kind, ctorFn);

    // Check for an instance of this object in the thread-local cache.
    BaseStorage *&localInst = (*simpleStorageLocalCache)[kind];
    if (localInst)
      return localInst;

    // Check for an existing instance in read-only mode.
    {
      llvm::sys::SmartScopedReader<true> typeLock(storageUniquer.mutex);
      auto it = storageUniquer.simpleInstances.find(kind);
      if (it != storageUniquer.simpleInstances.end())
        return it->second;
    }

    // Acquire a writer-lock so that we can safely create the new instance.
    llvm::sys::SmartScopedWriter<true> typeLock(storageUniquer.mutex);
    return localInst = getOrCreateUnsafe(storageUniquer, kind, ctorFn);
  }

  /// Get or create an instance of a simple derived type in a thread-unsafe
  /// fashion.
  BaseStorage *
  getOrCreateUnsafe(InstSpecificUniquer &storageUniquer, unsigned kind,
                    function_ref<BaseStorage *(StorageAllocator &)> ctorFn) {
    auto &result = storageUniquer.simpleInstances[kind];
    if (result)
      return result;

    // Otherwise, create and return a new storage instance.
    return result = initializeStorage(kind, storageUniquer.allocator, ctorFn);
  }

  /// Erase an instance of a complex derived type.
  void erase(TypeID id, unsigned kind, unsigned hashValue,
             function_ref<bool(const BaseStorage *)> isEqual,
             function_ref<void(BaseStorage *)> cleanupFn) {
    assert(instUniquers.count(id) && "erasing unregistered storage instance");
    InstSpecificUniquer &storageUniquer = *instUniquers[id];
    InstSpecificUniquer::LookupKey lookupKey{kind, hashValue, isEqual};

    // Acquire a writer-lock so that we can safely erase the instance.
    llvm::sys::SmartScopedWriter<true> lock(storageUniquer.mutex);
    auto existing = storageUniquer.complexInstances.find_as(lookupKey);
    if (existing == storageUniquer.complexInstances.end())
      return;

    // Clean up the storage and remove it from the map.
    cleanupFn(existing->storage);
    storageUniquer.complexInstances.erase(existing);
  }

  /// Mutates an instance of a derived storage in a thread-safe way.
  LogicalResult
  mutate(TypeID id,
         function_ref<LogicalResult(StorageAllocator &)> mutationFn) {
    assert(instUniquers.count(id) && "mutating unregistered storage instance");
    InstSpecificUniquer &storageUniquer = *instUniquers[id];
    if (!threadingIsEnabled)
      return mutationFn(storageUniquer.allocator);

    llvm::sys::SmartScopedWriter<true> lock(storageUniquer.mutex);
    return mutationFn(storageUniquer.allocator);
  }
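
  // Mutation holds the writer lock for the duration of the callback so that
  // in-place changes to existing storage never race with concurrent lookups
  // or insertions on the same sub-type uniquer.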

  //===--------------------------------------------------------------------===//
  // Instance Storage
  //===--------------------------------------------------------------------===//

  /// Utility to create and initialize a storage instance.
  BaseStorage *
  initializeStorage(unsigned kind, StorageAllocator &allocator,
                    function_ref<BaseStorage *(StorageAllocator &)> ctorFn) {
    BaseStorage *storage = ctorFn(allocator);
    storage->kind = kind;
    return storage;
  }
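
  // Note: the constructor callback is expected to allocate the derived
  // storage from the provided StorageAllocator, so a uniqued instance lives
  // as long as the per-type allocator (and thus the owning uniquer) itself.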

  /// Map of type ids to the storage uniquer to use for registered objects.
  DenseMap<TypeID, std::unique_ptr<InstSpecificUniquer>> instUniquers;

  /// A thread-local cache for simple and complex storage objects. This helps
  /// to reduce lock contention when an object already exists in the cache.
  ThreadLocalCache<DenseMap<unsigned, BaseStorage *>> simpleStorageLocalCache;
  ThreadLocalCache<InstSpecificUniquer::StorageTypeSet>
      complexStorageLocalCache;

  /// Flag specifying if multi-threading is enabled within the uniquer.
  bool threadingIsEnabled = true;
};
} // end namespace detail
} // end namespace mlir

StorageUniquer::StorageUniquer() : impl(new StorageUniquerImpl()) {}
StorageUniquer::~StorageUniquer() {}

/// Set the flag specifying if multi-threading is disabled within the uniquer.
void StorageUniquer::disableMultithreading(bool disable) {
  impl->threadingIsEnabled = !disable;
}

/// Register a new storage object with this uniquer using the given unique
/// type id.
void StorageUniquer::registerStorageType(TypeID id) {
  impl->instUniquers.try_emplace(id, std::make_unique<InstSpecificUniquer>());
}
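
// A storage type must be registered before any get/erase/mutate call that
// uses its TypeID; the asserts in StorageUniquerImpl enforce this in debug
// builds.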

/// Implementation for getting/creating an instance of a derived type with
/// complex storage.
auto StorageUniquer::getImpl(
    const TypeID &id, unsigned kind, unsigned hashValue,
    function_ref<bool(const BaseStorage *)> isEqual,
    function_ref<BaseStorage *(StorageAllocator &)> ctorFn) -> BaseStorage * {
  return impl->getOrCreate(id, kind, hashValue, isEqual, ctorFn);
}

/// Implementation for getting/creating an instance of a derived type with
/// default storage.
auto StorageUniquer::getImpl(
    const TypeID &id, unsigned kind,
    function_ref<BaseStorage *(StorageAllocator &)> ctorFn) -> BaseStorage * {
  return impl->getOrCreate(id, kind, ctorFn);
}

/// Implementation for erasing an instance of a derived type with complex
/// storage.
void StorageUniquer::eraseImpl(const TypeID &id, unsigned kind,
                               unsigned hashValue,
                               function_ref<bool(const BaseStorage *)> isEqual,
                               function_ref<void(BaseStorage *)> cleanupFn) {
  impl->erase(id, kind, hashValue, isEqual, cleanupFn);
}

/// Implementation for mutating an instance of a derived storage.
LogicalResult StorageUniquer::mutateImpl(
    const TypeID &id,
    function_ref<LogicalResult(StorageAllocator &)> mutationFn) {
  return impl->mutate(id, mutationFn);
}
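
// Typical client flow, as a hedged sketch: the templated entry points
// (e.g. StorageUniquer::get<DerivedStorage>) live in the header and forward
// to the getImpl overloads above. The derived storage type and the `uniquer`
// instance below are hypothetical names for illustration only.
//
//   StorageUniquer uniquer;
//   uniquer.registerStorageType(TypeID::get<MyStorage>());
//   // Hash/equality and construction come from MyStorage's key type; the
//   // uniquer returns the existing instance when the same key is seen again.
//   MyStorage *storage = uniquer.get<MyStorage>(/*kind=*/myKind, /*args=*/...);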