[OpenMP][libomp] Hold old __kmp_threads arrays until library shutdown

When many nested teams are formed, __kmp_threads may be reallocated to
accommodate the new threads. Previously the old array was freed as soon as
the new one was installed, so a thread in another, already existing team
that was still referencing __kmp_threads raced with that free and could
touch freed memory. This patch keeps the old thread arrays around until
library shutdown so those lingering references can complete safely and
access to __kmp_threads remains a simple array reference.

Fixes: https://github.com/llvm/llvm-project/issues/54708
Differential Revision: https://reviews.llvm.org/D125013
commit b7b4986576
parent 1061511008
Author: Jonathan Peyton
Date:   2022-05-05 09:15:41 -05:00

3 changed files with 28 additions and 2 deletions
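
To make the mechanism concrete, here is a minimal, self-contained sketch of the
same retire-list idea, written independently of the libomp internals. The names
(g_threads, expand, cleanup), the doubling growth policy, the single-writer
locking assumption, and the plain (non-volatile) publishing store are all
illustrative simplifications, not the runtime's actual code.

#include <stdlib.h>
#include <string.h>

/* Illustrative only: a retire list in the spirit of kmp_old_threads_list_t.
   Old arrays are kept alive until shutdown so a reader that already loaded
   the previous pointer still indexes valid memory. */
typedef struct old_array_node {
  void **array;
  struct old_array_node *next;
} old_array_node;

static void **g_threads = NULL;     /* readers may load this at any time     */
static int g_capacity = 0;          /* writer side is assumed to hold a lock */
static old_array_node *g_retired = NULL;

/* Grow the array; assumed to run under a writer-side (fork/join-style) lock. */
static void expand(int needed) {
  int new_capacity = g_capacity ? 2 * g_capacity : 16; /* doubling: assumption */
  while (new_capacity < needed)
    new_capacity *= 2;

  void **bigger = (void **)calloc(new_capacity, sizeof(void *));
  if (g_threads) {
    memcpy(bigger, g_threads, g_capacity * sizeof(void *));
    /* Retire the old array instead of freeing it: concurrent readers that
       already fetched g_threads keep dereferencing valid memory. */
    old_array_node *node = (old_array_node *)malloc(sizeof(*node));
    node->array = g_threads;
    node->next = g_retired;
    g_retired = node;
  }
  g_threads = bigger; /* publish the new array */
  g_capacity = new_capacity;
}

/* Called once at library shutdown, after all readers are done. */
static void cleanup(void) {
  while (g_retired) {
    old_array_node *next = g_retired->next;
    free(g_retired->array);
    free(g_retired);
    g_retired = next;
  }
  free(g_threads);
  g_threads = NULL;
  g_capacity = 0;
}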

--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h

@@ -2989,6 +2989,15 @@ struct fortran_inx_info {
   kmp_int32 data;
 };
 
+// This list type exists to hold old __kmp_threads arrays so that
+// old references to them may complete while reallocation takes place when
+// expanding the array. The items in this list are kept alive until library
+// shutdown.
+typedef struct kmp_old_threads_list_t {
+  kmp_info_t **threads;
+  struct kmp_old_threads_list_t *next;
+} kmp_old_threads_list_t;
+
 /* ------------------------------------------------------------------------ */
 
 extern int __kmp_settings;
@@ -3270,6 +3279,8 @@ extern int __kmp_teams_thread_limit;
 /* the following are protected by the fork/join lock */
 /* write: lock read: anytime */
 extern kmp_info_t **__kmp_threads; /* Descriptors for the threads */
+/* Holds old arrays of __kmp_threads until library shutdown */
+extern kmp_old_threads_list_t *__kmp_old_threads_list;
 /* read/write: lock */
 extern volatile kmp_team_t *__kmp_team_pool;
 extern volatile kmp_info_t *__kmp_thread_pool;

--- a/openmp/runtime/src/kmp_global.cpp
+++ b/openmp/runtime/src/kmp_global.cpp

@@ -442,6 +442,7 @@ kmp_uint64 __kmp_pause_init = 1; // for tpause
 KMP_ALIGN_CACHE
 kmp_info_t **__kmp_threads = NULL;
 kmp_root_t **__kmp_root = NULL;
+kmp_old_threads_list_t *__kmp_old_threads_list = NULL;
 
 /* data read/written to often by primary threads */
 KMP_ALIGN_CACHE

--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp

@@ -3669,11 +3669,16 @@ static int __kmp_expand_threads(int nNeed) {
              __kmp_threads_capacity * sizeof(kmp_info_t *));
   KMP_MEMCPY(newRoot, __kmp_root,
              __kmp_threads_capacity * sizeof(kmp_root_t *));
+  // Put old __kmp_threads array on a list. Any ongoing references to the old
+  // list will be valid. This list is cleaned up at library shutdown.
+  kmp_old_threads_list_t *node =
+      (kmp_old_threads_list_t *)__kmp_allocate(sizeof(kmp_old_threads_list_t));
+  node->threads = __kmp_threads;
+  node->next = __kmp_old_threads_list;
+  __kmp_old_threads_list = node;
 
-  kmp_info_t **temp_threads = __kmp_threads;
   *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
   *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
-  __kmp_free(temp_threads);
 
   added += newCapacity - __kmp_threads_capacity;
   *(volatile int *)&__kmp_threads_capacity = newCapacity;
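
The two removed lines above are the crux of the fix: previously the old array
was freed immediately after the new one was published, while another thread
could still be evaluating an expression like the hypothetical reader below
(not a specific line of the runtime), indexing the pointer value it had
already loaded.

/* Hypothetical concurrent reader: the load of __kmp_threads and the index
   into it are not atomic with respect to __kmp_expand_threads(). If the
   array it loaded has been freed in the meantime, this is a use-after-free;
   if it has merely been retired, the read still sees valid descriptors. */
kmp_info_t *thr = __kmp_threads[gtid];
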
@@ -8101,6 +8106,15 @@ void __kmp_cleanup(void) {
   __kmp_root = NULL;
   __kmp_threads_capacity = 0;
 
+  // Free old __kmp_threads arrays if they exist.
+  kmp_old_threads_list_t *ptr = __kmp_old_threads_list;
+  while (ptr) {
+    kmp_old_threads_list_t *next = ptr->next;
+    __kmp_free(ptr->threads);
+    __kmp_free(ptr);
+    ptr = next;
+  }
+
 #if KMP_USE_DYNAMIC_LOCK
   __kmp_cleanup_indirect_user_locks();
 #else