[OpenMP] Fix race condition in omp_init_lock
This is a partial fix for bug 34050. It prevents callers of omp_set_lock (which does not take __kmp_global_lock) from ever seeing an uninitialized version of __kmp_i_lock_table.table. It does not solve the use-after-free race in which omp_set_lock obtains a pointer to __kmp_i_lock_table.table before the table is updated and then dereferences it afterwards; that race is far less likely and can be handled in a separate patch.

The unit test usually segfaults on the current trunk revision and passes with the patch.

Patch by Adam Azarchs

Differential Revision: https://reviews.llvm.org/D39439

llvm-svn: 317115
commit 3d18a37ca9
parent 435151ad75
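For context on why the ordering in the hunk below matters, here is a minimal, self-contained sketch of the pattern (the type and function names are hypothetical stand-ins, not the actual kmp_lock.cpp internals, and the sketch omits the atomics and barriers a production implementation would also want): a writer grows a pointer table while readers index into it without taking the writer's lock. If the writer publishes the new table before copying the old entries into it, a reader can observe uninitialized slots; copying first and publishing the fully populated table last, as the patch does, removes that window.

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the indirect-lock table: readers index into
 * `table` without holding any lock; only growers serialize on a mutex. */
typedef struct {
  void **table; /* `size` entry pointers, read lock-free */
  size_t size;
} entry_table_t;

/* Racy growth (analogous to the code being removed): the new, still-empty
 * table is published before the old entries are copied into it, so a
 * concurrent reader can see uninitialized slots. */
static void grow_racy(entry_table_t *t) {
  void **old_table = t->table;
  t->table = calloc(2 * t->size, sizeof(void *)); /* visible too early */
  memcpy(t->table, old_table, t->size * sizeof(void *));
  free(old_table);
  t->size *= 2;
}

/* Fixed ordering (analogous to the patched code): populate a private copy,
 * then publish it with a single pointer store, so readers see either the
 * old table or a complete new one -- never a half-copied one. */
static void grow_publish_last(entry_table_t *t) {
  void **new_table = calloc(2 * t->size, sizeof(void *));
  memcpy(new_table, t->table, t->size * sizeof(void *));
  void **old_table = t->table;
  t->table = new_table;
  free(old_table); /* a reader that cached old_table can still hit the
                      use-after-free the commit message defers */
  t->size *= 2;
}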
@@ -3058,11 +3058,12 @@ kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
   if (idx == __kmp_i_lock_table.size) {
     // Double up the space for block pointers
     int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
-    kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
-    __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(
+    kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
         2 * row * sizeof(kmp_indirect_lock_t *));
-    KMP_MEMCPY(__kmp_i_lock_table.table, old_table,
+    KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
                row * sizeof(kmp_indirect_lock_t *));
+    kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
+    __kmp_i_lock_table.table = new_table;
     __kmp_free(old_table);
     // Allocate new objects in the new blocks
     for (int i = row; i < 2 * row; ++i)
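To make the reader side of the race concrete, a hypothetical lock-free lookup over the same stand-in table type from the sketch above (again, not the actual runtime code) shows both why publishing the table last helps and where the deferred use-after-free remains:

/* Hypothetical reader, analogous to omp_set_lock resolving an indirect
 * lock index without taking __kmp_global_lock. */
static void *lookup(entry_table_t *t, size_t idx) {
  void **tbl = t->table; /* (1) load the published table pointer          */
                         /* (2) a grower may swap in a new table here and */
                         /*     free the one `tbl` still points to        */
  return tbl[idx];       /* with the patch, `tbl` is never half-initialized,
                          * but (2) can still make this a use-after-free  */
}

That residual window is what the commit message calls "far less likely" and leaves to a separate patch.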
@@ -0,0 +1,42 @@
+// RUN: %libomp-compile-and-run
+#include "omp_testsuite.h"
+#include <stdio.h>
+
+// This should be slightly less than KMP_I_LOCK_CHUNK, which is 1024
+#define LOCKS_PER_ITER 1000
+#define ITERATIONS (REPETITIONS + 1)
+
+// This tests concurrently using locks on one thread while initializing new
+// ones on another thread. This exercises the global lock pool.
+int test_omp_init_lock() {
+  int i;
+  omp_lock_t lcks[ITERATIONS * LOCKS_PER_ITER];
+#pragma omp parallel for schedule(static) num_threads(NUM_TASKS)
+  for (i = 0; i < ITERATIONS; i++) {
+    int j;
+    omp_lock_t *my_lcks = &lcks[i * LOCKS_PER_ITER];
+    for (j = 0; j < LOCKS_PER_ITER; j++) {
+      omp_init_lock(&my_lcks[j]);
+    }
+    for (j = 0; j < LOCKS_PER_ITER * 100; j++) {
+      omp_set_lock(&my_lcks[j % LOCKS_PER_ITER]);
+      omp_unset_lock(&my_lcks[j % LOCKS_PER_ITER]);
+    }
+  }
+  // Wait until all repetitions are done. The test is exercising growth of
+  // the global lock pool, which does not shrink when no locks are allocated.
+  {
+    int j;
+    for (j = 0; j < ITERATIONS * LOCKS_PER_ITER; j++) {
+      omp_destroy_lock(&lcks[j]);
+    }
+  }
+
+  return 0;
+}
+
+int main() {
+  // No use repeating this test, since it's exercising a private global pool
+  // which is not reset between test iterations.
+  return test_omp_init_lock();
+}