forked from OSchip/llvm-project
parent 6323ddf99c
commit 3731076997
@@ -11,14 +11,14 @@ int test_omp_get_num_threads()
  nthreads_lib = -1;

#pragma omp parallel
  {
#pragma omp critical
    {
      nthreads++;
    } /* end of critical */
#pragma omp single
    {
      nthreads_lib = omp_get_num_threads ();
    } /* end of single */
  } /* end of parallel */

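(The suite's tests are each driven from a small main(); a minimal driver sketch in the suite's usual style — the REPETITIONS macro and the return convention are assumptions taken from omp_testsuite.h, not part of this diff:)

int main()
{
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {   /* assumed repetition macro */
    if (!test_omp_get_num_threads()) {  /* tests return 1 on success */
      num_failed++;
    }
  }
  return num_failed;                    /* nonzero exit marks failure */
}
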
@@ -9,11 +9,11 @@ int test_omp_get_wtime()
  double start;
  double end;
  double measured_time;
  double wait_time = 5.0;
  start = 0;
  end = 0;
  start = omp_get_wtime();
  my_sleep (wait_time);
  end = omp_get_wtime();
  measured_time = end-start;
  return ((measured_time > 0.97 * wait_time) && (measured_time < 1.03 * wait_time)) ;

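(The pass criterion is a measured interval within 3% of the requested 5-second sleep. my_sleep() is a suite helper not shown in this diff; a portable sketch assuming POSIX nanosleep is available — the real helper may be implemented differently:)

#include <time.h>

/* Assumed stand-in for the suite's my_sleep() helper. */
static void my_sleep(double sleeptime)
{
  struct timespec ts;
  ts.tv_sec = (time_t)sleeptime;
  ts.tv_nsec = (long)((sleeptime - (double)ts.tv_sec) * 1.0E9);
  nanosleep(&ts, NULL);  /* early wakeups ignored in this sketch */
}
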
@@ -4,7 +4,7 @@
/*
 * Checks that false is returned when called from serial region
 * and true is returned when called within parallel region.
 */
int test_omp_in_parallel()
{

@@ -32,7 +32,7 @@ int test_omp_atomic()
  double dpt, div;
  int logicsArray[LOOPCOUNT];
  logics = logicsArray;

  sum = 0;
  diff = 0;
  product = 1;
@@ -46,17 +46,17 @@ int test_omp_atomic()
#pragma omp atomic
      sum += i;
    }
  }
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  if (known_sum != sum)
  {
    fprintf(stderr,
            "Error in sum with integers: Result was %d instead of %d.\n",
            sum, known_sum);
    result++;
  }

  // difference of integers test
#pragma omp parallel
  {
@@ -113,7 +113,7 @@ int test_omp_atomic()
#pragma omp atomic
      ddiff -= pow (dt, i);
    }
  }
  if (fabs (ddiff) > rounding_error) {
    fprintf (stderr,
             "Error in difference with doubles: Result was %E instead of 0.0\n",
@@ -157,7 +157,7 @@ int test_omp_atomic()
             product);
    result++;
  }

  // division of doubles test
  div = 5.0E+5;
#pragma omp parallel
@@ -266,7 +266,7 @@ int test_omp_atomic()
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
  {
    int i;
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
@@ -309,7 +309,7 @@ int test_omp_atomic()
#pragma omp atomic
      exclusiv_bit_or ^= logics[i];
    }
  }
  if (!exclusiv_bit_or) {
    result++;
@@ -326,7 +326,7 @@ int test_omp_atomic()
#pragma omp atomic
      x <<= 1;
    }
  }
  if ( x != 1024) {
    result++;

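(Each arithmetic check in this test has the same shape: a shared accumulator updated under #pragma omp atomic, then compared with a closed-form value. A minimal sketch of the integer-sum region these hunks belong to; the exact surrounding lines are an assumption:)

  sum = 0;
#pragma omp parallel
  {
    int i;
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
#pragma omp atomic
      sum += i;   /* atomic makes the read-modify-write indivisible */
    }
  }
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;   /* Gauss sum */
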
@@ -1,9 +1,9 @@
// RUN: %libomp-compile && env OMP_WAIT_POLICY=active %libomp-run active
// RUN: %libomp-compile && env OMP_WAIT_POLICY=passive %libomp-run passive
//
// OMP_WAIT_POLICY=active should imply blocktime == INT_MAX
// i.e., threads spin-wait forever
// OMP_WAIT_POLICY=passive should imply blocktime == 0
// i.e., threads immediately sleep
#include <stdio.h>
#include <string.h>
@@ -25,7 +25,7 @@ int main(int argc, char** argv)
  }

  blocktime = kmp_get_blocktime();

  env_var_value = argv[1];
  if (!strcmp(env_var_value, "active")) {
    retval = (blocktime != INT_MAX);

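(The hunk ends inside the active branch, where a blocktime other than INT_MAX counts as failure. The passive branch is not shown in this diff; it presumably mirrors the check, along the lines of:)

  } else if (!strcmp(env_var_value, "passive")) {
    retval = (blocktime != 0);   /* passive waiting should mean blocktime == 0 */
  }
  return retval;                 /* nonzero return marks the test failed */
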
@@ -20,7 +20,7 @@ int test_omp_lock()
      omp_set_lock(&lck);
#pragma omp flush
      nr_threads_in_single++;
#pragma omp flush
      nr_iterations++;
      nr_threads_in_single--;
      result = result + nr_threads_in_single;

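(While the lock is held, the counter is incremented, sampled, and decremented, so result stays 0 only if no two threads are ever inside at once. The region presumably ends by releasing the lock; a sketch of the likely continuation, not shown in this diff:)

      omp_unset_lock(&lck);   /* release so other iterations can enter */
    }
  }
  omp_destroy_lock(&lck);
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
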
@@ -12,14 +12,14 @@ int test_omp_nest_lock()
  int i;

  omp_init_nest_lock(&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for(i = 0; i < LOOPCOUNT; i++) {
      omp_set_nest_lock(&lck);
#pragma omp flush
      nr_threads_in_single++;
#pragma omp flush
      nr_iterations++;
      nr_threads_in_single--;
      result = result + nr_threads_in_single;

@@ -12,7 +12,7 @@ int test_omp_test_lock()
  int i;

  omp_init_lock (&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++) {
@@ -20,7 +20,7 @@ int test_omp_test_lock()
      {};
#pragma omp flush
      nr_threads_in_single++;
#pragma omp flush
      nr_iterations++;
      nr_threads_in_single--;
      result = result + nr_threads_in_single;

@@ -12,7 +12,7 @@ int test_omp_test_nest_lock()
  int i;

  omp_init_nest_lock (&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++)
@@ -22,7 +22,7 @@ int test_omp_test_nest_lock()
      {};
#pragma omp flush
      nr_threads_in_single++;
#pragma omp flush
      nr_iterations++;
      nr_threads_in_single--;
      result = result + nr_threads_in_single;

@@ -12,7 +12,7 @@ int test_omp_master()

#pragma omp parallel
  {
#pragma omp master
    {
#pragma omp critical
      {

@@ -13,7 +13,7 @@ int test_omp_master_3()

#pragma omp parallel
  {
#pragma omp master
    {
      int tid = omp_get_thread_num();
      if (tid != 0) {

@@ -19,7 +19,7 @@ int test_omp_parallel_copyin()
  {
    /*printf("sum1=%d\n",sum1);*/
    int i;
#pragma omp for
    for (i = 1; i < 1000; i++) {
      sum1 = sum1 + i;
    } /*end of for*/
@@ -28,7 +28,7 @@ int test_omp_parallel_copyin()
      sum = sum + sum1;
      num_threads++;
    } /*end of critical*/
  } /* end of parallel*/
  known_sum = (999 * 1000) / 2 + 7 * num_threads;
  return (known_sum == sum);
}

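(A note on the expected value: sum1 is the threadprivate variable set to 7 before the region, and copyin gives every thread's copy that value, hence the per-thread offset:)

/* known_sum = 1 + 2 + ... + 999           -> (999 * 1000) / 2
 *           + 7 per thread (copyin'd initial value of sum1)
 *           = (999 * 1000) / 2 + 7 * num_threads */
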
@@ -17,7 +17,7 @@ int test_omp_parallel_default()
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
      mysum = mysum + i;
    }
#pragma omp critical
    {
      sum = sum + mysum;

@@ -18,7 +18,7 @@ int test_omp_parallel_firstprivate()
  {
    /*printf("sum1=%d\n",sum1);*/
    int i;
#pragma omp for
    for (i = 1; i < 1000; i++) {
      sum1 = sum1 + i;
    } /*end of for*/
@@ -27,7 +27,7 @@ int test_omp_parallel_firstprivate()
      sum = sum + sum1;
      num_threads++;
    } /*end of critical*/
  } /* end of parallel*/
  known_sum = (999 * 1000) / 2 + 7 * num_threads;
  return (known_sum == sum);
}

@@ -17,7 +17,7 @@ int test_omp_parallel_if()
    mysum = 0;
    for (i = 1; i <= LOOPCOUNT; i++) {
      mysum = mysum + i;
    }
#pragma omp critical
    {
      sum = sum + mysum;

@@ -18,7 +18,7 @@ int test_omp_parallel_private()
    int i;
    sum1 = 7;
    /*printf("sum1=%d\n",sum1);*/
#pragma omp for
    for (i = 1; i < 1000; i++) {
      sum1 = sum1 + i;
    }

@@ -49,7 +49,7 @@ int test_omp_parallel_reduction()

  if(known_sum!=sum) {
    result++;
    fprintf(stderr,"Error in sum with integers: Result was %d instead of %d\n",sum,known_sum);
  }

  diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
@@ -76,7 +76,7 @@ int test_omp_parallel_reduction()
  }

  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
  }

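(These checks guard regions built on the reduction clause; a minimal sketch of the integer-sum variant they verify — the exact loop header in the file is an assumption:)

  sum = 0;
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for reduction(+:sum)
  for (i = 1; i <= LOOPCOUNT; i++) {
    sum += i;   /* private partial sums are combined on region exit */
  }
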
@@ -11,14 +11,14 @@ int test_omp_parallel_shared()
  sum = 0;
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 ;

#pragma omp parallel private(i) shared(sum)
  {

    int mysum = 0;
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
      mysum = mysum + i;
    }
#pragma omp critical
    {
      sum = sum + mysum;

@@ -9,14 +9,14 @@ int test_omp_task()
  int tids[NUM_TASKS];
  int i;

#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* First we have to store the value of the loop index in a new variable
         * which will be private for each task because otherwise it will be overwritten
         * if the execution of the task takes longer than the time which is needed to
         * enter the next step of the loop!
         */
        int myi;

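(The hunk stops right after int myi; the idiom the comment describes is to snapshot the loop index so each task captures a stable copy. A plausible continuation — the task body details are assumptions:)

        myi = i;   /* snapshot the loop index before the task is deferred */
#pragma omp task
        {
          my_sleep (SLEEPTIME);                /* let the loop run ahead */
          tids[myi] = omp_get_thread_num();    /* record the executing thread */
        } /* end of omp task */
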
@@ -10,14 +10,14 @@ int test_omp_task_final()
  int includedtids[NUM_TASKS];
  int i;
  int error = 0;
#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* First we have to store the value of the loop index in a new variable
         * which will be private for each task because otherwise it will be overwritten
         * if the execution of the task takes longer than the time which is needed to
         * enter the next step of the loop!
         */
        int myi;

@@ -27,7 +27,7 @@ int test_omp_task_firstprivate()

      /* check if calculated sum was right */
      if (sum != known_sum) {
#pragma omp critical
        { result++; }
      }
    } /* omp task */

@@ -12,7 +12,7 @@ int test_omp_task_if()

  count=0;
  condition_false = (count == 1);
#pragma omp parallel
  {
#pragma omp single
    {

@@ -8,7 +8,7 @@
int main (void) {
  int passed;

  passed = (omp_get_max_task_priority() == 42);
  printf("Got %d\n", omp_get_max_task_priority());

  if (passed) {

@@ -29,7 +29,7 @@ int test_omp_task_private()
      }
      /* check if calculated sum was right */
      if (sum != known_sum) {
#pragma omp critical
        result++;
      }
    } /* end of omp task */

@@ -12,17 +12,17 @@ int test_omp_taskwait()
  int i;

  /* fill array */
  for (i = 0; i < NUM_TASKS; i++)
    array[i] = 0;

#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* First we have to store the value of the loop index in a new variable
         * which will be private for each task because otherwise it will be overwritten
         * if the execution of the task takes longer than the time which is needed to
         * enter the next step of the loop!
         */
        int myi;
@@ -35,11 +35,11 @@ int test_omp_taskwait()
      } /* end of for */
#pragma omp taskwait
      /* check if all tasks were finished */
      for (i = 0; i < NUM_TASKS; i++)
        if (array[i] != 1)
          result1++;

      /* generate some more tasks which now shall overwrite
       * the values in the tids array */
      for (i = 0; i < NUM_TASKS; i++) {
        int myi;

@@ -2,7 +2,7 @@
/*
 * Threadprivate is tested in 2 ways:
 * 1. The global variable declared as threadprivate should have
 *    local copy for each thread. Otherwise race condition and
 *    wrong result.
 * 2. If the value of local copy is retained for the two adjacent
 *    parallel regions
@@ -21,7 +21,7 @@ int test_omp_threadprivate()
{
  int sum = 0;
  int known_sum;
  int i;
  int iter;
  int *data;
  int size;
@@ -29,10 +29,10 @@ int test_omp_threadprivate()
  int my_random;
  omp_set_dynamic(0);

#pragma omp parallel private(i)
  {
    sum0 = 0;
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
      sum0 = sum0 + i;
    } /*end of for*/
@@ -40,7 +40,7 @@ int test_omp_threadprivate()
    {
      sum = sum + sum0;
    } /*end of critical */
  } /* end of parallel */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  if (known_sum != sum ) {
    fprintf (stderr, " known_sum = %d, sum = %d\n", known_sum, sum);
@@ -59,7 +59,7 @@ int test_omp_threadprivate()

  srand(45);
  for (iter = 0; iter < 100; iter++) {
    my_random = rand(); /* random number generator is
                           called inside serial region*/

    /* the first parallel region is used to initialize myvalue
@@ -71,7 +71,7 @@ int test_omp_threadprivate()
      myvalue = data[rank] = my_random + rank;
    }

    /* the second parallel region verifies that the
     * value of "myvalue" is retained */
#pragma omp parallel reduction(+:num_failed)
    {

@@ -26,7 +26,7 @@ int test_omp_threadprivate_for()
    {
      sum = sum + sum0;
    }
  } /* end of parallel */

  if (known_sum != sum ) {
    fprintf(stderr, " known_sum = %d, sum = %d\n", known_sum, sum);

@@ -22,7 +22,7 @@ int test_set_schedule_0()
      if(a > 10)
        exit(1);
    }
  }
  return a==10;
}

@@ -3,7 +3,7 @@
#include <math.h>
#include "omp_testsuite.h"

/* Utility function to check that i is increasing monotonically
   with each call */
static int check_i_islarger (int i)
{
@@ -24,19 +24,19 @@ int test_omp_for_firstprivate()
      threadsnum=omp_get_num_threads();
    }
    /* sum0 = 0; */

    int i;
#pragma omp for firstprivate(sum0)
    for (i = 1; i <= LOOPCOUNT; i++) {
      sum0 = sum0 + i;
      sum1 = sum0;
    } /* end of for */

#pragma omp critical
    {
      sum = sum + sum1;
    } /* end of critical */
  } /* end of parallel */
  known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  return (known_sum == sum);
}

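(Reading of the known_sum expression: firstprivate(sum0) starts each thread's copy from the pre-loop value of sum0, evidently 12345, so each of the threadsnum threads contributes that offset once on top of the plain loop total:)

/* known_sum = 12345 * threadsnum                  (per-thread initial sum0)
 *           + (LOOPCOUNT * (LOOPCOUNT + 1)) / 2   (the loop itself) */
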
@@ -11,7 +11,7 @@ int test_omp_for_lastprivate()
  int sum = 0;
  int known_sum;
  int i0;

  i0 = -1;

#pragma omp parallel
@@ -30,7 +30,7 @@ int test_omp_for_lastprivate()
    {
      sum = sum + sum0;
    } /* end of critical */
  } /* end of parallel */

  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  fprintf(stderr, "known_sum = %d , sum = %d\n",known_sum,sum);

@@ -13,14 +13,14 @@ int test_omp_for_nowait()
  result = 0;
  count = 0;

#pragma omp parallel
  {
    int rank;
    int i;

    rank = omp_get_thread_num();

#pragma omp for nowait
    for (i = 0; i < LOOPCOUNT; i++) {
      if (i == 0) {
        my_sleep(SLEEPTIME);
@@ -28,7 +28,7 @@ int test_omp_for_nowait()
#pragma omp flush(count)
      }
    }

#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++) {
#pragma omp flush(count)

@@ -5,7 +5,7 @@

static int last_i = 0;

/* Utility function to check that i is increasing monotonically
   with each call */
static int check_i_islarger (int i)
{
@@ -44,10 +44,10 @@ int test_omp_for_private()
    {
      sum = sum + sum1;
    } /*end of critical*/
  } /* end of parallel*/
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  return (known_sum == sum);
}

int main()
{

@@ -54,7 +54,7 @@ int test_omp_for_reduction ()
  /************************************************************************/

  /**** Testing integer addition ****/
#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(+:sum)
@@ -65,12 +65,12 @@ int test_omp_for_reduction ()
  if (known_sum != sum) {
    result++;
    fprintf (stderr, "Error in sum with integers: Result was %d"
             " instead of %d.\n", sum, known_sum);
  }

  /**** Testing integer subtraction ****/
  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(-:diff)
@@ -85,7 +85,7 @@ int test_omp_for_reduction ()
  }

  /**** Testing integer multiplication ****/
#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(*:product)
@@ -111,16 +111,16 @@ int test_omp_for_reduction ()
    dpt *= dt;
  }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(+:dsum)
    for (j = 0; j < DOUBLE_DIGITS; j++) {
      dsum += pow (dt, j);
    }
  }
  if (fabs (dsum - dknown_sum) > rounding_error) {
    result++;
    fprintf (stderr, "\nError in sum with doubles: Result was %f"
             " instead of: %f (Difference: %E)\n",
             dsum, dknown_sum, dsum-dknown_sum);
@@ -128,7 +128,7 @@ int test_omp_for_reduction ()

  /**** Testing double subtraction ****/
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(-:ddiff)
@@ -152,7 +152,7 @@ int test_omp_for_reduction ()
    logics[i] = 1;
  }

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(&&:logic_and)
@@ -168,7 +168,7 @@ int test_omp_for_reduction ()
  logic_and = 1;
  logics[LOOPCOUNT / 2] = 0;

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(&&:logic_and)
@@ -186,10 +186,10 @@ int test_omp_for_reduction ()
    logics[i] = 0;
  }

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(||:logic_or)
    for (j = 0; j < LOOPCOUNT; ++j) {
      logic_or = logic_or || logics[j];
    }
@@ -202,7 +202,7 @@ int test_omp_for_reduction ()
  logic_or = 0;
  logics[LOOPCOUNT / 2] = 1;

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(||:logic_or)
@@ -224,10 +224,10 @@ int test_omp_for_reduction ()
    logics[i] = 1;
  }

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(&:bit_and)
    for (j = 0; j < LOOPCOUNT; ++j) {
      bit_and = (bit_and & logics[j]);
    }
@@ -240,7 +240,7 @@ int test_omp_for_reduction ()
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(&:bit_and)
@@ -258,7 +258,7 @@ int test_omp_for_reduction ()
    logics[i] = 0;
  }

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(|:bit_or)
@@ -274,7 +274,7 @@ int test_omp_for_reduction ()
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(|:bit_or)
@@ -292,7 +292,7 @@ int test_omp_for_reduction ()
    logics[i] = 0;
  }

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(^:exclusiv_bit_or)
@@ -308,7 +308,7 @@ int test_omp_for_reduction ()
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;

#pragma omp parallel
  {
    int j;
#pragma omp for schedule(dynamic,1) reduction(^:exclusiv_bit_or)

@@ -23,7 +23,7 @@ int test_omp_for_auto()
  // threads 0 and 3 did not, threads 1 and 2 did
  int max_threads = omp_get_max_threads();
  int* active_threads = (int*)malloc(sizeof(int)*max_threads);
  for(j = 0; j < max_threads; j++)
    active_threads[j] = 0;

#pragma omp parallel
@@ -36,7 +36,7 @@ int test_omp_for_auto()
      sum0 = sum0 + i;
      sum1 = sum0;
    }

#pragma omp critical
    {
      sum = sum + sum1;

@@ -26,7 +26,7 @@ int test_omp_for_schedule_dynamic()
  int tmp_count = 0; /*dispatch times*/
  int *tmp; /*store chunk size for each dispatch*/
  int result = 0;

  tids = tidsArray;

#pragma omp parallel private(tid) shared(tids)
@@ -69,7 +69,7 @@ int test_omp_for_schedule_dynamic()
      /* result += ((tmp[i] / chunk_size) - 1); */
    }
  }
  if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) {
    result++;
    fprintf(stderr,"the last dispatch has wrong chunksize.\n");
  }

@@ -60,7 +60,7 @@ int test_omp_for_schedule_guided()
    threads = 2;
  }

  /* Now the real parallel work:
   * Each thread will start immediately with the first chunk.
   */
#pragma omp parallel shared(tids,maxiter)
@@ -82,7 +82,7 @@ int test_omp_for_schedule_guided()
        }
      }
      /*printf ("thread %d sleeping\n", tid);*/
#pragma omp flush(maxiter,notout)
      while (notout && (count < MAX_TIME) && (maxiter == j)) {
#pragma omp flush(maxiter,notout)
        my_sleep (SLEEPTIME);
@@ -126,8 +126,8 @@ int test_omp_for_schedule_guided()
   */
  // fprintf(stderr,"# global_chunknr thread local_chunknr chunksize\n");
  for(i = 1; i <= CFSMAX_SIZE; ++i) {
    if (last_threadnr==tids[i]) {
      determined_chunksize++;
    } else {
      /* fprintf(stderr, "%d\t%d\t%d\t%d\n", global_chunknr,
         last_threadnr, local_chunknr[last_threadnr], m); */
@@ -145,15 +145,15 @@ int test_omp_for_schedule_guided()
   */
  global_chunknr = 0;
  determined_chunksize = 1;
  last_threadnr = tids[0];
  for (i = 1; i <= CFSMAX_SIZE; ++i) {
    /* If the threadnumber was the same as before increase the
     * detected chunksize for this chunk otherwise set the detected
     * chunksize again to one and save the number of the next
     * thread in last_threadnr.
     */
    if (last_threadnr == tids[i]) {
      determined_chunksize++;
    } else {
      chunksizes[global_chunknr] = determined_chunksize;
      global_chunknr++;
@@ -167,14 +167,14 @@ int test_omp_for_schedule_guided()
  fprintf(stderr, "found\texpected\tconstant\n");
#endif

  /* identify the constant c for the exponential
     decrease of the chunksize */
  expected_chunk_size = openwork / threads;
  c = (double) chunksizes[0] / expected_chunk_size;

  for (i = 0; i < global_chunknr; i++) {
    /* calculate the new expected chunksize */
    if (expected_chunk_size > 1)
      expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
    fprintf(stderr, "%8d\t%8d\t%lf\n", chunksizes[i],
@@ -199,7 +199,7 @@ int test_omp_for_schedule_guided()

    /* calculating the remaining amount of work */
    openwork -= chunksizes[i];
    }
  }
  return result;
}

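(The model this analysis fits, stated as a comment: under schedule(guided), each dispatched chunk is roughly the remaining work divided by the thread count, scaled by an implementation constant, so chunk sizes decay geometrically:)

/* expected first chunk:  openwork / threads
 * calibration:           c = chunksizes[0] / (openwork / threads)
 * subsequent chunks:     chunk_i ~ c * openwork_i / threads,
 *                        openwork_{i+1} = openwork_i - chunksizes[i] */
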
@@ -87,7 +87,7 @@ int test_omp_for_schedule_static()
  /**** analysing the data in array tids ****/

  lasttid = tids[0];
  tmp_count = 0;

  for (i = 0; i < CFSMAX_SIZE + 1; ++i) {
    /* If the work was done by the same thread increase tmp_count by one. */
@@ -99,9 +99,9 @@ int test_omp_for_schedule_static()
      continue;
    }

    /* Check if the next thread has the right thread number. When finding
     * threadnumber -1 the end should be reached.
     */
    if (tids[i] == (lasttid + 1) % threads || tids[i] == -1) {
      /* checking for the right chunk size */
      if (tmp_count == chunk_size) {

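(The (lasttid + 1) % threads test encodes the round-robin contract this test expects from static scheduling with an explicit chunk size:)

/* schedule(static, chunk_size): iteration i lies in chunk i / chunk_size,
 * and that chunk runs on thread (i / chunk_size) % threads. */
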
@@ -48,7 +48,7 @@ int test_omp_for_schedule_static_3()
  if (threads < 2) {
    omp_set_num_threads(2);
    threads = 2;
  }
  fprintf (stderr,"Using an internal count of %d\nUsing a"
           " specified chunksize of %d\n", CFSMAX_SIZE, chunk_size);
  tids[CFSMAX_SIZE] = -1; /* setting endflag */
@@ -95,10 +95,10 @@ int test_omp_for_schedule_static_3()
  /**** analysing the data in array tids ****/

  lasttid = tids[0];
  tmp_count = 0;

  for (i = 0; i < CFSMAX_SIZE + 1; ++i) {
    /* If the work was done by the same thread
       increase tmp_count by one. */
    if (tids[i] == lasttid) {
      tmp_count++;
@@ -108,9 +108,9 @@ int test_omp_for_schedule_static_3()
      continue;
    }

    /* Check if the next thread has the right thread number.
     * When finding threadnumber -1 the end should be reached.
     */
    if (tids[i] == (lasttid + 1) % threads || tids[i] == -1) {
      /* checking for the right chunk size */
      if (tmp_count == chunk_size) {
@@ -148,7 +148,7 @@ int test_omp_for_schedule_static_3()
  }

  /* Now we check if several loop regions in one parallel region have the
   * same logical assignment of chunks to threads. We use the nowait
   * clause to increase the probability to get an error. */

  /* First we allocate some more memory */
@@ -156,7 +156,7 @@ int test_omp_for_schedule_static_3()
  tids = (int *) malloc (sizeof (int) * LOOPCOUNT);
  tids2 = (int *) malloc (sizeof (int) * LOOPCOUNT);

#pragma omp parallel
  {
    int n;

@@ -13,8 +13,8 @@ int i;
static int ii;
#pragma omp threadprivate(ii)

/*!
  Utility function: returns true if the passed argument is larger than
  the argument of the last call of this function.
*/
static int check_i_islarger2(int i)

@@ -49,7 +49,7 @@ int test_omp_parallel_for_reduction()
  if(known_sum!=sum) {
    result++;
    fprintf(stderr,"Error in sum with integers: Result was %d"
            " instead of %d\n",sum,known_sum);
  }

  diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
@@ -75,7 +75,7 @@ int test_omp_parallel_for_reduction()
    dsum += pow(dt,i);
  }
  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f"
            " instead of %f (Difference: %E)\n",
            dsum, dknown_sum, dsum-dknown_sum);

@@ -13,13 +13,6 @@ int test_omp_parallel_sections_firstprivate()

#pragma omp parallel sections firstprivate(sum0)
  {
#pragma omp section
    {
#pragma omp critical
      {
        sum= sum+sum0;
      }
    }
#pragma omp section
    {
#pragma omp critical
@@ -33,11 +26,18 @@ int test_omp_parallel_sections_firstprivate()
      {
        sum= sum+sum0;
      }
    }
#pragma omp section
    {
#pragma omp critical
      {
        sum= sum+sum0;
      }
    }
  }

  known_sum=11*3+7;
  return (known_sum==sum);
} /* end of check_section_firstprivate*/

int main()

@@ -15,7 +15,7 @@ int test_omp_parallel_sections_lastprivate()

#pragma omp parallel sections private(i,sum0) lastprivate(i0)
  {
#pragma omp section
    {
      sum0=0;
      for (i=1;i<400;i++) {
@@ -27,7 +27,7 @@ int test_omp_parallel_sections_lastprivate()
        sum= sum+sum0;
      }
    }
#pragma omp section
    {
      sum0=0;
      for(i=400;i<700;i++) {
@@ -39,7 +39,7 @@ int test_omp_parallel_sections_lastprivate()
        sum= sum+sum0;
      }
    }
#pragma omp section
    {
      sum0=0;
      for(i=700;i<1000;i++) {

@@ -14,7 +14,7 @@ int test_omp_parallel_sections_private()

#pragma omp parallel sections private(sum0, i)
  {
#pragma omp section
    {
      sum0=0;
      for (i=1;i<400;i++)
@@ -47,7 +47,7 @@ int test_omp_parallel_sections_private()
  }

  known_sum=(999*1000)/2+7;
  return (known_sum==sum);
} /* end of check_section_private*/

int main()

@@ -6,7 +6,7 @@
int test_omp_parallel_sections_reduction()
{
  int sum;
  int known_sum;
  double dpt;
  double dsum;
  double dknown_sum;
@@ -123,7 +123,7 @@ int test_omp_parallel_sections_reduction()
    }
  }
  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f"
            " instead of %f (Difference: %E)\n",
            dsum, dknown_sum, dsum-dknown_sum);
@@ -168,7 +168,7 @@ int test_omp_parallel_sections_reduction()
#pragma omp parallel sections private(i) reduction(*:product)
  {
#pragma omp section
    {
      for(i=1;i<3;i++) {
        product *= i;
      }
@@ -319,19 +319,19 @@ int test_omp_parallel_sections_reduction()
#pragma omp parallel sections private(i) reduction(&:bit_and)
  {
#pragma omp section
    {
      for(i=0;i<300;++i) {
        bit_and = (bit_and & logics[i]);
      }
    }
#pragma omp section
    {
      for(i=300;i<700;++i) {
        bit_and = (bit_and & logics[i]);
      }
    }
#pragma omp section
    {
      for(i=700;i<1000;++i) {
        bit_and = (bit_and & logics[i]);
      }
@@ -438,19 +438,19 @@ int test_omp_parallel_sections_reduction()
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
  {
#pragma omp section
    {
      for(i=0;i<300;++i) {
        exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
      }
    }
#pragma omp section
    {
      for(i=300;i<700;++i) {
        exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
      }
    }
#pragma omp section
    {
      for(i=700;i<1000;++i) {
        exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
      }

@@ -14,13 +14,6 @@ int test_omp_section_firstprivate()
  {
#pragma omp sections firstprivate(sum0)
    {
#pragma omp section
      {
#pragma omp critical
        {
          sum = sum + sum0;
        }
      }
#pragma omp section
      {
#pragma omp critical
@@ -34,11 +27,18 @@ int test_omp_section_firstprivate()
        {
          sum = sum + sum0;
        }
      }
#pragma omp section
      {
#pragma omp critical
        {
          sum = sum + sum0;
        }
      }
    }
  }
  known_sum = 11 * 3 + 7;
  return (known_sum == sum);
} /* end of check_section_firstprivate*/

int main()

@@ -17,7 +17,7 @@ int test_omp_section_lastprivate()
  {
#pragma omp sections lastprivate(i0) private(i,sum0)
    {
#pragma omp section
      {
        sum0 = 0;
        for (i = 1; i < 400; i++)
@@ -30,7 +30,7 @@ int test_omp_section_lastprivate()
          sum = sum + sum0;
        } /*end of critical*/
      } /* end of section */
#pragma omp section
      {
        sum0 = 0;
        for(i = 400; i < 700; i++)
@@ -43,7 +43,7 @@ int test_omp_section_lastprivate()
          sum = sum + sum0;
        } /*end of critical*/
      }
#pragma omp section
      {
        sum0 = 0;
        for(i = 700; i < 1000; i++)
@@ -57,7 +57,7 @@ int test_omp_section_lastprivate()
        } /*end of critical*/
      } /* end of section */
    } /* end of sections*/
  } /* end of parallel*/
  known_sum = (999 * 1000) / 2;
  return ((known_sum == sum) && (i0 == 999) );
}

@@ -16,7 +16,7 @@ int test_omp_section_private()
  {
#pragma omp sections private(sum0,i)
    {
#pragma omp section
      {
        sum0 = 0;
        for (i = 1; i < 400; i++)
@@ -25,7 +25,7 @@ int test_omp_section_private()
        {
          sum = sum + sum0;
        }
      }
#pragma omp section
      {
        sum0 = 0;
@@ -45,11 +45,11 @@ int test_omp_section_private()
        {
          sum = sum + sum0;
        }
      }
    } /*end of sections*/
  } /* end of parallel */
  known_sum = (999 * 1000) / 2 + 7;
  return (known_sum == sum);
} /* end of check_section_private*/

int main()

@@ -12,7 +12,7 @@ int test_omp_sections_nowait()
  result = 0;
  count = 0;

#pragma omp parallel
  {
    int rank;
    rank = omp_get_thread_num ();

@@ -128,7 +128,7 @@ int test_omp_sections_reduction()
    }
  }
  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f"
            " instead of %f (Difference: %E)\n",
            dsum, dknown_sum, dsum-dknown_sum);
@@ -177,7 +177,7 @@ int test_omp_sections_reduction()
#pragma omp sections private(i) reduction(*:product)
    {
#pragma omp section
      {
        for(i=1;i<3;i++) {
          product *= i;
        }
@@ -271,7 +271,7 @@ int test_omp_sections_reduction()
    logics[i]=0;
  }

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(||:logic_or)
    {
@@ -303,7 +303,7 @@ int test_omp_sections_reduction()
  logic_or = 0;
  logics[501]=1;

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(||:logic_or)
    {
@@ -336,24 +336,24 @@ int test_omp_sections_reduction()
    logics[i]=1;
  }

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(&:bit_and)
    {
#pragma omp section
      {
        for(i=0;i<300;++i) {
          bit_and = (bit_and & logics[i]);
        }
      }
#pragma omp section
      {
        for(i=300;i<700;++i) {
          bit_and = (bit_and & logics[i]);
        }
      }
#pragma omp section
      {
        for(i=700;i<1000;++i) {
          bit_and = (bit_and & logics[i]);
        }
@@ -368,7 +368,7 @@ int test_omp_sections_reduction()
  bit_and = 1;
  logics[501]=0;

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(&:bit_and)
    {
@@ -401,7 +401,7 @@ int test_omp_sections_reduction()
    logics[i]=0;
  }

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(|:bit_or)
    {
@@ -432,7 +432,7 @@ int test_omp_sections_reduction()
  bit_or = 0;
  logics[501]=1;

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(|:bit_or)
    {
@@ -465,24 +465,24 @@ int test_omp_sections_reduction()
    logics[i]=0;
  }

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
    {
#pragma omp section
      {
        for(i=0;i<300;++i) {
          exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
        }
      }
#pragma omp section
      {
        for(i=300;i<700;++i) {
          exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
        }
      }
#pragma omp section
      {
        for(i=700;i<1000;++i) {
          exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
        }
@@ -497,7 +497,7 @@ int test_omp_sections_reduction()
  exclusiv_bit_or = 0;
  logics[501]=1;

#pragma omp parallel
  {
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
    {

@@ -16,11 +16,11 @@ int test_omp_single()
#pragma omp parallel private(i)
  {
    for (i = 0; i < LOOPCOUNT; i++) {
#pragma omp single
      {
#pragma omp flush
        nr_threads_in_single++;
#pragma omp flush
        nr_iterations++;
        nr_threads_in_single--;
        result = result + nr_threads_in_single;

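(The counter pattern mirrors the lock tests: if single really admits one thread per loop iteration, the counter is back at 0 whenever it is sampled, so result accumulates nothing and nr_iterations ends at LOOPCOUNT; the flushes publish the shared counters between threads:)

/* Per iteration: enter single -> counter 1 -> leave -> counter 0.
 * Any overlap of two threads leaves result > 0. */
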
@@ -19,13 +19,13 @@ int test_omp_single_nowait()
    for (i = 0; i < LOOPCOUNT; i++) {
#pragma omp single nowait
      {
#pragma omp atomic
        nr_iterations++;
      }
    }
  }

#pragma omp parallel private(i)
  {
    my_iterations = 0;
    for (i = 0; i < LOOPCOUNT; i++) {

@@ -25,11 +25,11 @@ int test_omp_single_private()
    myit = 0;
    for (i = 0; i < LOOPCOUNT; i++) {
#pragma omp single private(nr_threads_in_single) nowait
      {
        nr_threads_in_single = 0;
#pragma omp flush
        nr_threads_in_single++;
#pragma omp flush
        myit++;
        myresult = myresult + nr_threads_in_single;
      }
@@ -41,7 +41,7 @@ int test_omp_single_private()
    }
  }
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
} /* end of check_single private */

int main()
{