| filename (string, 78–241 chars) | omp_pragma_line (string, 24–416 chars) | context_chars (int64, always 100) | text (string, 152–177k chars) |
|---|---|---|---|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/raster3d/r3.gradient/main.c
|
#pragma omp parallel for schedule
| 100
|
== max_i - 1) {
/* compute gradient */
/* disabled openMP <LOOP-START>* (static) private (k) */
for (k = 0; k <= j; k++) {
Rast3d_gradient_double(&(blocks[k].input), step,
&(blocks[k].dx), &(blocks[k].dy),
&(blocks[k].dz));
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule<OMP-END>
|
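A note on this first row: the captured `omp_pragma_line` is just `#pragma omp parallel for schedule`, which is incomplete on its own (a `schedule` clause requires a kind). The rest of the disabled directive survives in the "disabled openMP" comment inside the text, so the full form was presumably:

```c
/* reassembled from the disabled-openMP comment in the row above;
 * the schedule clause needs a kind such as static */
#pragma omp parallel for schedule(static) private(k)
```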
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/raster/r.proj/main.c
|
#pragma omp parallel for schedule(static)
| 100
|
does not always work,
* segfaults in the interpolation functions
* can happen */
<LOOP-START>for (col = 0; col < outcellhd.cols; col++) {
void *obufptr =
(void *)((const unsigned char *)obuffer + col * cell_size);
double xcoord1 = xcoord2 + (col)*outcellhd.ew_res;
double ycoord1 = ycoord2;
/* project coordinates in output matrix to */
/* coordinates in input matrix */
if (GPJ_transform(&oproj, &iproj, &tproj, PJ_FWD, &xcoord1,
&ycoord1, NULL) < 0) {
G_fatal_error(_("Error in %s"), "GPJ_transform()");
Rast_set_null_value(obufptr, 1, cell_type);
}
else {
/* convert to row/column indices of input matrix */
/* column index in input matrix */
double col_idx = (xcoord1 - incellhd.west) / incellhd.ew_res;
/* row index in input matrix */
double row_idx = (incellhd.north - ycoord1) / incellhd.ns_res;
/* and resample data point */
interpolate(ibuffer, obufptr, cell_type, col_idx, row_idx,
&incellhd);
}
/* obufptr = G_incr_void_ptr(obufptr, cell_size); */
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
G_math_d_x_dot_y(double *x, double *y, double *value, int rows)
{
int i;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * y[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
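This and the following blas_level_1.c rows all share one idiom: a serial accumulation loop parallelized with `reduction(+ : s)`, which gives each thread a private copy of `s` and sums the copies when the loop ends. A minimal standalone sketch of the same pattern (not a row from the dataset):

```c
#include <stdio.h>

/* Dot product with an OpenMP sum reduction, mirroring the
 * G_math_d_x_dot_y rows: each thread accumulates into its own
 * private s; OpenMP combines the partial sums after the loop. */
static double dot(const double *x, const double *y, int rows)
{
    int i;
    double s = 0.0;

#pragma omp parallel for schedule(static) reduction(+ : s)
    for (i = 0; i < rows; i++)
        s += x[i] * y[i];

    return s;
}

int main(void)
{
    double a[] = {1.0, 2.0, 3.0};
    double b[] = {4.0, 5.0, 6.0};

    printf("%f\n", dot(a, b, 3)); /* prints 32.000000 */
    return 0;
}
```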
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
*/
void G_math_d_euclid_norm(double *x, double *value, int rows)
{
int i;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * x[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
void G_math_d_asum_norm(double *x, double *value, int rows)
{
int i = 0;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += fabs(x[i]);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
void G_math_f_x_dot_y(float *x, float *y, float *value, int rows)
{
int i;
float s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * y[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
* */
void G_math_f_euclid_norm(float *x, float *value, int rows)
{
int i;
float s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * x[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) private(i) reduction(+ : s)
| 100
|
*
* */
void G_math_f_asum_norm(float *x, float *value, int rows)
{
int i;
float s = 0.0;
<LOOP-START>for (i = 0; i < rows; i++) {
s += fabs(x[i]);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
/
void G_math_i_x_dot_y(int *x, int *y, double *value, int rows)
{
int i;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * y[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
* */
void G_math_i_euclid_norm(int *x, double *value, int rows)
{
int i;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += x[i] * x[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/blas_level_1.c
|
#pragma omp parallel for schedule(static) reduction(+ : s)
| 100
|
*
* */
void G_math_i_asum_norm(int *x, double *value, int rows)
{
int i;
double s = 0.0;
<LOOP-START>for (i = rows - 1; i >= 0; i--) {
s += (double)abs(x[i]);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c
|
#pragma omp parallel for schedule(static) private(i, j, tmpval) \
| 100
|
le *b, int rows)
{
int i, j, k;
double tmpval = 0.0;
for (k = 0; k < rows - 1; k++) {
<LOOP-START>shared(k, A, b, rows)
for (i = k + 1; i < rows; i++) {
tmpval = A[i][k] / A[k][k];
b[i] = b[i] - tmpval * b[k];
for (j = k + 1; j < rows; j++) {
A[i][j] = A[i][j] - tmpval * A[k][j];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, tmpval) \<OMP-END>
|
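In this and several later rows the source pragma ends with a backslash continuation, so the `omp_pragma_line` field holds only the first physical line and the trailing clause (`shared(k, A, b, rows)` here) appears at the start of the `<LOOP-START>` span. Reassembled, the directive reads:

```c
#pragma omp parallel for schedule(static) private(i, j, tmpval) \
    shared(k, A, b, rows)
```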
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c
|
#pragma omp parallel for schedule(static) private(i, j) shared(k, A, rows)
| 100
|
n(double **A, double *b UNUSED, int rows)
{
int i, j, k;
for (k = 0; k < rows - 1; k++) {
<LOOP-START>for (i = k + 1; i < rows; i++) {
A[i][k] = A[i][k] / A[k][k];
for (j = k + 1; j < rows; j++) {
A[i][j] = A[i][j] - A[i][k] * A[k][j];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j) shared(k, A, rows)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c
|
#pragma omp parallel for schedule(static) private(i, j, sum_2) shared(A, k) \
| 100
|
ndwidth <= 0)
bandwidth = rows;
colsize = bandwidth;
for (k = 0; k < rows; k++) {
<LOOP-START>reduction(+ : sum_1)
for (j = 0; j < k; j++) {
sum_1 += A[k][j] * A[k][j];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, sum_2) shared(A, k) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c
|
#pragma omp parallel for schedule(static) private(i, j, sum_2) \
| 100
|
colsize = rows;
}
else {
colsize = k + bandwidth;
}
<LOOP-START>shared(A, k, sum_1, colsize)
for (i = k + 1; i < colsize; i++) {
sum_2 = 0.0;
for (j = 0; j < k; j++) {
sum_2 += A[i][j] * A[k][j];
}
A[i][k] = (A[i][k] - sum_2) / A[k][k];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, sum_2) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct.c
|
#pragma omp parallel for schedule(static) private(i, k) shared(A, rows)
| 100
|
A[k][k];
}
}
/* we need to copy the lower triangle matrix to the upper triangle */
<LOOP-START>for (k = 0; k < rows; k++) {
for (i = k + 1; i < rows; i++) {
A[k][i] = A[i][k];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, k) shared(A, rows)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_direct_cholesky_band.c
|
#pragma omp parallel for schedule(static) private(j, k, end, sum) \
| 100
|
G_fatal_error(_("Decomposition failed at row %i and col %i"), i, 0);
T[i][0] = sqrt(sum);
<LOOP-START>shared(A, T, i, bandwidth)
for (j = 1; j < bandwidth; j++) {
sum = A[i][j];
end = ((bandwidth - j) < (i + 1) ? (bandwidth - j) : (i + 1));
for (k = 1; k < end; k++)
sum -= T[i - k][k] * T[i - k][j + k];
T[i][j] = sum / T[i][0];
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(j, k, end, sum) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/sparse_matrix.c
|
#pragma omp parallel for schedule(static) private(i, j)
| 100
|
ows)
{
int i;
unsigned int j;
double **A = NULL;
A = G_alloc_matrix(rows, rows);
<LOOP-START>for (i = 0; i < rows; i++) {
for (j = 0; j < Asp[i]->cols; j++) {
A[i][Asp[i]->index[j]] = Asp[i]->values[j];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/sparse_matrix.c
|
#pragma omp parallel for schedule(static) private(i, j, nonull, count)
| 100
|
int nonull, count = 0;
G_math_spvector **Asp = NULL;
Asp = G_math_alloc_spmatrix(rows);
<LOOP-START>for (i = 0; i < rows; i++) {
nonull = 0;
/*Count the number of non-zero entries */
for (j = 0; j < rows; j++) {
if (A[i][j] > epsilon)
nonull++;
}
/*Allocate the sparse vector and insert values */
G_math_spvector *v = G_math_alloc_spvector(nonull);
count = 0;
for (j = 0; j < rows; j++) {
if (A[i][j] > epsilon) {
v->index[count] = j;
v->values[count] = A[i][j];
count++;
}
}
/*Add vector to sparse matrix */
G_math_add_spvector(Asp, v, i);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, nonull, count)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/lu.c
|
#pragma omp parallel for private(i, j, big, temp) shared(n, a, vv,
| 100
|
ector(n);
*d = 1.0;
/* this pragma works, but doesn't really help speed things up */
/* <LOOP-START>* is_singular) */
for (i = 0; i < n; i++) {
big = 0.0;
for (j = 0; j < n; j++)
if ((temp = fabs(a[i][j])) > big)
big = temp;
if (big == 0.0) {
is_singular = TRUE;
break;
}
vv[i] = 1.0 / big;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, big, temp) shared(n, a, vv,<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/lu.c
|
#pragma omp parallel for private(i, k, sum, dum) shared(j, n, a, vv, big, imax)
| 100
|
}
big = 0.0;
/* not very efficient, but this pragma helps speed things up a bit */
<LOOP-START>for (i = j; i < n; i++) {
sum = a[i][j];
for (k = 0; k < j; k++)
sum -= a[i][k] * a[k][j];
a[i][j] = sum;
if ((dum = vv[i] * fabs(sum)) >= big) {
big = dum;
imax = i;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, k, sum, dum) shared(j, n, a, vv, big, imax)<OMP-END>
|
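Worth flagging in this lu.c row: `big` and `imax` are listed as `shared`, yet every thread conditionally writes them inside the loop, so the pivot search as captured here contains a data race. A common race-free shape for this kind of search (a generic argmax sketch, not the GRASS code) is a thread-local scan merged under a critical section:

```c
/* Race-free parallel argmax: each thread scans its chunk with
 * private big_l/imax_l, then the partial results are merged one
 * thread at a time inside the critical section. Assumes the
 * values are nonnegative (e.g. absolute pivot magnitudes). */
static int argmax(const double *v, int n)
{
    double big = -1.0;
    int imax = -1;

#pragma omp parallel
    {
        double big_l = -1.0;
        int imax_l = -1;

#pragma omp for nowait
        for (int i = 0; i < n; i++)
            if (v[i] > big_l) { big_l = v[i]; imax_l = i; }

#pragma omp critical
        if (big_l > big) { big = big_l; imax = imax_l; }
    }
    return imax;
}
```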
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_krylov.c
|
#pragma omp parallel for schedule(static) private(i, j, sum) \
| 100
|
double sum;
assert(rows >= 0);
Msp = G_math_alloc_spmatrix(rows);
if (A != NULL) {
<LOOP-START>shared(A, Msp, rows, cols, prec)
for (i = 0; i < (unsigned int)rows; i++) {
G_math_spvector *spvect = G_math_alloc_spvector(1);
switch (prec) {
case G_MATH_ROWSCALE_EUKLIDNORM_PRECONDITION:
sum = 0;
for (j = 0; j < cols; j++)
sum += A[i][j] * A[i][j];
spvect->values[0] = 1.0 / sqrt(sum);
break;
case G_MATH_ROWSCALE_ABSSUMNORM_PRECONDITION:
sum = 0;
for (j = 0; j < cols; j++)
sum += fabs(A[i][j]);
spvect->values[0] = 1.0 / (sum);
break;
case G_MATH_DIAGONAL_PRECONDITION:
default:
spvect->values[0] = 1.0 / A[i][i];
break;
}
spvect->index[0] = i;
spvect->cols = 1;
;
G_math_add_spvector(Msp, spvect, i);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, sum) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gmath/solvers_krylov.c
|
#pragma omp parallel for schedule(static) private(i, j, sum) \
| 100
|
cols = 1;
;
G_math_add_spvector(Msp, spvect, i);
}
}
else {
<LOOP-START>shared(Asp, Msp, rows, cols, prec)
for (i = 0; i < (unsigned int)rows; i++) {
G_math_spvector *spvect = G_math_alloc_spvector(1);
switch (prec) {
case G_MATH_ROWSCALE_EUKLIDNORM_PRECONDITION:
sum = 0;
for (j = 0; j < Asp[i]->cols; j++)
sum += Asp[i]->values[j] * Asp[i]->values[j];
spvect->values[0] = 1.0 / sqrt(sum);
break;
case G_MATH_ROWSCALE_ABSSUMNORM_PRECONDITION:
sum = 0;
for (j = 0; j < Asp[i]->cols; j++)
sum += fabs(Asp[i]->values[j]);
spvect->values[0] = 1.0 / (sum);
break;
case G_MATH_DIAGONAL_PRECONDITION:
default:
for (j = 0; j < Asp[i]->cols; j++)
if (i == Asp[i]->index[j])
spvect->values[0] = 1.0 / Asp[i]->values[j];
break;
}
spvect->index[0] = i;
spvect->cols = 1;
;
G_math_add_spvector(Msp, spvect, i);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, sum) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/n_les_assemble.c
|
#pragma omp parallel for private(i, j, pos, count) schedule(static)
| 100
|
_assemble_les_2d: starting the parallel assemble loop");
/* Assemble the matrix in parallel */
<LOOP-START>for (count = 0; count < cell_type_count; count++) {
i = index_ij[count][0];
j = index_ij[count][1];
/*create the entries for the */
N_data_star *items = call->callback(data, geom, i, j);
/* we need a sparse vector pointer anytime */
G_math_spvector *spvect = NULL;
/*allocate a sparse vector */
if (les_type == N_SPARSE_LES) {
spvect = G_math_alloc_spvector(items->count);
}
/* initial conditions */
les->x[count] = N_get_array_2d_d_value(start_val, i, j);
/* the entry in the vector b */
les->b[count] = items->V;
/* pos describes the position in the sparse vector.
* the first entry is always the diagonal entry of the matrix*/
pos = 0;
if (les_type == N_SPARSE_LES) {
spvect->index[pos] = count;
spvect->values[pos] = items->C;
}
else {
les->A[count][count] = items->C;
}
/* western neighbour, entry is col - 1 */
if (i > 0) {
pos = make_les_entry_2d(i, j, -1, 0, count, pos, les, spvect,
cell_count, status, start_val, items->W,
cell_type);
}
/* eastern neighbour, entry col + 1 */
if (i < geom->cols - 1) {
pos = make_les_entry_2d(i, j, 1, 0, count, pos, les, spvect,
cell_count, status, start_val, items->E,
cell_type);
}
/* northern neighbour, entry row - 1 */
if (j > 0) {
pos = make_les_entry_2d(i, j, 0, -1, count, pos, les, spvect,
cell_count, status, start_val, items->N,
cell_type);
}
/* southern neighbour, entry row + 1 */
if (j < geom->rows - 1) {
pos = make_les_entry_2d(i, j, 0, 1, count, pos, les, spvect,
cell_count, status, start_val, items->S,
cell_type);
}
/*in case of a nine point star, we have additional entries */
if (items->type == N_9_POINT_STAR) {
/* north-western neighbour, entry is col - 1 row - 1 */
if (i > 0 && j > 0) {
pos = make_les_entry_2d(i, j, -1, -1, count, pos, les, spvect,
cell_count, status, start_val,
items->NW, cell_type);
}
/* north-eastern neighbour, entry col + 1 row - 1 */
if (i < geom->cols - 1 && j > 0) {
pos = make_les_entry_2d(i, j, 1, -1, count, pos, les, spvect,
cell_count, status, start_val,
items->NE, cell_type);
}
/* south-western neighbour, entry is col - 1 row + 1 */
if (i > 0 && j < geom->rows - 1) {
pos = make_les_entry_2d(i, j, -1, 1, count, pos, les, spvect,
cell_count, status, start_val,
items->SW, cell_type);
}
/* south-eastern neighbour, entry col + 1 row + 1 */
if (i < geom->cols - 1 && j < geom->rows - 1) {
pos = make_les_entry_2d(i, j, 1, 1, count, pos, les, spvect,
cell_count, status, start_val,
items->SE, cell_type);
}
}
/*How many entries in the les */
if (les->type == N_SPARSE_LES) {
spvect->cols = pos + 1;
G_math_add_spvector(les->Asp, spvect, count);
}
if (items)
G_free(items);
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, pos, count) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/n_les_assemble.c
|
#pragma omp parallel for private(i, j, k, pos, count) schedule(static)
| 100
|
}
}
}
G_debug(2, "N_assemble_les_3d: starting the parallel assemble loop");
<LOOP-START>for (count = 0; count < cell_type_count; count++) {
i = index_ij[count][0];
j = index_ij[count][1];
k = index_ij[count][2];
/*create the entries for the */
N_data_star *items = call->callback(data, geom, i, j, k);
G_math_spvector *spvect = NULL;
/*allocate a sparse vector */
if (les_type == N_SPARSE_LES)
spvect = G_math_alloc_spvector(items->count);
/* initial conditions */
les->x[count] = N_get_array_3d_d_value(start_val, i, j, k);
/* the entry in the vector b */
les->b[count] = items->V;
/* pos describes the position in the sparse vector.
* the first entry is always the diagonal entry of the matrix*/
pos = 0;
if (les_type == N_SPARSE_LES) {
spvect->index[pos] = count;
spvect->values[pos] = items->C;
}
else {
les->A[count][count] = items->C;
}
/* western neighbour, entry is col - 1 */
if (i > 0) {
pos = make_les_entry_3d(i, j, k, -1, 0, 0, count, pos, les, spvect,
cell_count, status, start_val, items->W,
cell_type);
}
/* eastern neighbour, entry col + 1 */
if (i < geom->cols - 1) {
pos = make_les_entry_3d(i, j, k, 1, 0, 0, count, pos, les, spvect,
cell_count, status, start_val, items->E,
cell_type);
}
/* northern neighbour, entry row -1 */
if (j > 0) {
pos = make_les_entry_3d(i, j, k, 0, -1, 0, count, pos, les, spvect,
cell_count, status, start_val, items->N,
cell_type);
}
/* southern neighbour, entry row +1 */
if (j < geom->rows - 1) {
pos = make_les_entry_3d(i, j, k, 0, 1, 0, count, pos, les, spvect,
cell_count, status, start_val, items->S,
cell_type);
}
/*only for a 7 star entry needed */
if (items->type == N_7_POINT_STAR || items->type == N_27_POINT_STAR) {
/* the upper cell (top), entry depth + 1 */
if (k < geom->depths - 1) {
pos = make_les_entry_3d(i, j, k, 0, 0, 1, count, pos, les,
spvect, cell_count, status, start_val,
items->T, cell_type);
}
/* the lower cell (bottom), entry depth - 1 */
if (k > 0) {
pos = make_les_entry_3d(i, j, k, 0, 0, -1, count, pos, les,
spvect, cell_count, status, start_val,
items->B, cell_type);
}
}
/*How many entries in the les */
if (les->type == N_SPARSE_LES) {
spvect->cols = pos + 1;
G_math_add_spvector(les->Asp, spvect, count);
}
if (items)
G_free(items);
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k, pos, count) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
*data;
int i, j;
data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
N_put_array_2d_c_value(data, i, j, 1);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
data;
int i, j;
data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
N_put_array_2d_d_value(data, i, j, (double)i * j);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,
1, FCELL_TYPE);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++) {
for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
N_put_array_3d_f_value(data, i, j, k, 1.0);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gradient.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,
1, DCELL_TYPE);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++)
for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
N_put_array_3d_f_value(data, i, j, k, (float)i * j * k);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gwflow.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
OLS_LOCAL, TEST_N_NUM_ROWS_LOCAL,
TEST_N_NUM_DEPTHS_LOCAL, 1, 1);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++)
for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {
for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {
if (j == 0) {
N_put_array_3d_d_value(data->phead, i, j, k, 50);
N_put_array_3d_d_value(data->phead_start, i, j, k, 50);
N_put_array_3d_d_value(data->status, i, j, k, 2);
}
else {
N_put_array_3d_d_value(data->phead, i, j, k, 40);
N_put_array_3d_d_value(data->phead_start, i, j, k, 40);
N_put_array_3d_d_value(data->status, i, j, k, 1);
}
N_put_array_3d_d_value(data->hc_x, i, j, k, 0.0001);
N_put_array_3d_d_value(data->hc_y, i, j, k, 0.0001);
N_put_array_3d_d_value(data->hc_z, i, j, k, 0.0001);
N_put_array_3d_d_value(data->q, i, j, k, 0.0);
N_put_array_3d_d_value(data->s, i, j, k, 0.001);
N_put_array_2d_d_value(data->r, i, j, 0.0);
N_put_array_3d_d_value(data->nf, i, j, k, 0.1);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_gwflow.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
wflow_data2d(TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL,
1, 1);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {
for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {
if (j == 0) {
N_put_array_2d_d_value(data->phead, i, j, 50);
N_put_array_2d_d_value(data->phead_start, i, j, 50);
N_put_array_2d_d_value(data->status, i, j, 2);
}
else {
N_put_array_2d_d_value(data->phead, i, j, 40);
N_put_array_2d_d_value(data->phead_start, i, j, 40);
N_put_array_2d_d_value(data->status, i, j, 1);
}
N_put_array_2d_d_value(data->hc_x, i, j, 30.0001);
N_put_array_2d_d_value(data->hc_y, i, j, 30.0001);
N_put_array_2d_d_value(data->q, i, j, 0.0);
N_put_array_2d_d_value(data->s, i, j, 0.001);
N_put_array_2d_d_value(data->r, i, j, 0.0);
N_put_array_2d_d_value(data->nf, i, j, 0.1);
N_put_array_2d_d_value(data->top, i, j, 20.0);
N_put_array_2d_d_value(data->bottom, i, j, 0.0);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j) shared(cols, rows, type, a) \
| 100
|
int i, j, res = 0;
rows = a->rows;
cols = a->cols;
type = N_get_array_2d_type(a);
<LOOP-START>reduction(+ : res)
for (j = 0; j < rows; j++) {
for (i = 0; i < cols; i++) {
if (type == CELL_TYPE) {
N_put_array_2d_c_value(a, i, j, (CELL)i * (CELL)j);
if (N_get_array_2d_c_value(a, i, j) != (CELL)i * (CELL)j)
res++;
}
if (type == FCELL_TYPE) {
N_put_array_2d_f_value(a, i, j, (FCELL)i * (FCELL)j);
if (N_get_array_2d_f_value(a, i, j) != (FCELL)i * (FCELL)j)
res++;
}
if (type == DCELL_TYPE) {
N_put_array_2d_d_value(a, i, j, (DCELL)i * (DCELL)j);
if (N_get_array_2d_d_value(a, i, j) != (DCELL)i * (DCELL)j)
res++;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(cols, rows, type, a) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j) shared(rows, cols, a) reduction(+ : res)
| 100
|
array_2d *a)
{
int rows, cols;
int i, j, res = 0;
cols = a->cols;
rows = a->rows;
<LOOP-START>for (j = 0; j < rows; j++) {
for (i = 0; i < cols; i++) {
N_put_array_2d_value_null(a, i, j);
if (!N_is_array_2d_value_null(a, i, j))
res++;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(rows, cols, a) reduction(+ : res)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j) shared(cols, rows, type, a, b) \
| 100
|
int i, j, res = 0;
cols = a->cols;
rows = a->rows;
type = N_get_array_2d_type(a);
<LOOP-START>reduction(+ : res)
for (j = 0; j < rows; j++) {
for (i = 0; i < cols; i++) {
if (type == CELL_TYPE) {
if (N_get_array_2d_c_value(a, i, j) !=
N_get_array_2d_c_value(b, i, j))
res++;
}
if (type == FCELL_TYPE) {
if (N_get_array_2d_f_value(a, i, j) !=
N_get_array_2d_f_value(b, i, j))
res++;
}
if (type == DCELL_TYPE) {
if (N_get_array_2d_d_value(a, i, j) !=
N_get_array_2d_d_value(b, i, j))
res++;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(cols, rows, type, a, b) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j, k) shared(depths, rows, cols, type, a) \
| 100
|
cols = a->cols;
rows = a->rows;
depths = a->depths;
type = N_get_array_3d_type(a);
<LOOP-START>reduction(+ : res)
for (k = 0; k < depths; k++) {
for (j = 0; j < rows; j++) {
for (i = 0; i < cols; i++) {
if (type == FCELL_TYPE) {
N_put_array_3d_f_value(a, i, j, k,
(float)i * (float)j * (float)k);
if (N_get_array_3d_f_value(a, i, j, k) !=
(float)i * (float)j * (float)k)
res++;
}
if (type == DCELL_TYPE) {
N_put_array_3d_d_value(a, i, j, k,
(double)i * (double)j * (double)k);
if (N_get_array_3d_d_value(a, i, j, k) !=
(double)i * (double)j * (double)k)
res++;
}
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(depths, rows, cols, type, a) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j, k) shared(cols, rows, depths, type, a) \
| 100
|
cols = a->cols;
rows = a->rows;
depths = a->depths;
type = N_get_array_3d_type(a);
<LOOP-START>reduction(+ : res)
for (k = 0; k < depths; k++) {
for (j = 0; j < rows; j++) {
for (i = 0; i < cols; i++) {
N_put_array_3d_value_null(a, i, j, k);
if (!N_is_array_3d_value_null(a, i, j, k))
res++;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(cols, rows, depths, type, a) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_arrays.c
|
#pragma omp parallel for private(i, j, k) \
| 100
|
rows = a->rows;
cols = a->cols;
depths = a->depths;
type = N_get_array_3d_type(a);
<LOOP-START>shared(depths, rows, cols, type, a, b) reduction(+ : res)
for (k = 0; k < depths; k++) {
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
if (type == FCELL_TYPE) {
if (N_get_array_3d_f_value(a, i, j, k) !=
N_get_array_3d_f_value(b, i, j, k))
res++;
}
if (type == DCELL_TYPE) {
if (N_get_array_3d_d_value(a, i, j, k) !=
N_get_array_3d_d_value(b, i, j, k))
res++;
}
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_solute_transport.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
_transport_data3d(
TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL, TEST_N_NUM_DEPTHS_LOCAL);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++)
for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {
for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {
if (j == 0) {
N_put_array_3d_d_value(data->c, i, j, k, 1);
N_put_array_3d_d_value(data->c_start, i, j, k, 1);
N_put_array_3d_d_value(data->status, i, j, k, 3);
}
else {
N_put_array_3d_d_value(data->c, i, j, k, 0);
N_put_array_3d_d_value(data->c_start, i, j, k, 0);
N_put_array_3d_d_value(data->status, i, j, k, 1);
}
N_put_array_3d_d_value(data->diff_x, i, j, k, 0.000001);
N_put_array_3d_d_value(data->diff_y, i, j, k, 0.000001);
N_put_array_3d_d_value(data->diff_z, i, j, k, 0.000001);
N_put_array_3d_d_value(data->q, i, j, k, 0.0);
N_put_array_3d_d_value(data->cs, i, j, k, 0.0);
N_put_array_3d_d_value(data->R, i, j, k, 1.0);
N_put_array_3d_d_value(data->nf, i, j, k, 0.1);
if (j == 1 && i == 1 && k == 1)
N_put_array_3d_d_value(data->cs, i, j, k, 5.0);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_solute_transport.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
t_data2d(TEST_N_NUM_COLS_LOCAL,
TEST_N_NUM_ROWS_LOCAL);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) {
for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) {
if (j == 0) {
N_put_array_2d_d_value(data->c, i, j, 0);
N_put_array_2d_d_value(data->c_start, i, j, 0);
N_put_array_2d_d_value(data->status, i, j, 2);
}
else {
N_put_array_2d_d_value(data->c, i, j, 0);
N_put_array_2d_d_value(data->c_start, i, j, 0);
N_put_array_2d_d_value(data->status, i, j, 1);
}
N_put_array_2d_d_value(data->diff_x, i, j, 0.000001);
N_put_array_2d_d_value(data->diff_y, i, j, 0.000001);
N_put_array_2d_d_value(data->cs, i, j, 0.0);
N_put_array_2d_d_value(data->R, i, j, 1.0);
N_put_array_2d_d_value(data->q, i, j, 0.0);
N_put_array_2d_d_value(data->nf, i, j, 0.1);
N_put_array_2d_d_value(data->top, i, j, 20.0);
N_put_array_2d_d_value(data->bottom, i, j, 0.0);
if (j == 1 && i == 1)
N_put_array_2d_d_value(data->cs, i, j, 1.0);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
*data;
int i, j;
data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
if (j == 1) {
N_put_array_2d_c_value(data, i, j, 2);
}
else {
N_put_array_2d_c_value(data, i, j, 1);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c
|
#pragma omp parallel for private(i, j) shared(data)
| 100
|
data;
int i, j;
data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE);
<LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
if (j == 1) {
N_put_array_2d_d_value(data, i, j, 50);
}
else {
N_put_array_2d_d_value(data, i, j, 1);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,
1, FCELL_TYPE);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++)
for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
if (i == 0 && j == 1) {
N_put_array_3d_f_value(data, i, j, k, 2.0);
}
else {
N_put_array_3d_f_value(data, i, j, k, 1.0);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_assemble.c
|
#pragma omp parallel for private(i, j, k) shared(data)
| 100
|
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS,
1, DCELL_TYPE);
<LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++)
for (j = 0; j < TEST_N_NUM_ROWS; j++) {
for (i = 0; i < TEST_N_NUM_COLS; i++) {
if (i == 0 && j == 1) {
N_put_array_3d_f_value(data, i, j, k, 50);
}
else {
N_put_array_3d_f_value(data, i, j, k, 1);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j, k) shared(data)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_les.c
|
#pragma omp parallel for private(i, j) shared(les)
| 100
|
lloc_les(TEST_N_NUM_ROWS, N_SPARSE_LES);
G_message("\t * testing les creation in parallel\n");
<LOOP-START>for (i = 0; i < TEST_N_NUM_ROWS; i++) {
for (j = 0; j < TEST_N_NUM_ROWS; j++) {
if (i != j)
les->A[i][j] = 2e-2;
les->A[i][i] = -1e2 - i;
}
les->x[i] = 273.15 + i;
les->b[i] = 1e2 - i;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(les)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OSGeo/grass/lib/gpde/test/test_les.c
|
#pragma omp parallel for private(i, j) shared(sples, spvector)
| 100
|
->A[i][i] = -1e2 - i;
}
les->x[i] = 273.15 + i;
les->b[i] = 1e2 - i;
}
<LOOP-START>for (i = 0; i < TEST_N_NUM_ROWS; i++) {
spvector = G_math_alloc_spvector(TEST_N_NUM_ROWS);
for (j = 0; j < TEST_N_NUM_ROWS; j++)
if (i != j)
spvector->index[j] = 2e-2;
spvector->index[0] = i;
spvector->values[0] = -1e2 - i;
G_math_add_spvector(sples->Asp, spvector, i);
sples->x[i] = 273.15 + i;
sples->b[i] = 1e2 - i;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i, j) shared(sples, spvector)<OMP-END>
|
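Two things stand out in this test_les.c row: `spvector` is declared `shared` yet reassigned by every iteration, which races under the pragma, and `spvector->index[j] = 2e-2` stores a floating-point coefficient into the integer index array, where the matching dense loop above writes `les->A[i][j] = 2e-2`. A race-free shape would make the pointer loop-local (a sketch assuming the same G_math_spvector fields, not a patch to the dataset):

```c
#pragma omp parallel for private(i, j)
for (i = 0; i < TEST_N_NUM_ROWS; i++) {
    /* loop-local pointer, so iterations no longer race on it */
    G_math_spvector *v = G_math_alloc_spvector(TEST_N_NUM_ROWS);
    for (j = 0; j < TEST_N_NUM_ROWS; j++)
        if (i != j) {
            v->index[j] = j;     /* column index (an int) ...  */
            v->values[j] = 2e-2; /* ... and its coefficient    */
        }
    v->index[0] = i;             /* slot 0 holds the diagonal  */
    v->values[0] = -1e2 - i;
    G_math_add_spvector(sples->Asp, v, i);
    sples->x[i] = 273.15 + i;
    sples->b[i] = 1e2 - i;
}
```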
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c
|
#pragma omp parallel for shared(layer)
| 100
|
tf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
}
#pragma omp barrier
<LOOP-START>for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
#pragma omp parallel for shared(layer_copy)
for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
}/* end of the omp pragma */
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
/* 4.1. Add up impact energy */
/* For each particle */
for( j=0; j<storms[i].size; j++ ) {
/* Impact energy (in thousandths) */
float energia = (float)storms[i].posval[j*2+1] / 1000;
/* Impact position */
int posicion = storms[i].posval[j*2];
/* For each position in the layer */
for( k=0; k<layer_size; k++ ) {
/* Update position */
actualiza( layer, k, posicion, energia );
}
}
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
//#pragma omp parallel for shared(layer, layer_copy)
for( k=0; k<layer_size; k++ )
layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the endpoints, using values from the auxiliary array */
//#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)
for( k=1; k<layer_size-1; k++ ){
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
}
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(layer)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c
|
#pragma omp parallel for shared(layer_copy)
| 100
|
omp barrier
#pragma omp parallel for shared(layer)
for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
<LOOP-START>for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
}/* end of the omp pragma */
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
/* 4.1. Add up impact energy */
/* For each particle */
for( j=0; j<storms[i].size; j++ ) {
/* Impact energy (in thousandths) */
float energia = (float)storms[i].posval[j*2+1] / 1000;
/* Impact position */
int posicion = storms[i].posval[j*2];
/* For each position in the layer */
for( k=0; k<layer_size; k++ ) {
/* Update position */
actualiza( layer, k, posicion, energia );
}
}
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
//#pragma omp parallel for shared(layer, layer_copy)
for( k=0; k<layer_size; k++ )
layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the endpoints, using values from the auxiliary array */
//#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)
for( k=1; k<layer_size-1; k++ ){
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
}
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(layer_copy)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c
|
#pragma omp parallel for shared(layer, layer_copy)
| 100
|
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
//<LOOP-START>for( k=0; k<layer_size; k++ )
layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the endpoints, using values from the auxiliary array */
//#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)
for( k=1; k<layer_size-1; k++ ){
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(layer, layer_copy)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bayons/OpenMP/Practica/energy_v2.c
|
#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)
| 100
|
layer[k];
/* 4.2.2. Update the layer, except the endpoints, using values from the auxiliary array */
//<LOOP-START>for( k=1; k<layer_size-1; k++ ){
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)<OMP-END>
|
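One more note on these energy_v2.c rows: the commented-out clause `reduction(/ : division)` is not valid OpenMP, since C/C++ reductions only admit `+`, `-`, `*`, `&`, `|`, `^`, `&&`, `||`, `min`, and `max`, which may be why it stayed disabled. The stencil update needs no reduction at all: each iteration writes a distinct `layer[k]` and reads only `layer_copy`. A minimal working form (a sketch wrapping the row's own loop):

```c
/* The loop variable of a parallel for is implicitly private, and
 * every iteration writes its own layer[k] while reading only the
 * auxiliary copy, so a plain parallel for is already race-free. */
void relax(float *layer, const float *layer_copy, int layer_size)
{
    int k;
#pragma omp parallel for shared(layer, layer_copy)
    for (k = 1; k < layer_size - 1; k++)
        layer[k] = (layer_copy[k - 1] + layer_copy[k] + layer_copy[k + 1]) / 3;
}
```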
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
Index_t numElem)
{
//
// pull in the stresses appropriate to the hydro integration
//
<LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i){
sigxx[i] = sigyy[i] = sigzz[i] = - domain.p(i) - domain.q(i) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
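The LULESH rows from here on mostly use `firstprivate(numElem)`: each thread receives a private copy of the trip count, initialized from the enclosing scope. The same idiom in a minimal standalone form (plain C, not the LULESH classes):

```c
/* firstprivate(numElem) gives each thread its own copy of the
 * bound, initialized to the caller's value; iterations write
 * disjoint sig[i] entries, so no synchronization is needed. */
void init_stress(double *sig, const double *p, const double *q,
                 int numElem)
{
#pragma omp parallel for firstprivate(numElem)
    for (int i = 0; i < numElem; ++i)
        sig[i] = -p[i] - q[i];
}
```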
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
_t>(numElem8) ;
fz_elem = Allocate<Real_t>(numElem8) ;
}
// loop over all elements
<LOOP-START>for( Index_t k=0 ; k<numElem ; ++k )
{
const Index_t* const elemToNode = domain.nodelist(k);
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
// get nodal coordinates from global arrays and copy into local arrays.
CollectDomainNodesToElemNodes(domain, elemToNode, x_local, y_local, z_local);
// Volume calculation involves extra work for numerical consistency
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
if (numthreads > 1) {
// Eliminate thread writing conflicts at the nodes by giving
// each element its own copy to write to
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
&fx_elem[k*8],
&fy_elem[k*8],
&fz_elem[k*8] ) ;
}
else {
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
fx_local, fy_local, fz_local ) ;
// copy nodal force contributions to global force array.
for( Index_t lnode=0 ; lnode<8 ; ++lnode ) {
Index_t gnode = elemToNode[lnode];
domain.fx(gnode) += fx_local[lnode];
domain.fy(gnode) += fy_local[lnode];
domain.fz(gnode) += fz_local[lnode];
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
need to copy the data out of the temporary
// arrays used above into the final forces field
<LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode )
{
Index_t count = domain.nodeElemCount(gnode) ;
Index_t *cornerList = domain.nodeElemCornerList(gnode) ;
Real_t fx_tmp = Real_t(0.0) ;
Real_t fy_tmp = Real_t(0.0) ;
Real_t fz_tmp = Real_t(0.0) ;
for (Index_t i=0 ; i < count ; ++i) {
Index_t elem = cornerList[i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
domain.fx(gnode) = fx_tmp ;
domain.fy(gnode) = fy_tmp ;
domain.fz(gnode) = fz_tmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
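This gather loop is the second half of LULESH's race-avoidance pattern: the element loop above writes only element-private slices (`&fx_elem[k*8]`), and the node loop then sums each node's corner contributions, so every thread writes a distinct node. A schematic version with hypothetical CSR-style adjacency arrays (`corner_start` and `corner_list` stand in for `nodeElemCount`/`nodeElemCornerList`):

```c
/* Phase 2 of the two-phase update: each thread owns node n, reads
 * the per-corner scratch values written in phase 1, and stores the
 * sum; no two threads touch the same f_node entry. */
void gather_node_forces(double *f_node, const double *f_elem,
                        const int *corner_start, const int *corner_list,
                        int numNode)
{
#pragma omp parallel for firstprivate(numNode)
    for (int n = 0; n < numNode; ++n) {
        double f = 0.0;
        for (int c = corner_start[n]; c < corner_start[n + 1]; ++c)
            f += f_elem[corner_list[c]];
        f_node[n] = f;
    }
}
```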
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem, hourg)
| 100
|
/*************************************************/
/* compute the hourglass modes */
<LOOP-START>for(Index_t i2=0;i2<numElem;++i2){
Real_t *fx_local, *fy_local, *fz_local ;
Real_t hgfx[8], hgfy[8], hgfz[8] ;
Real_t coefficient;
Real_t hourgam[8][4];
Real_t xd1[8], yd1[8], zd1[8] ;
const Index_t *elemToNode = domain.nodelist(i2);
Index_t i3=8*i2;
Real_t volinv=Real_t(1.0)/determ[i2];
Real_t ss1, mass1, volume13 ;
for(Index_t i1=0;i1<4;++i1){
Real_t hourmodx =
x8n[i3] * gamma[i1][0] + x8n[i3+1] * gamma[i1][1] +
x8n[i3+2] * gamma[i1][2] + x8n[i3+3] * gamma[i1][3] +
x8n[i3+4] * gamma[i1][4] + x8n[i3+5] * gamma[i1][5] +
x8n[i3+6] * gamma[i1][6] + x8n[i3+7] * gamma[i1][7];
Real_t hourmody =
y8n[i3] * gamma[i1][0] + y8n[i3+1] * gamma[i1][1] +
y8n[i3+2] * gamma[i1][2] + y8n[i3+3] * gamma[i1][3] +
y8n[i3+4] * gamma[i1][4] + y8n[i3+5] * gamma[i1][5] +
y8n[i3+6] * gamma[i1][6] + y8n[i3+7] * gamma[i1][7];
Real_t hourmodz =
z8n[i3] * gamma[i1][0] + z8n[i3+1] * gamma[i1][1] +
z8n[i3+2] * gamma[i1][2] + z8n[i3+3] * gamma[i1][3] +
z8n[i3+4] * gamma[i1][4] + z8n[i3+5] * gamma[i1][5] +
z8n[i3+6] * gamma[i1][6] + z8n[i3+7] * gamma[i1][7];
hourgam[0][i1] = gamma[i1][0] - volinv*(dvdx[i3 ] * hourmodx +
dvdy[i3 ] * hourmody +
dvdz[i3 ] * hourmodz );
hourgam[1][i1] = gamma[i1][1] - volinv*(dvdx[i3+1] * hourmodx +
dvdy[i3+1] * hourmody +
dvdz[i3+1] * hourmodz );
hourgam[2][i1] = gamma[i1][2] - volinv*(dvdx[i3+2] * hourmodx +
dvdy[i3+2] * hourmody +
dvdz[i3+2] * hourmodz );
hourgam[3][i1] = gamma[i1][3] - volinv*(dvdx[i3+3] * hourmodx +
dvdy[i3+3] * hourmody +
dvdz[i3+3] * hourmodz );
hourgam[4][i1] = gamma[i1][4] - volinv*(dvdx[i3+4] * hourmodx +
dvdy[i3+4] * hourmody +
dvdz[i3+4] * hourmodz );
hourgam[5][i1] = gamma[i1][5] - volinv*(dvdx[i3+5] * hourmodx +
dvdy[i3+5] * hourmody +
dvdz[i3+5] * hourmodz );
hourgam[6][i1] = gamma[i1][6] - volinv*(dvdx[i3+6] * hourmodx +
dvdy[i3+6] * hourmody +
dvdz[i3+6] * hourmodz );
hourgam[7][i1] = gamma[i1][7] - volinv*(dvdx[i3+7] * hourmodx +
dvdy[i3+7] * hourmody +
dvdz[i3+7] * hourmodz );
}
/* compute forces */
/* store forces into h arrays (force arrays) */
ss1=domain.ss(i2);
mass1=domain.elemMass(i2);
volume13=CBRT(determ[i2]);
Index_t n0si2 = elemToNode[0];
Index_t n1si2 = elemToNode[1];
Index_t n2si2 = elemToNode[2];
Index_t n3si2 = elemToNode[3];
Index_t n4si2 = elemToNode[4];
Index_t n5si2 = elemToNode[5];
Index_t n6si2 = elemToNode[6];
Index_t n7si2 = elemToNode[7];
xd1[0] = domain.xd(n0si2);
xd1[1] = domain.xd(n1si2);
xd1[2] = domain.xd(n2si2);
xd1[3] = domain.xd(n3si2);
xd1[4] = domain.xd(n4si2);
xd1[5] = domain.xd(n5si2);
xd1[6] = domain.xd(n6si2);
xd1[7] = domain.xd(n7si2);
yd1[0] = domain.yd(n0si2);
yd1[1] = domain.yd(n1si2);
yd1[2] = domain.yd(n2si2);
yd1[3] = domain.yd(n3si2);
yd1[4] = domain.yd(n4si2);
yd1[5] = domain.yd(n5si2);
yd1[6] = domain.yd(n6si2);
yd1[7] = domain.yd(n7si2);
zd1[0] = domain.zd(n0si2);
zd1[1] = domain.zd(n1si2);
zd1[2] = domain.zd(n2si2);
zd1[3] = domain.zd(n3si2);
zd1[4] = domain.zd(n4si2);
zd1[5] = domain.zd(n5si2);
zd1[6] = domain.zd(n6si2);
zd1[7] = domain.zd(n7si2);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,
hourgam,
coefficient, hgfx, hgfy, hgfz);
// With the threaded version, we write into local arrays per elem
// so we don't have to worry about race conditions
if (numthreads > 1) {
fx_local = &fx_elem[i3] ;
fx_local[0] = hgfx[0];
fx_local[1] = hgfx[1];
fx_local[2] = hgfx[2];
fx_local[3] = hgfx[3];
fx_local[4] = hgfx[4];
fx_local[5] = hgfx[5];
fx_local[6] = hgfx[6];
fx_local[7] = hgfx[7];
fy_local = &fy_elem[i3] ;
fy_local[0] = hgfy[0];
fy_local[1] = hgfy[1];
fy_local[2] = hgfy[2];
fy_local[3] = hgfy[3];
fy_local[4] = hgfy[4];
fy_local[5] = hgfy[5];
fy_local[6] = hgfy[6];
fy_local[7] = hgfy[7];
fz_local = &fz_elem[i3] ;
fz_local[0] = hgfz[0];
fz_local[1] = hgfz[1];
fz_local[2] = hgfz[2];
fz_local[3] = hgfz[3];
fz_local[4] = hgfz[4];
fz_local[5] = hgfz[5];
fz_local[6] = hgfz[6];
fz_local[7] = hgfz[7];
}
else {
domain.fx(n0si2) += hgfx[0];
domain.fy(n0si2) += hgfy[0];
domain.fz(n0si2) += hgfz[0];
domain.fx(n1si2) += hgfx[1];
domain.fy(n1si2) += hgfy[1];
domain.fz(n1si2) += hgfz[1];
domain.fx(n2si2) += hgfx[2];
domain.fy(n2si2) += hgfy[2];
domain.fz(n2si2) += hgfz[2];
domain.fx(n3si2) += hgfx[3];
domain.fy(n3si2) += hgfy[3];
domain.fz(n3si2) += hgfz[3];
domain.fx(n4si2) += hgfx[4];
domain.fy(n4si2) += hgfy[4];
domain.fz(n4si2) += hgfz[4];
domain.fx(n5si2) += hgfx[5];
domain.fy(n5si2) += hgfy[5];
domain.fz(n5si2) += hgfz[5];
domain.fx(n6si2) += hgfx[6];
domain.fy(n6si2) += hgfy[6];
domain.fz(n6si2) += hgfz[6];
domain.fx(n7si2) += hgfx[7];
domain.fy(n7si2) += hgfy[7];
domain.fz(n7si2) += hgfz[7];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem, hourg)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
if (numthreads > 1) {
// Collect the data from the local arrays into the final force arrays
<LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode )
{
Index_t count = domain.nodeElemCount(gnode) ;
Index_t *cornerList = domain.nodeElemCornerList(gnode) ;
Real_t fx_tmp = Real_t(0.0) ;
Real_t fy_tmp = Real_t(0.0) ;
Real_t fz_tmp = Real_t(0.0) ;
for (Index_t i=0 ; i < count ; ++i) {
Index_t elem = cornerList[i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
domain.fx(gnode) += fx_tmp ;
domain.fy(gnode) += fy_tmp ;
domain.fz(gnode) += fz_tmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
_t>(numElem8) ;
Real_t *z8n = Allocate<Real_t>(numElem8) ;
/* start loop over elements */
<LOOP-START>for (Index_t i=0 ; i<numElem ; ++i){
Real_t x1[8], y1[8], z1[8] ;
Real_t pfx[8], pfy[8], pfz[8] ;
Index_t* elemToNode = domain.nodelist(i);
CollectDomainNodesToElemNodes(domain, elemToNode, x1, y1, z1);
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);
/* load into temporary storage for FB Hour Glass control */
for(Index_t ii=0;ii<8;++ii){
Index_t jj=8*i+ii;
dvdx[jj] = pfx[ii];
dvdy[jj] = pfy[ii];
dvdz[jj] = pfz[ii];
x8n[jj] = x1[ii];
y8n[jj] = y1[ii];
z8n[jj] = z1[ii];
}
determ[i] = domain.volo(i) * domain.v(i);
/* Do a check for negative volumes */
if ( domain.v(i) <= Real_t(0.0) ) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
determ, numElem,
domain.numNode()) ;
// check for negative element volume
<LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
domain.sizeX() + 1, domain.sizeY() + 1, domain.sizeZ() + 1,
true, false) ;
#endif
<LOOP-START>for (Index_t i=0; i<numNode; ++i) {
domain.fx(i) = Real_t(0.0) ;
domain.fy(i) = Real_t(0.0) ;
domain.fz(i) = Real_t(0.0) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
*************/
static inline
void CalcAccelerationForNodes(Domain &domain, Index_t numNode)
{
<LOOP-START>for (Index_t i = 0; i < numNode; ++i) {
domain.xdd(i) = domain.fx(i) / domain.nodalMass(i);
domain.ydd(i) = domain.fy(i) / domain.nodalMass(i);
domain.zdd(i) = domain.fz(i) / domain.nodalMass(i);
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
lcVelocityForNodes(Domain &domain, const Real_t dt, const Real_t u_cut,
Index_t numNode)
{
<LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i )
{
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = domain.xd(i) + domain.xdd(i) * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
domain.xd(i) = xdtmp ;
ydtmp = domain.yd(i) + domain.ydd(i) * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
domain.yd(i) = ydtmp ;
zdtmp = domain.zd(i) + domain.zdd(i) * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
domain.zd(i) = zdtmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
*/
static inline
void CalcPositionForNodes(Domain &domain, const Real_t dt, Index_t numNode)
{
<LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i )
{
domain.x(i) += domain.xd(i) * dt ;
domain.y(i) += domain.yd(i) * dt ;
domain.z(i) += domain.zd(i) * dt ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem, deltaTime)
| 100
|
domain, Real_t *vnew,
Real_t deltaTime, Index_t numElem )
{
// loop over all elements
<LOOP-START>for( Index_t k=0 ; k<numElem ; ++k )
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
Real_t volume ;
Real_t relativeVolume ;
const Index_t* const elemToNode = domain.nodelist(k) ;
// get nodal coordinates from global arrays and copy into local arrays.
CollectDomainNodesToElemNodes(domain, elemToNode, x_local, y_local, z_local);
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / domain.volo(k) ;
vnew[k] = relativeVolume ;
domain.delv(k) = relativeVolume - domain.v(k) ;
// set characteristic length
domain.arealg(k) = CalcElemCharacteristicLength(x_local, y_local, z_local,
volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = elemToNode[lnode];
xd_local[lnode] = domain.xd(gnode);
yd_local[lnode] = domain.yd(gnode);
zd_local[lnode] = domain.zd(gnode);
}
Real_t dt2 = Real_t(0.5) * deltaTime;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives( x_local, y_local, z_local,
B, &detJ );
CalcElemVelocityGradient( xd_local, yd_local, zd_local,
B, detJ, D );
// put velocity gradient quantities into their global arrays.
domain.dxx(k) = D[0];
domain.dyy(k) = D[1];
domain.dzz(k) = D[2];
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem, deltaTime)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
ltatime, numElem) ;
// element loop to do some stuff not included in the elemlib function.
<LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k )
{
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdov = domain.dxx(k) + domain.dyy(k) + domain.dzz(k) ;
Real_t vdovthird = vdov/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
domain.vdov(k) = vdov ;
domain.dxx(k) -= vdovthird ;
domain.dyy(k) -= vdovthird ;
domain.dzz(k) -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
if (vnew[k] <= Real_t(0.0))
{
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
notonicQGradientsForElems(Domain& domain, Real_t vnew[])
{
Index_t numElem = domain.numElem();
<LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i ) {
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
const Index_t *elemToNode = domain.nodelist(i);
Index_t n0 = elemToNode[0] ;
Index_t n1 = elemToNode[1] ;
Index_t n2 = elemToNode[2] ;
Index_t n3 = elemToNode[3] ;
Index_t n4 = elemToNode[4] ;
Index_t n5 = elemToNode[5] ;
Index_t n6 = elemToNode[6] ;
Index_t n7 = elemToNode[7] ;
Real_t x0 = domain.x(n0) ;
Real_t x1 = domain.x(n1) ;
Real_t x2 = domain.x(n2) ;
Real_t x3 = domain.x(n3) ;
Real_t x4 = domain.x(n4) ;
Real_t x5 = domain.x(n5) ;
Real_t x6 = domain.x(n6) ;
Real_t x7 = domain.x(n7) ;
Real_t y0 = domain.y(n0) ;
Real_t y1 = domain.y(n1) ;
Real_t y2 = domain.y(n2) ;
Real_t y3 = domain.y(n3) ;
Real_t y4 = domain.y(n4) ;
Real_t y5 = domain.y(n5) ;
Real_t y6 = domain.y(n6) ;
Real_t y7 = domain.y(n7) ;
Real_t z0 = domain.z(n0) ;
Real_t z1 = domain.z(n1) ;
Real_t z2 = domain.z(n2) ;
Real_t z3 = domain.z(n3) ;
Real_t z4 = domain.z(n4) ;
Real_t z5 = domain.z(n5) ;
Real_t z6 = domain.z(n6) ;
Real_t z7 = domain.z(n7) ;
Real_t xv0 = domain.xd(n0) ;
Real_t xv1 = domain.xd(n1) ;
Real_t xv2 = domain.xd(n2) ;
Real_t xv3 = domain.xd(n3) ;
Real_t xv4 = domain.xd(n4) ;
Real_t xv5 = domain.xd(n5) ;
Real_t xv6 = domain.xd(n6) ;
Real_t xv7 = domain.xd(n7) ;
Real_t yv0 = domain.yd(n0) ;
Real_t yv1 = domain.yd(n1) ;
Real_t yv2 = domain.yd(n2) ;
Real_t yv3 = domain.yd(n3) ;
Real_t yv4 = domain.yd(n4) ;
Real_t yv5 = domain.yd(n5) ;
Real_t yv6 = domain.yd(n6) ;
Real_t yv7 = domain.yd(n7) ;
Real_t zv0 = domain.zd(n0) ;
Real_t zv1 = domain.zd(n1) ;
Real_t zv2 = domain.zd(n2) ;
Real_t zv3 = domain.zd(n3) ;
Real_t zv4 = domain.zd(n4) ;
Real_t zv5 = domain.zd(n5) ;
Real_t zv6 = domain.zd(n6) ;
Real_t zv7 = domain.zd(n7) ;
Real_t vol = domain.volo(i)*vnew[i] ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;
Real_t dyj = Real_t(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;
Real_t dzj = Real_t(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;
Real_t dxi = Real_t( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;
Real_t dyi = Real_t( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;
Real_t dzi = Real_t( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;
Real_t dxk = Real_t( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;
Real_t dyk = Real_t( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;
Real_t dzk = Real_t( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
domain.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;
dyv = Real_t(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;
dzv = Real_t(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;
domain.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
domain.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;
dyv = Real_t(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ;
dzv = Real_t(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;
domain.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
domain.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;
dyv = Real_t(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;
dzv = Real_t(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;
domain.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)
| 100
|
max_slope();
Real_t qlc_monoq = domain.qlc_monoq();
Real_t qqc_monoq = domain.qqc_monoq();
<LOOP-START>for ( Index_t ielem = 0 ; ielem < domain.regElemSize(r); ++ielem ) {
Index_t i = domain.regElemlist(r,ielem);
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Int_t bcMask = domain.elemBC(i) ;
Real_t delvm = 0.0, delvp =0.0;
/* phixi */
Real_t norm = Real_t(1.) / (domain.delv_xi(i)+ ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_xi(domain.lxim(i)); break ;
case XI_M_SYMM: delvm = domain.delv_xi(i) ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_xi(domain.lxip(i)) ; break ;
case XI_P_SYMM: delvp = domain.delv_xi(i) ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( domain.delv_eta(i) + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_eta(domain.letam(i)) ; break ;
case ETA_M_SYMM: delvm = domain.delv_eta(i) ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_eta(domain.letap(i)) ; break ;
case ETA_P_SYMM: delvp = domain.delv_eta(i) ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( domain.delv_zeta(i) + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_zeta(domain.lzetam(i)) ; break ;
case ZETA_M_SYMM: delvm = domain.delv_zeta(i) ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_zeta(domain.lzetap(i)) ; break ;
case ZETA_P_SYMM: delvp = domain.delv_zeta(i) ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( domain.vdov(i) > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = domain.delv_xi(i) * domain.delx_xi(i) ;
Real_t delvxeta = domain.delv_eta(i) * domain.delx_eta(i) ;
Real_t delvxzeta = domain.delv_zeta(i) * domain.delx_zeta(i) ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = domain.elemMass(i) / (domain.volo(i) * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
domain.qq(i) = qquad ;
domain.ql(i) = qlin ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length)
| 100
|
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length, Index_t *regElemList)
{
<LOOP-START>for (Index_t i = 0; i < length ; ++i) {
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)
| 100
|
)/Real_t(3.0) ;
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
Index_t elem = regElemList[i];
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[elem] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, emin)
| 100
|
Index_t length, Index_t *regElemList)
{
Real_t *pHalfStep = Allocate<Real_t>(length) ;
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, emin)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0)
| 100
|
ep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq_old[i] = ql_old[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, emin, e_cut)
| 100
|
.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, emin, e_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)
| 100
|
new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Index_t elem = regElemList[i];
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0, q_cut)
| 100
|
new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
Index_t elem = regElemList[i];
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0, q_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(rho0, ss4o3)
| 100
|
newc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3,
Index_t len, Index_t *regElemList)
{
<LOOP-START>for (Index_t i = 0; i < len ; ++i) {
Index_t elem = regElemList[i];
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp);
}
domain.ss(elem) = ssTmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(rho0, ss4o3)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(numElemReg)
| 100
|
in,
qq_old, ql_old, rho0, eosvmax,
numElemReg, regElemList);
}
<LOOP-START>for (Index_t i=0; i<numElemReg; ++i) {
Index_t elem = regElemList[i];
domain.p(elem) = p_new[i] ;
domain.e(elem) = e_new[i] ;
domain.q(elem) = q_new[i] ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElemReg)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cdwdirect/sos_flow/examples/lulesh/lulesh.cc
|
#pragma omp parallel for firstprivate(length, v_cut)
| 100
|
rElems(Domain &domain, Real_t *vnew,
Real_t v_cut, Index_t length)
{
if (length != 0) {
<LOOP-START>for(Index_t i=0 ; i<length ; ++i) {
Real_t tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
domain.v(i) = tmpV ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, v_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/daxpy-loop.cpp
|
#pragma omp parallel for shared(x,y) private(i)
| 100
|
)
{
int i;
omp_set_num_threads(number_of_threads);
int n_per_thread = SIZE/number_of_threads;
<LOOP-START>for(i=0;i<SIZE;i++)
{
x[i]=a*x[i]+y[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(x,y) private(i)<OMP-END>
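Since the record above is only a 100-character context window, here is a self-contained daxpy sketch of the same pragma (assumed sizes and values, not the repository file). Declaring the index inside the for statement makes it private automatically, so private(i) is only required in the older style where i lives outside the loop:

#include <stdio.h>
#define SIZE 1000000

static double x[SIZE], y[SIZE];

int main(void) {
    const double a = 2.0;
    for (int i = 0; i < SIZE; ++i) { x[i] = 1.0; y[i] = 2.0; }

    #pragma omp parallel for                  /* x, y, a shared; i private by scope */
    for (int i = 0; i < SIZE; ++i)
        x[i] = a * x[i] + y[i];

    printf("x[0] = %f\n", x[0]);              /* expect 4.0 */
    return 0;
}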
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/matrix-multiplication.cpp
|
#pragma omp parallel for shared(a,b) private(i,j,k)
| 100
|
)
{
for(j=0; j<SIZE; j++)
{
b[i][j] = rand()%1000;
}
}
}
void parallel()
{
int i,j,k;
<LOOP-START>for(i=0; i<SIZE; i++)
{
for(j=0; j<SIZE; j++)
{
res[i][j] = 0;
for(k=0; k<SIZE; k++)
{
res[i][j] += a[i][k]*b[k][j];
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(a,b) private(i,j,k)<OMP-END>
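An alternative to privatizing i, j, k by hand is collapse(2), which fuses the two outer loops into one parallel iteration space. A sketch with assumed SIZE and operand values, not the repository file:

#include <stdio.h>
#define SIZE 256

static double a[SIZE][SIZE], b[SIZE][SIZE], res[SIZE][SIZE];

int main(void) {
    for (int i = 0; i < SIZE; ++i)
        for (int j = 0; j < SIZE; ++j) { a[i][j] = 1.0; b[i][j] = 2.0; }

    #pragma omp parallel for collapse(2)      /* i and j both distributed */
    for (int i = 0; i < SIZE; ++i) {
        for (int j = 0; j < SIZE; ++j) {
            double sum = 0.0;                 /* private: declared inside the loop */
            for (int k = 0; k < SIZE; ++k)
                sum += a[i][k] * b[k][j];
            res[i][j] = sum;
        }
    }

    printf("res[0][0] = %f\n", res[0][0]);    /* expect 2.0 * SIZE = 512 */
    return 0;
}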
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-thread-safe.cpp
|
#pragma omp parallel for private(x, y, i) reduction(+:count)
| 100
|
return pi;
}
double parallel(int n)
{
int i, count=0;
double x,y,pi;
count = 0;
<LOOP-START>for(i=0; i<n; i++)
{
x = drandom();
y = drandom();
if((x*x + y*y) <= 1)
{
count++;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(x, y, i) reduction(+:count) <OMP-END>
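private(x, y) protects the coordinates, but if drandom() wraps hidden shared generator state (as rand() does), the generator itself remains a race. A sketch of a per-thread-state variant using POSIX rand_r (my own, not the repository code):

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void) {
    const int n = 10000000;
    long count = 0;

    #pragma omp parallel reduction(+:count)
    {
        /* one seed per thread: no shared generator state */
        unsigned seed = 1234u + (unsigned)omp_get_thread_num();
        #pragma omp for
        for (int i = 0; i < n; ++i) {
            double x = (double)rand_r(&seed) / RAND_MAX;
            double y = (double)rand_r(&seed) / RAND_MAX;
            if (x * x + y * y <= 1.0)
                count++;
        }
    }

    printf("pi ~= %f\n", 4.0 * (double)count / n);
    return 0;
}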
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-random-generator.cpp
|
#pragma omp parallel for private(x, y, i) reduction(+:count)
| 100
|
double x,y,pi;
count = 0;
        // the reduction clause on count removes the synchronization issue
<LOOP-START>for(i=0; i<n; i++)
{
x = (double)rand_double(1.0);
y = (double)rand_double(1.0);
if((x*x + y*y) <= 1)
{
count++;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(x, y, i) reduction(+:count) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c
|
#pragma omp parallel for
| 100
|
4.f / 9.f;
float w1 = params->density / 9.f;
float w2 = params->density / 36.f;
<LOOP-START>for (int jj = 0; jj < params->ny; jj++) {
for (int ii = 0; ii < params->nx; ii++) {
// centre
(*cells_ptr)[(0 * params->ny * params->nx) + (ii + jj*params->nx)] = w0;
// axis directions
(*cells_ptr)[(1 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;
(*cells_ptr)[(2 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;
(*cells_ptr)[(3 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;
(*cells_ptr)[(4 * params->ny * params->nx) + (ii + jj*params->nx)] = w1;
// diagonals
(*cells_ptr)[(5 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;
(*cells_ptr)[(6 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;
(*cells_ptr)[(7 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;
(*cells_ptr)[(8 * params->ny * params->nx) + (ii + jj*params->nx)] = w2;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c
|
#pragma omp parallel for
| 100
|
x) + (ii + jj*params->nx)] = w2;
}
}
/* first set all cells in obstacle array to zero */
<LOOP-START>for (int jj = 0; jj < params->ny; jj++) {
for (int ii = 0; ii < params->nx; ii++) {
(*obstacles_ptr)[ii + jj*params->nx] = 0;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/Final Project/gaussian_blur_omp.cpp
|
#pragma omp parallel for
| 100
|
bstr(0, inputfile_name.size() - 4)+ "_blur_omp.bmp";
for (int j = 0; j < img_height; j++)
{
<LOOP-START>for (int i = 0; i < img_width; i++)
{
pic_out[3 * (j * img_width + i) + MYRED] = gaussian_filter(i, j, MYRED, resolution);
pic_out[3 * (j * img_width + i) + MYGREEN] = gaussian_filter(i, j, MYGREEN, resolution);
pic_out[3 * (j * img_width + i) + MYBLUE] = gaussian_filter(i, j, MYBLUE, resolution);
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
r Class;
logical verified;
double zeta_verify_value, epsilon, err;
char *t_names[T_last];
<LOOP-START>for (i = 0; i < T_last; i++)
{
timer_clear(i);
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(-:colidx)
| 100
|
n, no speed up
#pragma omp for
for (j = 0; j < lastrow - firstrow + 1; j++)
{
// <LOOP-START>for (k = rowstr[j]; k < rowstr[j+1]; k+=3) //try loop unrolling
{
colidx[k] = colidx[k] - firstcol;
colidx[k + 1] = colidx[k + 1] - firstcol;
colidx[k + 2] = colidx[k + 2] - firstcol;
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(-:colidx) <OMP-END>
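reduction(-:colidx) looks misapplied here: a reduction clause accumulates a scalar (or, since OpenMP 4.5, an array section) across iterations, while this loop is an independent element-wise shift that needs no clause at all. A minimal sketch with assumed sizes:

#include <stdio.h>
#define NNZ 1000

int main(void) {
    int colidx[NNZ];
    const int firstcol = 5;
    for (int k = 0; k < NNZ; ++k) colidx[k] = k + firstcol;

    /* iterations are independent: plain parallel for is enough */
    #pragma omp parallel for
    for (int k = 0; k < NNZ; ++k)
        colidx[k] -= firstcol;

    printf("colidx[0] = %d\n", colidx[0]);    /* expect 0 */
    return 0;
}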
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
ze z to obtain x
//---------------------------------------------------------------------
// <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) {
x[j] = norm_temp2 * z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
ector to (1, 1, .... 1)
//---------------------------------------------------------------------
<LOOP-START>for (i = 0; i < NA+1; i++) {
x[i] = 1.0;
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)
| 100
|
// tried the following two pragmas, but neither speeds this up; parallelizing the inner for is a bad idea
// <LOOP-START>// #pragma omp parallel for reduction(+:norm_temp1, norm_temp2)
for (j = 0; j < lastcol - firstcol + 1; j++) {
norm_temp1 = norm_temp1 + x[j]*z[j];
norm_temp2 = norm_temp2 + z[j]*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(+:norm_temp1, norm_temp2)
| 100
|
for bad idea
// #pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)
// <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) {
norm_temp1 = norm_temp1 + x[j]*z[j];
norm_temp2 = norm_temp2 + z[j]*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:norm_temp1, norm_temp2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for, no speed up, inner for bad idea
| 100
|
ze z to obtain x
//---------------------------------------------------------------------
// <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++)
{
x[j] = norm_temp2 * z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for, no speed up, inner for bad idea<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
significantly faster
// on the Cray t3d - overall speed of code is 1.5 times faster.
<LOOP-START>for (j = 0; j < lastrow - firstrow + 1; j++) {
sum = 0.0;
        //#pragma omp parallel for reduction(+:sum) // no speed up; parallelizing the inner loop is a bad idea (too much overhead)
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
q[j] = sum;
}<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
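This is the standard CSR sparse matrix-vector pattern: parallelize the row loop and keep the accumulator local; no reduction clause is needed because each thread writes disjoint entries of q. A self-contained sketch with a tiny made-up matrix (not the NPB data):

#include <stdio.h>

int main(void) {
    /* 3x3 CSR matrix [[1,2,0],[0,3,0],[4,0,5]] */
    int    rowstr[4] = {0, 2, 3, 5};
    int    colidx[5] = {0, 1, 1, 0, 2};
    double a[5]      = {1, 2, 3, 4, 5};
    double p[3]      = {1, 1, 1};
    double q[3];

    #pragma omp parallel for
    for (int j = 0; j < 3; ++j) {
        double sum = 0.0;                     /* per-row local accumulator */
        for (int k = rowstr[j]; k < rowstr[j + 1]; ++k)
            sum += a[k] * p[colidx[k]];
        q[j] = sum;
    }

    printf("q = [%g %g %g]\n", q[0], q[1], q[2]);  /* expect [3 3 9] */
    return 0;
}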
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(+:sum)
| 100
|
neck?)!!!!!************
for (j = 0; j < lastrow - firstrow + 1; j++) {
sum = 0.0;
//<LOOP-START>for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(+:d)
| 100
|
in p.q
//---------------------------------------------------------------------
d = 0.0;
<LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) {
d = d + p[j]*q[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:d)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for reduction(+:rho)
| 100
|
ha*q
//---------------------------------------------------------------------
rho = 0.0;
<LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
rho = rho + r[j]*r[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:rho)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
// p = r + beta*p
//---------------------------------------------------------------------
<LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) {
p[j] = r[j] + beta*p[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
or the use in sparse.
//---------------------------------------------------------------------
//<LOOP-START>for (iouter = 0; iouter < n; iouter++) {
nzv = NONZER;
sprnvc(n, nzv, nn1, vc, ivc);
vecset(n, vc, ivc, &nzv, iouter+1, 0.5);
arow[iouter] = nzv;
#pragma omp parallel for
for (ivelt = 0; ivelt < nzv; ivelt++) {
acol[iouter][ivelt] = ivc[ivelt] - 1;
aelt[iouter][ivelt] = vc[ivelt];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
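Note that this record nests a parallel for inside another (commented-out) parallel for. Nested parallel regions are serialized by default, so the inner pragma would add overhead without extra threads unless nesting is enabled; a small demonstration (my own, not the NPB code):

#include <stdio.h>
#include <omp.h>

int main(void) {
    omp_set_max_active_levels(2);             /* allow two active parallel levels */

    #pragma omp parallel for num_threads(2)
    for (int i = 0; i < 2; ++i) {
        #pragma omp parallel for num_threads(2)
        for (int j = 0; j < 2; ++j)
            printf("outer %d inner %d level %d\n", i, j, omp_get_level());
    }
    return 0;
}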
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
nvc(n, nzv, nn1, vc, ivc);
vecset(n, vc, ivc, &nzv, iouter+1, 0.5);
arow[iouter] = nzv;
<LOOP-START>for (ivelt = 0; ivelt < nzv; ivelt++) {
acol[iouter][ivelt] = ivc[ivelt] - 1;
aelt[iouter][ivelt] = vc[ivelt];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
f triples in each row
//---------------------------------------------------------------------
//<LOOP-START>for (j = 0; j < nrows+1; j++) {
rowstr[j] = 0;
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
--------
//#pragma omp parallel for
for (j = 0; j < nrows+1; j++) {
rowstr[j] = 0;
}
//<LOOP-START>for (i = 0; i < n; i++) {
for (nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
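This counting loop was presumably left serial because different i iterations can update the same rowstr[j]; parallelizing it would need an atomic update. A sketch under that assumption, simplified to one entry per element:

#include <stdio.h>
#define N 1000
#define NROWS 16

int main(void) {
    int rowstr[NROWS + 1] = {0};
    int acol[N], arow[N];
    for (int i = 0; i < N; ++i) { acol[i] = i % NROWS; arow[i] = 1; }

    #pragma omp parallel for
    for (int i = 0; i < N; ++i) {
        int j = acol[i] + 1;
        #pragma omp atomic                    /* rows collide across iterations */
        rowstr[j] += arow[i];
    }

    printf("rowstr[1] = %d\n", rowstr[1]);    /* expect 63 */
    return 0;
}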
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
rowstr[0] = 0;
//<LOOP-START>for (j = 1; j < nrows+1; j++) {
rowstr[j] = rowstr[j] + rowstr[j-1];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
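The running sum rowstr[j] += rowstr[j-1] is a loop-carried recurrence, so the commented-out pragma above cannot be correct as written. OpenMP 5.0 added an inscan reduction for exactly this prefix-sum shape; a sketch (needs a compiler with scan support, e.g. recent GCC):

#include <stdio.h>
#define N 16

int main(void) {
    int counts[N], prefix[N];
    int sum = 0;
    for (int j = 0; j < N; ++j) counts[j] = j;

    #pragma omp parallel for reduction(inscan, +: sum)
    for (int j = 0; j < N; ++j) {
        sum += counts[j];                     /* input phase */
        #pragma omp scan inclusive(sum)
        prefix[j] = sum;                      /* scan phase: inclusive prefix */
    }

    printf("prefix[%d] = %d\n", N - 1, prefix[N - 1]);  /* 0+1+...+15 = 120 */
    return 0;
}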
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for
| 100
|
.. preload data pages
//---------------------------------------------------------------------
//<LOOP-START>for (j = 0; j < nrows; j++) {
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
|
#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************
| 100
|
-------------------------------------
size = 1.0;
ratio = pow(rcond, (1.0 / (double)(n)));
//<LOOP-START>for (i = 0; i < n; i++) {
for (nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza];
scale = size * aelt[i][nza];
for (nzrow = 0; nzrow < arow[i]; nzrow++) {
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
//--------------------------------------------------------------------
// ... add the identity * rcond to the generated matrix to bound
// the smallest eigenvalue from below by rcond
//--------------------------------------------------------------------
if (jcol == j && j == i) {
va = va + rcond - shift;
}
cont40 = false;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
if (colidx[k] > jcol) {
//----------------------------------------------------------------
// ... insert colidx here orderly
//----------------------------------------------------------------
for (kk = rowstr[j+1]-2; kk >= k; kk--) {
if (colidx[kk] > -1) {
a[kk+1] = a[kk];
colidx[kk+1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
cont40 = true;
break;
} else if (colidx[k] == -1) {
colidx[k] = jcol;
cont40 = true;
break;
} else if (colidx[k] == jcol) {
//--------------------------------------------------------------
// ... mark the duplicated entry
//--------------------------------------------------------------
nzloc[j] = nzloc[j] + 1;
cont40 = true;
break;
}
}
if (cont40 == false) {
printf("internal error in sparse: i=%d\n", i);
exit(EXIT_FAILURE);
}
a[k] = a[k] + va;
}
}
size = size * ratio;
}<LOOP-END> <OMP-START>#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************<OMP-END>
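The verification failure is expected: the assembly loop carries real dependences (ordered inserts into colidx/a, duplicate marking in nzloc, and the recurrence size = size * ratio). The scalar recurrence, at least, has a closed form, so each iteration could compute size independently; a sketch of just that piece (the shared array inserts would still make the full loop unsafe):

#include <stdio.h>
#include <math.h>

int main(void) {
    const double rcond = 0.1;
    const int n = 8;
    const double ratio = pow(rcond, 1.0 / (double)n);

    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
        double size = pow(ratio, (double)i);  /* closed form of the serial recurrence */
        printf("i=%d size=%f\n", i, size);
    }
    return 0;
}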
|