Merge pull request #345 from timattox/USER-DPD_another_zero_compute

USER-DPD another zero compute optimization
sjplimp 2017-01-20 13:14:59 -07:00 committed by GitHub
commit f8d3c4c740
2 changed files with 8 additions and 1 deletion
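
A minimal standalone sketch of the optimization this PR applies (illustrative only, not LAMMPS source; the variable names mirror the diff below, everything else is hypothetical): the conservative DPD force scales with the pair coefficient a0, so if every a0 is zero and the dissipative/random contributions are handled elsewhere (the splitFDT/SSA path), the pair force loop can be skipped entirely.

// sketch.cpp -- hypothetical, self-contained illustration of the a0_is_zero guard
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-type-pair conservative coefficients a0.
  // Typical SSA runs set all of them to zero, so the conservative force vanishes.
  std::vector<double> a0 = {0.0, 0.0, 0.0};

  bool a0_is_zero = true;                                      // start with the zero assumption
  for (double a : a0) a0_is_zero = a0_is_zero && (a == 0.0);   // verify it for every coefficient

  bool splitFDT_flag = true;   // stand-in: dissipative/random terms handled by a separate fix
  long pairs_visited = 0;
  if (splitFDT_flag) {
    if (!a0_is_zero)                                           // the new guard: skip the loop entirely
      for (int ii = 0; ii < 1000000; ++ii) ++pairs_visited;    // stand-in for the neighbor-list loop
  }
  std::printf("a0_is_zero=%d pairs_visited=%ld\n", (int)a0_is_zero, pairs_visited);
  return 0;
}

The actual change below adds the same guard in PairDPDfdt::compute() and keeps a0_is_zero current in coeff() and read_restart().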

src/USER-DPD/pair_dpd_fdt.cpp

@@ -43,6 +43,8 @@ using namespace LAMMPS_NS;
PairDPDfdt::PairDPDfdt(LAMMPS *lmp) : Pair(lmp)
{
random = NULL;
splitFDT_flag = false;
a0_is_zero = false;
}
/* ---------------------------------------------------------------------- */
@@ -94,7 +96,7 @@ void PairDPDfdt::compute(int eflag, int vflag)
// loop over neighbors of my atoms
if (splitFDT_flag) {
for (ii = 0; ii < inum; ii++) {
if (!a0_is_zero) for (ii = 0; ii < inum; ii++) {
i = ilist[ii];
xtmp = x[i][0];
ytmp = x[i][1];
@@ -287,6 +289,8 @@ void PairDPDfdt::coeff(int narg, char **arg)
double sigma_one = force->numeric(FLERR,arg[3]);
double cut_one = cut_global;
a0_is_zero = (a0_one == 0.0); // Typical use with SSA is to set a0 to zero
if (narg == 5) cut_one = force->numeric(FLERR,arg[4]);
int count = 0;
@@ -371,6 +375,7 @@ void PairDPDfdt::read_restart(FILE *fp)
allocate();
a0_is_zero = true; // start with assumption that a0 is zero
int i,j;
int me = comm->me;
for (i = 1; i <= atom->ntypes; i++)
@@ -386,6 +391,7 @@ void PairDPDfdt::read_restart(FILE *fp)
MPI_Bcast(&a0[i][j],1,MPI_DOUBLE,0,world);
MPI_Bcast(&sigma[i][j],1,MPI_DOUBLE,0,world);
MPI_Bcast(&cut[i][j],1,MPI_DOUBLE,0,world);
a0_is_zero = a0_is_zero && (a0[i][j] == 0.0); // verify the zero assumption
}
}
}

src/USER-DPD/pair_dpd_fdt.h

@@ -50,6 +50,7 @@ class PairDPDfdt : public Pair {
double cut_global;
int seed;
bool splitFDT_flag;
bool a0_is_zero;
void allocate();