forked from lijiext/lammps
commit aeb7fec23a (parent 9f649c3123)
git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@12814 f3b2605a-c512-4ea7-a41b-209d697bcdaa
@@ -166,7 +166,6 @@ void CommKokkos::forward_comm_device(int dummy)
 {
   int n;
   MPI_Request request;
-  MPI_Status status;
   AtomVecKokkos *avec = (AtomVecKokkos *) atom->avec;
   double **x = atom->x;
   double *buf;
@@ -199,7 +198,7 @@ void CommKokkos::forward_comm_device(int dummy)
                          n,MPI_DOUBLE,sendproc[iswap],0,world);
       }

-      if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
+      if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
       atomKK->modified(ExecutionSpaceFromDevice<DeviceType>::
                        space,X_MASK);
     } else if (ghost_velocity) {
@@ -212,7 +211,7 @@ void CommKokkos::forward_comm_device(int dummy)
       n = avec->pack_comm_vel(sendnum[iswap],sendlist[iswap],
                              buf_send,pbc_flag[iswap],pbc[iswap]);
       if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
-      if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
+      if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
       avec->unpack_comm_vel(recvnum[iswap],firstrecv[iswap],buf_recv);
     } else {
       if (size_forward_recv[iswap])
@@ -224,7 +223,7 @@ void CommKokkos::forward_comm_device(int dummy)
       if (n)
         MPI_Send(k_buf_send.view<DeviceType>().ptr_on_device(),n,
                  MPI_DOUBLE,sendproc[iswap],0,world);
-      if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
+      if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
       avec->unpack_comm_kokkos(recvnum[iswap],firstrecv[iswap],k_buf_recv);
     }

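The pattern touched throughout this commit is the same at every call site: a receive is posted with MPI_Irecv, the matching MPI_Send goes out, and the code blocks in MPI_Wait. None of these sites ever reads the returned MPI_Status (no MPI_Get_count, no look at MPI_SOURCE or MPI_TAG), so passing MPI_STATUS_IGNORE is equivalent and lets the now-unused MPI_Status locals be deleted. A minimal sketch of the pattern, with placeholder buffer, count, and rank names that are not the LAMMPS variables:

    #include <mpi.h>

    // Hedged sketch only: buf_recv, nrecv, buf_send, nsend, partner, comm are
    // placeholders for the per-swap quantities used in forward_comm_device().
    void forward_swap(double *buf_recv, int nrecv,
                      double *buf_send, int nsend,
                      int partner, MPI_Comm comm)
    {
      MPI_Request request;
      if (nrecv) MPI_Irecv(buf_recv, nrecv, MPI_DOUBLE, partner, 0, comm, &request);
      if (nsend) MPI_Send(buf_send, nsend, MPI_DOUBLE, partner, 0, comm);
      // the status is never inspected, so MPI_STATUS_IGNORE is sufficient
      if (nrecv) MPI_Wait(&request, MPI_STATUS_IGNORE);
      // only if the received length were unknown would a real status be needed:
      //   MPI_Status status;
      //   MPI_Wait(&request, &status);
      //   int count; MPI_Get_count(&status, MPI_DOUBLE, &count);
    }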
@@ -258,16 +257,16 @@ void CommKokkos::reverse_comm()
   atomKK->sync(Device,ALL_MASK);
 }

-void CommKokkos::forward_comm_fix(Fix *fix)
+void CommKokkos::forward_comm_fix(Fix *fix, int size)
 {
   k_sendlist.sync<LMPHostType>();
-  CommBrick::forward_comm_fix(fix);
+  CommBrick::forward_comm_fix(fix,size);
 }

-void CommKokkos::reverse_comm_fix(Fix *fix)
+void CommKokkos::reverse_comm_fix(Fix *fix, int size)
 {
   k_sendlist.sync<LMPHostType>();
-  CommBrick::reverse_comm_fix(fix);
+  CommBrick::reverse_comm_fix(fix, size);
 }

 void CommKokkos::forward_comm_compute(Compute *compute)
@@ -388,12 +387,11 @@ struct BuildExchangeListFunctor {
 template<class DeviceType>
 void CommKokkos::exchange_device()
 {
-  int i,m,nsend,nrecv,nrecv1,nrecv2,nlocal;
-  double lo,hi,value;
+  int i,nsend,nrecv,nrecv1,nrecv2,nlocal;
+  double lo,hi;
   double **x;
-  double *sublo,*subhi,*buf;
+  double *sublo,*subhi;
   MPI_Request request;
-  MPI_Status status;
   AtomVecKokkos *avec = (AtomVecKokkos *) atom->avec;

   // clear global->local map for owned and ghost atoms
@@ -496,7 +494,6 @@ void CommKokkos::exchange_device()

     if (procgrid[dim] == 1) {
       nrecv = nsend;
-      buf = buf_send;
       if (nrecv) {
         atom->nlocal=avec->
           unpack_exchange_kokkos(k_buf_send,nrecv,atom->nlocal,dim,lo,hi,
@@ -505,11 +502,11 @@ void CommKokkos::exchange_device()
       }
     } else {
       MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
-                   &nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
+                   &nrecv1,1,MPI_INT,procneigh[dim][1],0,world,MPI_STATUS_IGNORE);
       nrecv = nrecv1;
       if (procgrid[dim] > 2) {
         MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
-                     &nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
+                     &nrecv2,1,MPI_INT,procneigh[dim][0],0,world,MPI_STATUS_IGNORE);
         nrecv += nrecv2;
       }
       if (nrecv > maxrecv) grow_recv_kokkos(nrecv);
@@ -519,7 +516,7 @@ void CommKokkos::exchange_device()
                 world,&request);
       MPI_Send(k_buf_send.view<DeviceType>().ptr_on_device(),nsend,
                MPI_DOUBLE,procneigh[dim][0],0,world);
-      MPI_Wait(&request,&status);
+      MPI_Wait(&request,MPI_STATUS_IGNORE);

       if (procgrid[dim] > 2) {
         MPI_Irecv(k_buf_recv.view<DeviceType>().ptr_on_device()+nrecv1,
@@ -527,10 +524,9 @@ void CommKokkos::exchange_device()
                   world,&request);
         MPI_Send(k_buf_send.view<DeviceType>().ptr_on_device(),nsend,
                  MPI_DOUBLE,procneigh[dim][1],0,world);
-        MPI_Wait(&request,&status);
+        MPI_Wait(&request,MPI_STATUS_IGNORE);
       }

-      buf = buf_recv;
       if (nrecv) {
         atom->nlocal = avec->
           unpack_exchange_kokkos(k_buf_recv,nrecv,atom->nlocal,dim,lo,hi,
@@ -643,7 +639,6 @@ void CommKokkos::borders_device() {
   double **x;
   double *buf,*mlo,*mhi;
   MPI_Request request;
-  MPI_Status status;
   AtomVecKokkos *avec = (AtomVecKokkos *) atom->avec;

   ExecutionSpace exec_space = ExecutionSpaceFromDevice<DeviceType>::space;
@@ -799,14 +794,14 @@ void CommKokkos::borders_device() {

     if (sendproc[iswap] != me) {
       MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
-                   &nrecv,1,MPI_INT,recvproc[iswap],0,world,&status);
+                   &nrecv,1,MPI_INT,recvproc[iswap],0,world,MPI_STATUS_IGNORE);
       if (nrecv*size_border > maxrecv) grow_recv_kokkos(nrecv*size_border);
       if (nrecv) MPI_Irecv(k_buf_recv.view<DeviceType>().ptr_on_device(),
                            nrecv*size_border,MPI_DOUBLE,
                            recvproc[iswap],0,world,&request);
       if (n) MPI_Send(k_buf_send.view<DeviceType>().ptr_on_device(),n,
                       MPI_DOUBLE,sendproc[iswap],0,world);
-      if (nrecv) MPI_Wait(&request,&status);
+      if (nrecv) MPI_Wait(&request,MPI_STATUS_IGNORE);
       buf = buf_recv;
     } else {
       nrecv = nsend;
@@ -39,8 +39,8 @@ class CommKokkos : public CommBrick {

   void forward_comm_pair(class Pair *); // forward comm from a Pair
   void reverse_comm_pair(class Pair *); // reverse comm from a Pair
-  void forward_comm_fix(class Fix *); // forward comm from a Fix
-  void reverse_comm_fix(class Fix *); // reverse comm from a Fix
+  void forward_comm_fix(class Fix *, int size=0); // forward comm from a Fix
+  void reverse_comm_fix(class Fix *, int size=0); // reverse comm from a Fix
   void forward_comm_compute(class Compute *); // forward from a Compute
   void reverse_comm_compute(class Compute *); // reverse from a Compute
   void forward_comm_dump(class Dump *); // forward comm from a Dump
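The Kokkos wrappers now carry the same optional size argument as the CommBrick methods they forward to, and the default of 0 keeps every existing call site compiling unchanged. The extra argument presumably lets a caller state how many values per atom to communicate when that differs from the fix's default; treat that reading as an assumption. A hedged sketch of the two call shapes, using an illustrative stand-in interface rather than the real CommKokkos class:

    // Hedged sketch: CommLike and sync_fix_ghosts() are illustrative stand-ins;
    // only the method signatures mirror the header change above.
    class Fix;
    struct CommLike {
      virtual void forward_comm_fix(Fix *fix, int size = 0) = 0;
      virtual void reverse_comm_fix(Fix *fix, int size = 0) = 0;
      virtual ~CommLike() {}
    };

    void sync_fix_ghosts(CommLike *comm, Fix *fix)
    {
      comm->forward_comm_fix(fix);     // old style: size defaults to 0
      comm->forward_comm_fix(fix, 3);  // new option: 3 values per atom (assumed meaning)
    }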
@@ -80,7 +80,6 @@ void PairCoulCutKokkos<DeviceType>::compute(int eflag_in, int vflag_in)

   if (neighflag == FULL || neighflag == FULLCLUSTER) no_virial_fdotr_compute = 1;

-  double ecoul = 0.0;
   if (eflag || vflag) ev_setup(eflag,vflag);
   else evflag = vflag_fdotr = 0;

@ -89,8 +89,6 @@ void PairLJCutCoulCutKokkos<DeviceType>::compute(int eflag_in, int vflag_in)
|
|||
|
||||
if (neighflag == FULL || neighflag == FULLCLUSTER) no_virial_fdotr_compute = 1;
|
||||
|
||||
double evdwl = 0.0;
|
||||
double ecoul = 0.0;
|
||||
if (eflag || vflag) ev_setup(eflag,vflag);
|
||||
else evflag = vflag_fdotr = 0;
|
||||
|
||||
|
|
|
@@ -59,7 +59,7 @@ PairLJCutCoulLongKokkos<DeviceType>::PairLJCutCoulLongKokkos(LAMMPS *lmp):PairLJ
   datamask_modify = F_MASK | ENERGY_MASK | VIRIAL_MASK;
   cutsq = NULL;
   cut_ljsq = NULL;
-  cut_coulsq = NULL;
+  cut_coulsq = 0.0;

 }
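Unlike cutsq and cut_ljsq, cut_coulsq here appears to be a plain double rather than an array, so initializing it with NULL only worked through an implicit conversion to 0 and typically draws a compiler warning; 0.0 states the intent. A small illustration (not LAMMPS code) of the distinction:

    #include <cstddef>

    // Illustration only: NULL is a null-pointer constant, so assigning it to a
    // floating-point member relies on an implicit 0 conversion; 0.0 says what
    // is meant.
    struct Example {
      double *table;   // pointer: NULL is the right "empty" value
      double  cutoff;  // scalar:  initialize with 0.0, not NULL
      Example() : table(NULL), cutoff(0.0) {}
    };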
@ -97,8 +97,6 @@ void PairLJCutCoulLongKokkos<DeviceType>::compute(int eflag_in, int vflag_in)
|
|||
|
||||
if (neighflag == FULL || neighflag == FULLCLUSTER) no_virial_fdotr_compute = 1;
|
||||
|
||||
double evdwl = 0.0;
|
||||
double ecoul = 0.0;
|
||||
if (eflag || vflag) ev_setup(eflag,vflag);
|
||||
else evflag = vflag_fdotr = 0;
|
||||
|
||||
|
|
|
@ -85,7 +85,6 @@ void PairLJCutKokkos<DeviceType>::compute(int eflag_in, int vflag_in)
|
|||
|
||||
if (neighflag == FULL || neighflag == FULLCLUSTER) no_virial_fdotr_compute = 1;
|
||||
|
||||
double evdwl = 0.0;
|
||||
if (eflag || vflag) ev_setup(eflag,vflag);
|
||||
else evflag = vflag_fdotr = 0;
|
||||
|
||||
|
|
|
@ -98,7 +98,6 @@ void PairTableKokkos<DeviceType>::compute_style(int eflag_in, int vflag_in)
|
|||
|
||||
if (neighflag == FULL || neighflag == FULLCLUSTER) no_virial_fdotr_compute = 1;
|
||||
|
||||
double evdwl = 0.0;
|
||||
if (eflag || vflag) ev_setup(eflag,vflag);
|
||||
else evflag = vflag_fdotr = 0;
|
||||
|
||||
|
|
|
@ -146,37 +146,37 @@ void GridComm::ghost_notify()
|
|||
int nplanes = inxlo - outxlo;
|
||||
if (procxlo != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,procxlo,0,
|
||||
&ghostxhi,1,MPI_INT,procxhi,0,gridcomm,&status);
|
||||
&ghostxhi,1,MPI_INT,procxhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostxhi = nplanes;
|
||||
|
||||
nplanes = outxhi - inxhi;
|
||||
if (procxhi != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,procxhi,0,
|
||||
&ghostxlo,1,MPI_INT,procxlo,0,gridcomm,&status);
|
||||
&ghostxlo,1,MPI_INT,procxlo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostxlo = nplanes;
|
||||
|
||||
nplanes = inylo - outylo;
|
||||
if (procylo != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,procylo,0,
|
||||
&ghostyhi,1,MPI_INT,procyhi,0,gridcomm,&status);
|
||||
&ghostyhi,1,MPI_INT,procyhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostyhi = nplanes;
|
||||
|
||||
nplanes = outyhi - inyhi;
|
||||
if (procyhi != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,procyhi,0,
|
||||
&ghostylo,1,MPI_INT,procylo,0,gridcomm,&status);
|
||||
&ghostylo,1,MPI_INT,procylo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostylo = nplanes;
|
||||
|
||||
nplanes = inzlo - outzlo;
|
||||
if (proczlo != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,proczlo,0,
|
||||
&ghostzhi,1,MPI_INT,proczhi,0,gridcomm,&status);
|
||||
&ghostzhi,1,MPI_INT,proczhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostzhi = nplanes;
|
||||
|
||||
nplanes = outzhi - inzhi;
|
||||
if (proczhi != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,proczhi,0,
|
||||
&ghostzlo,1,MPI_INT,proczlo,0,gridcomm,&status);
|
||||
&ghostzlo,1,MPI_INT,proczlo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else ghostzlo = nplanes;
|
||||
}
|
||||
|
||||
|
@ -243,7 +243,7 @@ void GridComm::setup()
|
|||
|
||||
if (procxlo != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,procxlo,0,
|
||||
&recvplanes,1,MPI_INT,procxhi,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,procxhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -285,7 +285,7 @@ void GridComm::setup()
|
|||
|
||||
if (procxhi != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,procxhi,0,
|
||||
&recvplanes,1,MPI_INT,procxlo,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,procxlo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -327,7 +327,7 @@ void GridComm::setup()
|
|||
|
||||
if (procylo != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,procylo,0,
|
||||
&recvplanes,1,MPI_INT,procyhi,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,procyhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -369,7 +369,7 @@ void GridComm::setup()
|
|||
|
||||
if (procyhi != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,procyhi,0,
|
||||
&recvplanes,1,MPI_INT,procylo,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,procylo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -411,7 +411,7 @@ void GridComm::setup()
|
|||
|
||||
if (proczlo != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,proczlo,0,
|
||||
&recvplanes,1,MPI_INT,proczhi,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,proczhi,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -453,7 +453,7 @@ void GridComm::setup()
|
|||
|
||||
if (proczhi != me)
|
||||
MPI_Sendrecv(&sendplanes,1,MPI_INT,proczhi,0,
|
||||
&recvplanes,1,MPI_INT,proczlo,0,gridcomm,&status);
|
||||
&recvplanes,1,MPI_INT,proczlo,0,gridcomm,MPI_STATUS_IGNORE);
|
||||
else recvplanes = sendplanes;
|
||||
|
||||
swap[nswap].nunpack =
|
||||
|
@ -500,7 +500,7 @@ void GridComm::forward_comm(KSpace *kspace, int which)
|
|||
swap[m].recvproc,0,gridcomm,&request);
|
||||
MPI_Send(buf1,nforward*swap[m].npack,MPI_FFT_SCALAR,
|
||||
swap[m].sendproc,0,gridcomm);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
kspace->unpack_forward(which,buf2,swap[m].nunpack,swap[m].unpacklist);
|
||||
|
@ -525,7 +525,7 @@ void GridComm::reverse_comm(KSpace *kspace, int which)
|
|||
swap[m].sendproc,0,gridcomm,&request);
|
||||
MPI_Send(buf1,nreverse*swap[m].nunpack,MPI_FFT_SCALAR,
|
||||
swap[m].recvproc,0,gridcomm);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
kspace->unpack_reverse(which,buf2,swap[m].npack,swap[m].packlist);
|
||||
|
|
|
@@ -50,7 +50,6 @@ class GridComm : protected Pointers {
   int nforward,nreverse;
   MPI_Comm gridcomm;
   MPI_Request request;
-  MPI_Status status;

   // in = inclusive indices of 3d grid chunk that I own
   // out = inclusive indices of 3d grid chunk I own plus ghosts I use
@@ -1580,10 +1580,16 @@ void *PairLJLongTIP4PLong::extract(const char *str, int &dim)
     &mix_flag, &cut_lj_global, NULL};
   int i;

-  for (i=0; ids[i]&&strcmp(ids[i], str); ++i);
-  if (i <= 2) dim = 2;
-  else dim = 0;
-  return ptrs[i];
+  i=0;
+  while (ids[i] != NULL) {
+    if (i <=2) dim = 2;
+    else dim = 0;
+
+    if (strcmp(ids[i],str) == 0)
+      return ptrs[i];
+
+    ++i;
+  }
+  return NULL;
 }

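The rewrite changes what happens for an unknown keyword: the old one-line for loop stopped either at a match or at the NULL terminator and then returned ptrs[i] unconditionally, so a miss handed back the terminating NULL entry with dim set from whatever index was reached, while the explicit loop returns a pointer only on an exact strcmp match and NULL otherwise. A hedged, self-contained sketch of the same keyword-table lookup, with stand-in tables and a stand-in dim rule rather than the pair style's real ones:

    #include <cstring>
    #include <cstddef>

    // Hedged sketch of the extract()-style lookup; ids/ptrs and the dim rule
    // are illustrative placeholders.
    void *lookup(const char *str, int &dim)
    {
      static const char *ids[] = {"epsilon", "sigma", "cut_coul", NULL};
      static double eps, sig, cutc;
      void *ptrs[] = {&eps, &sig, &cutc, NULL};

      for (int i = 0; ids[i] != NULL; ++i) {
        dim = (i <= 1) ? 2 : 0;          // per-type tables vs. global scalars
        if (strcmp(ids[i], str) == 0) return ptrs[i];
      }
      return NULL;                       // unknown keyword: explicit miss
    }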
@@ -65,7 +65,6 @@ void remap_3d(FFT_SCALAR *in, FFT_SCALAR *out, FFT_SCALAR *buf,
   // use point-to-point communication

   if (!plan->usecollective) {
-    MPI_Status status;
     int i,isend,irecv;
     FFT_SCALAR *scratch;

@@ -105,7 +104,7 @@ void remap_3d(FFT_SCALAR *in, FFT_SCALAR *out, FFT_SCALAR *buf,
     // unpack all messages from scratch -> out

     for (i = 0; i < plan->nrecv; i++) {
-      MPI_Waitany(plan->nrecv,plan->request,&irecv,&status);
+      MPI_Waitany(plan->nrecv,plan->request,&irecv,MPI_STATUS_IGNORE);
       plan->unpack(&scratch[plan->recv_bufloc[irecv]],
                    &out[plan->recv_offset[irecv]],&plan->unpackplan[irecv]);
     }
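MPI_Waitany already reports which request completed through its index output, so the status object adds nothing unless message metadata is needed. A hedged, generic sketch of the same completes-in-any-order unpacking loop, with placeholders for the plan fields used in remap_3d():

    #include <mpi.h>

    // Hedged sketch: nrecv, requests and unpack_one() stand in for
    // plan->nrecv, plan->request and plan->unpack.
    void drain_receives(int nrecv, MPI_Request *requests,
                        void (*unpack_one)(int which))
    {
      for (int i = 0; i < nrecv; i++) {
        int which;
        // the finished request is identified via `which`; the status itself
        // is not consulted, so MPI_STATUS_IGNORE is enough
        MPI_Waitany(nrecv, requests, &which, MPI_STATUS_IGNORE);
        unpack_one(which);
      }
    }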
@@ -127,7 +127,7 @@ void PairComb::compute(int eflag, int vflag)
   int i,j,k,ii,jj,kk,inum,jnum,iparam_i;
   int itype,jtype,ktype,iparam_ij,iparam_ijk;
   tagint itag,jtag;
-  double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,ecoul,fpair;
+  double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
   double rsq,rsq1,rsq2;
   double delr1[3],delr2[3],fi[3],fj[3],fk[3];
   double zeta_ij,prefactor;
@@ -141,7 +141,7 @@ void PairComb::compute(int eflag, int vflag)
   double vionij,fvionij,sr1,sr2,sr3,Eov,Fov;
   int sht_jnum, *sht_jlist, nj;

-  evdwl = ecoul = 0.0;
+  evdwl = 0.0;
   if (eflag || vflag) ev_setup(eflag,vflag);
   else evflag = vflag_fdotr = vflag_atom = 0;

@ -1637,7 +1637,7 @@ double PairComb::yasu_char(double *qf_fix, int &igroup)
|
|||
double xtmp,ytmp,ztmp;
|
||||
double rsq1,delr1[3];
|
||||
int *ilist,*jlist,*numneigh,**firstneigh;
|
||||
double iq,jq,fqi,fqj,fqij,fqjj;
|
||||
double iq,jq,fqi,fqij,fqjj;
|
||||
double potal,fac11,fac11e,sr1,sr2,sr3;
|
||||
int mr1,mr2,mr3,inty,nj;
|
||||
|
||||
|
@ -1672,7 +1672,7 @@ double PairComb::yasu_char(double *qf_fix, int &igroup)
|
|||
|
||||
// loop over full neighbor list of my atoms
|
||||
|
||||
fqi = fqj = fqij = fqjj = 0.0;
|
||||
fqi = fqij = fqjj = 0.0;
|
||||
|
||||
for (ii = 0; ii < inum; ii ++) {
|
||||
i = ilist[ii];
|
||||
|
@ -1871,9 +1871,9 @@ void PairComb::qfo_short(Param *param, int i, int j, double rsq,
|
|||
double iq, double jq, double &fqij, double &fqjj)
|
||||
{
|
||||
double r,tmp_fc,tmp_exp1,tmp_exp2;
|
||||
double bigA,Asi,Asj,vrcs;
|
||||
double Asi,Asj,vrcs;
|
||||
double romi = param->addrep,rrcs = param->bigr + param->bigd;
|
||||
double qi,qj,Di,Dj,bigB,Bsi,Bsj;
|
||||
double qi,qj,Di,Dj,Bsi,Bsj;
|
||||
double QUchi,QOchi,QUchj,QOchj,YYDiqp,YYDjqp;
|
||||
double YYAsiqp,YYAsjqp,YYBsiqp,YYBsjqp;
|
||||
double caj,cbj,bij,cfqr,cfqs;
|
||||
|
@ -1883,7 +1883,7 @@ void PairComb::qfo_short(Param *param, int i, int j, double rsq,
|
|||
double rslp,rslp2,rslp4,arr1,arr2,fc2j,fc3j;
|
||||
|
||||
qi = iq; qj = jq; r = sqrt(rsq);
|
||||
Di = Dj = Asi = Asj = bigA = Bsi = Bsj = bigB = 0.0;
|
||||
Di = Dj = Asi = Asj = Bsi = Bsj = 0.0;
|
||||
QUchi = QOchi = QUchj = QOchj = YYDiqp = YYDjqp =0.0;
|
||||
YYAsiqp = YYAsjqp = YYBsiqp = YYBsjqp = 0.0;
|
||||
caj = cbj = vrcs = cfqr = cfqs = 0.0;
|
||||
|
|
|
@ -955,7 +955,7 @@ void PairComb3::compute(int eflag, int vflag)
|
|||
int sht_jnum,*sht_jlist,sht_lnum,*sht_llist;
|
||||
int sht_mnum,*sht_mlist,sht_pnum,*sht_plist;
|
||||
int *ilist,*jlist,*numneigh,**firstneigh,mr1,mr2,mr3,inty,nj;
|
||||
double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,ecoul,fpair;
|
||||
double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
|
||||
double rsq,rsq1,rsq2,rsq3,iq,jq,yaself;
|
||||
double eng_tmp,vionij,fvionij,sr1,sr2,sr3;
|
||||
double zeta_ij,prefac_ij1,prefac_ij2,prefac_ij3,prefac_ij4,prefac_ij5;
|
||||
|
@ -986,7 +986,7 @@ void PairComb3::compute(int eflag, int vflag)
|
|||
double delrl[3], delrm[3], delrp[3], ddprx[3], srmu;
|
||||
double zet_addi,zet_addj;
|
||||
|
||||
evdwl = ecoul = eng_tmp = 0.0;
|
||||
evdwl = eng_tmp = 0.0;
|
||||
|
||||
if (eflag || vflag) ev_setup(eflag,vflag);
|
||||
else evflag = vflag_fdotr = vflag_atom = 0;
|
||||
|
@ -1953,7 +1953,7 @@ double PairComb3::self(Param *param, double qi)
|
|||
void PairComb3::comb_fa(double r, Param *parami, Param *paramj, double iq,
|
||||
double jq, double &att_eng, double &att_force)
|
||||
{
|
||||
double bigB,Bsi,FBsi;
|
||||
double Bsi;
|
||||
double qi,qj,Di,Dj;
|
||||
double AlfDiAlfDj, YYBn, YYBj;
|
||||
double alfij1= parami->alpha1;
|
||||
|
@ -1965,7 +1965,7 @@ void PairComb3::comb_fa(double r, Param *parami, Param *paramj, double iq,
|
|||
if (r > parami->bigr + parami->bigd) Bsi = 0.0;
|
||||
|
||||
qi = iq; qj = jq;
|
||||
Di = Dj = Bsi = FBsi = bigB = 0.0;
|
||||
Di = Dj = Bsi = 0.0;
|
||||
Di = parami->DU + pow(fabs(parami->bD*(parami->QU-qi)),parami->nD);
|
||||
Dj = paramj->DU + pow(fabs(paramj->bD*(paramj->QU-qj)),paramj->nD);
|
||||
YYBn = (parami->aB-fabs(pow(parami->bB*(qi-parami->Qo),10)));
|
||||
|
@ -3265,7 +3265,7 @@ double PairComb3::combqeq(double *qf_fix, int &igroup)
|
|||
int *ilist,*jlist,*numneigh,**firstneigh;
|
||||
int mr1,mr2,mr3,inty,nj;
|
||||
double xtmp,ytmp,ztmp,rsq1,delrj[3];
|
||||
double iq,jq,fqi,fqj,fqij,fqji,sr1,sr2,sr3;
|
||||
double iq,jq,fqi,fqij,fqji,sr1,sr2,sr3;
|
||||
double potal,fac11,fac11e;
|
||||
int sht_jnum,*sht_jlist;
|
||||
tagint itag, jtag;
|
||||
|
@ -3300,7 +3300,7 @@ double PairComb3::combqeq(double *qf_fix, int &igroup)
|
|||
|
||||
// loop over full neighbor list of my atoms
|
||||
|
||||
fqi = fqj = fqij = fqji = 0.0;
|
||||
fqi = fqij = fqji = 0.0;
|
||||
|
||||
for (ii = 0; ii < inum; ii ++) {
|
||||
i = ilist[ii];
|
||||
|
|
|
@ -144,7 +144,6 @@ void FixThermalConductivity::end_of_step()
|
|||
{
|
||||
int i,j,m,insert;
|
||||
double coord,ke;
|
||||
MPI_Status status;
|
||||
struct {
|
||||
double value;
|
||||
int proc;
|
||||
|
@ -283,7 +282,7 @@ void FixThermalConductivity::end_of_step()
|
|||
if (rmass) sbuf[3] = rmass[j];
|
||||
else sbuf[3] = mass[type[j]];
|
||||
MPI_Sendrecv(sbuf,4,MPI_DOUBLE,all[1].proc,0,
|
||||
rbuf,4,MPI_DOUBLE,all[1].proc,0,world,&status);
|
||||
rbuf,4,MPI_DOUBLE,all[1].proc,0,world,MPI_STATUS_IGNORE);
|
||||
vcm[0] = (sbuf[3]*sbuf[0] + rbuf[3]*rbuf[0]) / (sbuf[3] + rbuf[3]);
|
||||
vcm[1] = (sbuf[3]*sbuf[1] + rbuf[3]*rbuf[1]) / (sbuf[3] + rbuf[3]);
|
||||
vcm[2] = (sbuf[3]*sbuf[2] + rbuf[3]*rbuf[2]) / (sbuf[3] + rbuf[3]);
|
||||
|
@ -302,7 +301,7 @@ void FixThermalConductivity::end_of_step()
|
|||
if (rmass) sbuf[3] = rmass[j];
|
||||
else sbuf[3] = mass[type[j]];
|
||||
MPI_Sendrecv(sbuf,4,MPI_DOUBLE,all[0].proc,0,
|
||||
rbuf,4,MPI_DOUBLE,all[0].proc,0,world,&status);
|
||||
rbuf,4,MPI_DOUBLE,all[0].proc,0,world,MPI_STATUS_IGNORE);
|
||||
vcm[0] = (sbuf[3]*sbuf[0] + rbuf[3]*rbuf[0]) / (sbuf[3] + rbuf[3]);
|
||||
vcm[1] = (sbuf[3]*sbuf[1] + rbuf[3]*rbuf[1]) / (sbuf[3] + rbuf[3]);
|
||||
vcm[2] = (sbuf[3]*sbuf[2] + rbuf[3]*rbuf[2]) / (sbuf[3] + rbuf[3]);
|
||||
|
|
|
@ -154,7 +154,6 @@ void FixViscosity::end_of_step()
|
|||
{
|
||||
int i,m,insert;
|
||||
double coord,delta;
|
||||
MPI_Status status;
|
||||
struct {
|
||||
double value;
|
||||
int proc;
|
||||
|
@ -276,7 +275,7 @@ void FixViscosity::end_of_step()
|
|||
if (rmass) sbuf[1] = rmass[ipos];
|
||||
else sbuf[1] = mass[type[ipos]];
|
||||
MPI_Sendrecv(sbuf,2,MPI_DOUBLE,all[1].proc,0,
|
||||
rbuf,2,MPI_DOUBLE,all[1].proc,0,world,&status);
|
||||
rbuf,2,MPI_DOUBLE,all[1].proc,0,world,MPI_STATUS_IGNORE);
|
||||
vcm = (sbuf[1]*sbuf[0] + rbuf[1]*rbuf[0]) / (sbuf[1] + rbuf[1]);
|
||||
v[ipos][vdim] = 2.0 * vcm - sbuf[0];
|
||||
pswap += sbuf[1] * (vcm - sbuf[0]);
|
||||
|
@ -287,7 +286,7 @@ void FixViscosity::end_of_step()
|
|||
if (rmass) sbuf[1] = rmass[ineg];
|
||||
else sbuf[1] = mass[type[ineg]];
|
||||
MPI_Sendrecv(sbuf,2,MPI_DOUBLE,all[0].proc,0,
|
||||
rbuf,2,MPI_DOUBLE,all[0].proc,0,world,&status);
|
||||
rbuf,2,MPI_DOUBLE,all[0].proc,0,world,MPI_STATUS_IGNORE);
|
||||
vcm = (sbuf[1]*sbuf[0] + rbuf[1]*rbuf[0]) / (sbuf[1] + rbuf[1]);
|
||||
v[ineg][vdim] = 2.0 * vcm - sbuf[0];
|
||||
pswap -= sbuf[1] * (vcm - sbuf[0]);
|
||||
|
|
|
@@ -275,8 +275,6 @@ void DumpAtomMPIIO::write_header(bigint ndump)

 void DumpAtomMPIIO::header_binary(bigint ndump)
 {
-  MPI_Status mpiStatus;
-
   if (performEstimate) {

     headerBuffer = (char *) malloc((2*sizeof(bigint)) + (9*sizeof(int)) + (6*sizeof(double)));
@@ -320,7 +318,7 @@ void DumpAtomMPIIO::header_binary(bigint ndump)
   }
   else { // write data
     if (me == 0)
-      MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,&mpiStatus);
+      MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,MPI_STATUS_IGNORE);
     mpifo += headerSize;
     free(headerBuffer);
   }
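The MPI-IO dump writers follow the same rule as the point-to-point calls: MPI_File_write_at and MPI_File_write_at_all fill a status these routines never query, since they track the file offset themselves, so MPI_STATUS_IGNORE removes one stack variable per writer. A hedged sketch of a rank-0 header write followed by the shared offset bump, with illustrative names rather than the dump_*_mpiio members:

    #include <mpi.h>
    #include <cstring>

    // Hedged sketch only: fh, offset and the header text are placeholders.
    void write_header(MPI_File fh, MPI_Offset &offset, int me)
    {
      char header[] = "ITEM: TIMESTEP\n";
      int len = (int) strlen(header);
      if (me == 0)
        MPI_File_write_at(fh, offset, header, len, MPI_CHAR,
                          MPI_STATUS_IGNORE);  // status never inspected
      offset += len;                           // every rank advances its offset
    }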
@ -330,8 +328,6 @@ void DumpAtomMPIIO::header_binary(bigint ndump)
|
|||
|
||||
void DumpAtomMPIIO::header_binary_triclinic(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc((2*sizeof(bigint)) + (9*sizeof(int)) + (6*sizeof(double)));
|
||||
|
@ -386,7 +382,7 @@ void DumpAtomMPIIO::header_binary_triclinic(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -396,8 +392,6 @@ void DumpAtomMPIIO::header_binary_triclinic(bigint ndump)
|
|||
|
||||
void DumpAtomMPIIO::header_item(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -416,7 +410,7 @@ void DumpAtomMPIIO::header_item(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -426,9 +420,6 @@ void DumpAtomMPIIO::header_item(bigint ndump)
|
|||
|
||||
void DumpAtomMPIIO::header_item_triclinic(bigint ndump)
|
||||
{
|
||||
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -447,7 +438,7 @@ void DumpAtomMPIIO::header_item_triclinic(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -465,7 +456,6 @@ void DumpAtomMPIIO::write_data(int n, double *mybuf)
|
|||
|
||||
void DumpAtomMPIIO::write_binary(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
n *= size_one;
|
||||
|
||||
if (performEstimate) {
|
||||
|
@ -483,7 +473,7 @@ void DumpAtomMPIIO::write_binary(int n, double *mybuf)
|
|||
memory->create(bufWithSize,byteBufSize,"dump:bufWithSize");
|
||||
memcpy(bufWithSize,(char*)(&n),sizeof(int));
|
||||
memcpy(&((char*)bufWithSize)[sizeof(int)],mybuf,(n*sizeof(double)));
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,bufWithSize,byteBufSize,MPI_BYTE,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,bufWithSize,byteBufSize,MPI_BYTE,MPI_STATUS_IGNORE);
|
||||
memory->destroy(bufWithSize);
|
||||
|
||||
if (flush_flag)
|
||||
|
@ -495,8 +485,6 @@ void DumpAtomMPIIO::write_binary(int n, double *mybuf)
|
|||
|
||||
void DumpAtomMPIIO::write_string(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
#if defined(_OPENMP)
|
||||
|
@ -516,7 +504,7 @@ void DumpAtomMPIIO::write_string(int n, double *mybuf)
|
|||
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
|
||||
}
|
||||
else {
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
if (flush_flag)
|
||||
MPI_File_sync(mpifh);
|
||||
}
|
||||
|
|
|
@ -279,8 +279,6 @@ void DumpCFGMPIIO::write_header(bigint n)
|
|||
// for unwrapped coords, set to UNWRAPEXPAND (10.0)
|
||||
// so molecules are not split across periodic box boundaries
|
||||
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -313,7 +311,7 @@ void DumpCFGMPIIO::write_header(bigint n)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -329,7 +327,6 @@ void DumpCFGMPIIO::write_header(bigint n)
|
|||
int DumpCFGMPIIO::convert_string_omp(int n, double *mybuf)
|
||||
{
|
||||
|
||||
MPI_Status mpiStatus;
|
||||
char **mpifh_buffer_line_per_thread;
|
||||
int mpifhStringCount;
|
||||
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
|
||||
|
@ -483,8 +480,6 @@ void DumpCFGMPIIO::write_data(int n, double *mybuf)
|
|||
|
||||
void DumpCFGMPIIO::write_string(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
#if defined(_OPENMP)
|
||||
|
@ -504,7 +499,7 @@ void DumpCFGMPIIO::write_string(int n, double *mybuf)
|
|||
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
|
||||
}
|
||||
else {
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
if (flush_flag)
|
||||
MPI_File_sync(mpifh);
|
||||
}
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* ----------------------------------------------------------------------
+/* -*- c++ -*- ----------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov
@ -319,8 +319,6 @@ void DumpCustomMPIIO::write_header(bigint ndump)
|
|||
|
||||
void DumpCustomMPIIO::header_binary(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc((2*sizeof(bigint)) + (9*sizeof(int)) + (6*sizeof(double)));
|
||||
|
@ -364,7 +362,7 @@ void DumpCustomMPIIO::header_binary(bigint ndump)
|
|||
}
|
||||
else { // write data
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -374,8 +372,6 @@ void DumpCustomMPIIO::header_binary(bigint ndump)
|
|||
|
||||
void DumpCustomMPIIO::header_binary_triclinic(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc((2*sizeof(bigint)) + (9*sizeof(int)) + (6*sizeof(double)));
|
||||
|
@ -430,7 +426,7 @@ void DumpCustomMPIIO::header_binary_triclinic(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_BYTE,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -440,8 +436,6 @@ void DumpCustomMPIIO::header_binary_triclinic(bigint ndump)
|
|||
|
||||
void DumpCustomMPIIO::header_item(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -460,7 +454,7 @@ void DumpCustomMPIIO::header_item(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -470,8 +464,6 @@ void DumpCustomMPIIO::header_item(bigint ndump)
|
|||
|
||||
void DumpCustomMPIIO::header_item_triclinic(bigint ndump)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -490,7 +482,7 @@ void DumpCustomMPIIO::header_item_triclinic(bigint ndump)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -507,7 +499,6 @@ void DumpCustomMPIIO::write_data(int n, double *mybuf)
|
|||
|
||||
void DumpCustomMPIIO::write_binary(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
n *= size_one;
|
||||
|
||||
if (performEstimate) {
|
||||
|
@ -525,7 +516,7 @@ void DumpCustomMPIIO::write_binary(int n, double *mybuf)
|
|||
memory->create(bufWithSize,byteBufSize,"dump:bufWithSize");
|
||||
memcpy(bufWithSize,(char*)(&n),sizeof(int));
|
||||
memcpy(&((char*)bufWithSize)[sizeof(int)],mybuf,(n*sizeof(double)));
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,bufWithSize,byteBufSize,MPI_BYTE,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,bufWithSize,byteBufSize,MPI_BYTE,MPI_STATUS_IGNORE);
|
||||
memory->destroy(bufWithSize);
|
||||
|
||||
if (flush_flag)
|
||||
|
@ -537,8 +528,6 @@ void DumpCustomMPIIO::write_binary(int n, double *mybuf)
|
|||
|
||||
void DumpCustomMPIIO::write_string(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
#if defined(_OPENMP)
|
||||
|
@ -558,7 +547,7 @@ void DumpCustomMPIIO::write_string(int n, double *mybuf)
|
|||
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
|
||||
}
|
||||
else {
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
if (flush_flag)
|
||||
MPI_File_sync(mpifh);
|
||||
}
|
||||
|
|
|
@ -249,8 +249,6 @@ void DumpXYZMPIIO::init_style()
|
|||
|
||||
void DumpXYZMPIIO::write_header(bigint n)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
headerBuffer = (char *) malloc(MAX_TEXT_HEADER_SIZE);
|
||||
|
@ -262,7 +260,7 @@ void DumpXYZMPIIO::write_header(bigint n)
|
|||
else { // write data
|
||||
|
||||
if (me == 0)
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at(mpifh,mpifo,headerBuffer,headerSize,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
mpifo += headerSize;
|
||||
free(headerBuffer);
|
||||
}
|
||||
|
@ -279,8 +277,6 @@ void DumpXYZMPIIO::write_data(int n, double *mybuf)
|
|||
|
||||
void DumpXYZMPIIO::write_string(int n, double *mybuf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
if (performEstimate) {
|
||||
|
||||
#if defined(_OPENMP)
|
||||
|
@ -300,7 +296,7 @@ void DumpXYZMPIIO::write_string(int n, double *mybuf)
|
|||
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
|
||||
}
|
||||
else { // write data
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,&mpiStatus);
|
||||
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
|
||||
if (flush_flag)
|
||||
MPI_File_sync(mpifh);
|
||||
}
|
||||
|
|
|
@@ -81,7 +81,6 @@ void RestartMPIIO::openForWrite(char *filename)

 void RestartMPIIO::write(MPI_Offset headerOffset, int send_size, double *buf)
 {
-  MPI_Status mpiStatus;
   bigint incPrefix = 0;
   bigint bigintSendSize = (bigint) send_size;
   MPI_Scan(&bigintSendSize,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
@@ -103,7 +102,7 @@ void RestartMPIIO::write(MPI_Offset headerOffset, int send_size, double *buf)

   err = MPI_File_write_at_all(mpifh,headerOffset +
                               ((incPrefix-bigintSendSize)*sizeof(double)),
-                              buf,send_size,MPI_DOUBLE,&mpiStatus);
+                              buf,send_size,MPI_DOUBLE,MPI_STATUS_IGNORE);
   if (err != MPI_SUCCESS) {
     char str[MPI_MAX_ERROR_STRING+128];
     char mpiErrorString[MPI_MAX_ERROR_STRING];
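Dropping the status does not weaken error handling here: MPI reports failures through the return code, which this routine already checks and expands with MPI_Error_string. A hedged sketch of that return-code handling, with report-to-stderr standing in for LAMMPS's error machinery:

    #include <mpi.h>
    #include <cstdio>

    // Hedged sketch of the check visible in RestartMPIIO::write(); fprintf
    // stands in for the error->one() call used in LAMMPS.
    void checked_write(MPI_File fh, MPI_Offset offset, double *buf, int n)
    {
      int err = MPI_File_write_at_all(fh, offset, buf, n, MPI_DOUBLE,
                                      MPI_STATUS_IGNORE);
      if (err != MPI_SUCCESS) {
        char mpiErrorString[MPI_MAX_ERROR_STRING];
        int mpiErrorStringLength;
        MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
        std::fprintf(stderr, "Cannot write to restart file: %s\n", mpiErrorString);
      }
    }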
@ -126,8 +125,6 @@ void RestartMPIIO::write(MPI_Offset headerOffset, int send_size, double *buf)
|
|||
|
||||
void RestartMPIIO::read(MPI_Offset chunkOffset, bigint chunkSize, double *buf)
|
||||
{
|
||||
MPI_Status mpiStatus;
|
||||
|
||||
int intChunkSize;
|
||||
bigint remainingSize = 0;
|
||||
if (chunkSize > INT_MAX) {
|
||||
|
@ -137,7 +134,7 @@ void RestartMPIIO::read(MPI_Offset chunkOffset, bigint chunkSize, double *buf)
|
|||
else intChunkSize = (int) chunkSize;
|
||||
|
||||
int err = MPI_File_read_at_all(mpifh,chunkOffset,buf,intChunkSize,
|
||||
MPI_DOUBLE,&mpiStatus);
|
||||
MPI_DOUBLE,MPI_STATUS_IGNORE);
|
||||
if (err != MPI_SUCCESS) {
|
||||
char str[MPI_MAX_ERROR_STRING+128];
|
||||
char mpiErrorString[MPI_MAX_ERROR_STRING];
|
||||
|
@ -161,7 +158,7 @@ void RestartMPIIO::read(MPI_Offset chunkOffset, bigint chunkSize, double *buf)
|
|||
remainingSize = 0;
|
||||
}
|
||||
int err = MPI_File_read_at(mpifh,currentOffset,&buf[bufOffset],
|
||||
currentChunkSize,MPI_DOUBLE,&mpiStatus);
|
||||
currentChunkSize,MPI_DOUBLE,MPI_STATUS_IGNORE);
|
||||
if (err != MPI_SUCCESS) {
|
||||
char str[MPI_MAX_ERROR_STRING+128];
|
||||
char mpiErrorString[MPI_MAX_ERROR_STRING];
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* ----------------------------------------------------------------------
+/* -*- c++ -*- ----------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov
@ -222,12 +222,12 @@ double FixQEqSlater::calculate_H(double zei, double zej, double zj,
|
|||
double erfcr = erfc(alpha*r);
|
||||
double qqrd2e = force->qqrd2e;
|
||||
|
||||
double etmp, etmp1, etmp2, etmp3, etmp4;
|
||||
double etmp1, etmp2;
|
||||
double e1, e2, e3, e4;
|
||||
double ci_jfi, ci_fifj;
|
||||
|
||||
e1 = e2 = e3 = e4 = 0.0;
|
||||
etmp = etmp1 = etmp2 = etmp3 = etmp4 = 0.0;
|
||||
etmp1 = etmp2 = 0.0;
|
||||
|
||||
ci_jfi = -zei*exp2zir - rinv*exp2zir;
|
||||
|
||||
|
@ -394,11 +394,9 @@ void FixQEqSlater::sparse_matvec( sparse_matrix *A, double *x, double *b )
|
|||
{
|
||||
int i, j, itr_j;
|
||||
int nn, NN;
|
||||
int *ilist;
|
||||
|
||||
nn = atom->nlocal;
|
||||
NN = atom->nlocal + atom->nghost;
|
||||
ilist = list->ilist;
|
||||
|
||||
double r = cutoff;
|
||||
double woself = 0.50*erfc(alpha*r)/r + alpha/MY_PIS;
|
||||
|
|
|
@ -112,7 +112,6 @@ void FixReaxBonds::OutputReaxBonds(bigint ntimestep, FILE *fp)
|
|||
double cutof3;
|
||||
double *buf;
|
||||
MPI_Request irequest;
|
||||
MPI_Status istatus;
|
||||
|
||||
MPI_Comm_size(world,&nprocs);
|
||||
|
||||
|
@ -198,7 +197,7 @@ void FixReaxBonds::OutputReaxBonds(bigint ntimestep, FILE *fp)
|
|||
} else {
|
||||
MPI_Irecv(&buf[0],nbuf,MPI_DOUBLE,inode,0,world,&irequest);
|
||||
MPI_Send(&itmp,0,MPI_INT,inode,0,world);
|
||||
MPI_Wait(&irequest,&istatus);
|
||||
MPI_Wait(&irequest,MPI_STATUS_IGNORE);
|
||||
nlocal_tmp = nint(buf[j++]);
|
||||
}
|
||||
|
||||
|
@ -237,7 +236,7 @@ void FixReaxBonds::OutputReaxBonds(bigint ntimestep, FILE *fp)
|
|||
}
|
||||
|
||||
} else {
|
||||
MPI_Recv(&itmp,0,MPI_INT,0,0,world,&istatus);
|
||||
MPI_Recv(&itmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
|
||||
MPI_Rsend(&buf[0],nbuf_local,MPI_DOUBLE,0,0,world);
|
||||
}
|
||||
|
||||
|
|
|
@@ -133,7 +133,6 @@ void FixNEB::min_post_force(int vflag)
   double vprev,vnext,vmax,vmin;
   double delx,dely,delz;
   double delta1[3],delta2[3];
-  MPI_Status status;
   MPI_Request request;

   // veng = PE of this replica
@@ -142,11 +141,11 @@ void FixNEB::min_post_force(int vflag)
   veng = pe->compute_scalar();

   if (ireplica < nreplica-1) MPI_Send(&veng,1,MPI_DOUBLE,procnext,0,uworld);
-  if (ireplica > 0) MPI_Recv(&vprev,1,MPI_DOUBLE,procprev,0,uworld,&status);
+  if (ireplica > 0) MPI_Recv(&vprev,1,MPI_DOUBLE,procprev,0,uworld,MPI_STATUS_IGNORE);

   if (ireplica > 0) MPI_Send(&veng,1,MPI_DOUBLE,procprev,0,uworld);
   if (ireplica < nreplica-1)
-    MPI_Recv(&vnext,1,MPI_DOUBLE,procnext,0,uworld,&status);
+    MPI_Recv(&vnext,1,MPI_DOUBLE,procnext,0,uworld,MPI_STATUS_IGNORE);

   // xprev,xnext = atom coords of adjacent replicas
   // assume order of atoms in all replicas is the same
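FixNEB strings the replicas into a chain: each replica first passes its energy "up" (send to the next replica, receive from the previous one) and then "down", so every interior replica ends up holding both neighbor energies. A hedged sketch of that chain exchange, with argument names that mirror the roles of veng/vprev/vnext/procprev/procnext but are placeholders here:

    #include <mpi.h>

    // Hedged sketch of the neighbor-replica energy exchange in min_post_force().
    void exchange_neighbor_energies(double veng, double &vprev, double &vnext,
                                    int ireplica, int nreplica,
                                    int procprev, int procnext, MPI_Comm uworld)
    {
      // up the chain: send to next, receive from previous
      if (ireplica < nreplica-1) MPI_Send(&veng,1,MPI_DOUBLE,procnext,0,uworld);
      if (ireplica > 0)
        MPI_Recv(&vprev,1,MPI_DOUBLE,procprev,0,uworld,MPI_STATUS_IGNORE);

      // down the chain: send to previous, receive from next
      if (ireplica > 0) MPI_Send(&veng,1,MPI_DOUBLE,procprev,0,uworld);
      if (ireplica < nreplica-1)
        MPI_Recv(&vnext,1,MPI_DOUBLE,procnext,0,uworld,MPI_STATUS_IGNORE);
    }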
@ -161,13 +160,13 @@ void FixNEB::min_post_force(int vflag)
|
|||
MPI_Irecv(xprev[0],3*nlocal,MPI_DOUBLE,procprev,0,uworld,&request);
|
||||
if (ireplica < nreplica-1)
|
||||
MPI_Send(x[0],3*nlocal,MPI_DOUBLE,procnext,0,uworld);
|
||||
if (ireplica > 0) MPI_Wait(&request,&status);
|
||||
if (ireplica > 0) MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
|
||||
if (ireplica < nreplica-1)
|
||||
MPI_Irecv(xnext[0],3*nlocal,MPI_DOUBLE,procnext,0,uworld,&request);
|
||||
if (ireplica > 0)
|
||||
MPI_Send(x[0],3*nlocal,MPI_DOUBLE,procprev,0,uworld);
|
||||
if (ireplica < nreplica-1) MPI_Wait(&request,&status);
|
||||
if (ireplica < nreplica-1) MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
|
||||
// trigger potential energy computation on next timestep
|
||||
|
||||
|
|
|
@ -181,7 +181,6 @@ void Temper::command(int narg, char **arg)
|
|||
|
||||
int i,which,partner,swap,partner_set_temp,partner_world;
|
||||
double pe,pe_partner,boltz_factor,new_temp;
|
||||
MPI_Status status;
|
||||
|
||||
if (me_universe == 0 && universe->uscreen)
|
||||
fprintf(universe->uscreen,"Setting up tempering ...\n");
|
||||
|
@ -254,7 +253,7 @@ void Temper::command(int narg, char **arg)
|
|||
if (me_universe > partner)
|
||||
MPI_Send(&pe,1,MPI_DOUBLE,partner,0,universe->uworld);
|
||||
else
|
||||
MPI_Recv(&pe_partner,1,MPI_DOUBLE,partner,0,universe->uworld,&status);
|
||||
MPI_Recv(&pe_partner,1,MPI_DOUBLE,partner,0,universe->uworld,MPI_STATUS_IGNORE);
|
||||
|
||||
if (me_universe < partner) {
|
||||
boltz_factor = (pe - pe_partner) *
|
||||
|
@ -267,7 +266,7 @@ void Temper::command(int narg, char **arg)
|
|||
if (me_universe < partner)
|
||||
MPI_Send(&swap,1,MPI_INT,partner,0,universe->uworld);
|
||||
else
|
||||
MPI_Recv(&swap,1,MPI_INT,partner,0,universe->uworld,&status);
|
||||
MPI_Recv(&swap,1,MPI_INT,partner,0,universe->uworld,MPI_STATUS_IGNORE);
|
||||
|
||||
#ifdef TEMPER_DEBUG
|
||||
if (me_universe < partner)
|
||||
|
|
|
@ -498,8 +498,6 @@ void VerletSplit::rk_setup()
|
|||
|
||||
void VerletSplit::r2k_comm()
|
||||
{
|
||||
MPI_Status status;
|
||||
|
||||
int n = 0;
|
||||
if (master) n = atom->nlocal;
|
||||
MPI_Gatherv(atom->x[0],n*3,MPI_DOUBLE,atom->x[0],xsize,xdisp,
|
||||
|
@ -513,7 +511,7 @@ void VerletSplit::r2k_comm()
|
|||
MPI_Send(flags,2,MPI_INT,0,0,block);
|
||||
} else if (!master) {
|
||||
int flags[2];
|
||||
MPI_Recv(flags,2,MPI_DOUBLE,1,0,block,&status);
|
||||
MPI_Recv(flags,2,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
|
||||
eflag = flags[0]; vflag = flags[1];
|
||||
}
|
||||
|
||||
|
@ -524,8 +522,8 @@ void VerletSplit::r2k_comm()
|
|||
MPI_Send(domain->boxlo,3,MPI_DOUBLE,0,0,block);
|
||||
MPI_Send(domain->boxhi,3,MPI_DOUBLE,0,0,block);
|
||||
} else if (!master) {
|
||||
MPI_Recv(domain->boxlo,3,MPI_DOUBLE,1,0,block,&status);
|
||||
MPI_Recv(domain->boxhi,3,MPI_DOUBLE,1,0,block,&status);
|
||||
MPI_Recv(domain->boxlo,3,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
|
||||
MPI_Recv(domain->boxhi,3,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
|
||||
domain->set_global_box();
|
||||
domain->set_local_box();
|
||||
force->kspace->setup();
|
||||
|
|
|
@@ -2484,10 +2484,10 @@ void FixRigidSmall::write_restart_file(char *file)
   // all other procs wait for ping, send their chunk to proc 0

   int tmp,recvrow;
-  MPI_Status status;
-  MPI_Request request;

   if (me == 0) {
+    MPI_Status status;
+    MPI_Request request;
     for (int iproc = 0; iproc < nprocs; iproc++) {
       if (iproc) {
         MPI_Irecv(&buf[0][0],maxrow*ncol,MPI_DOUBLE,iproc,0,world,&request);
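Scoping the status and request to the rank-0 branch documents that only the collector uses them, presumably because proc 0 still needs a real status for MPI_Get_count (the retained recvrow variable points that way, though the call itself is not shown in this hunk). A hedged sketch of the collection pattern around this hunk, the usual LAMMPS ping-pong of Irecv, empty "ready" send, and MPI_Rsend reply:

    #include <mpi.h>

    // Hedged sketch only: the MPI_Get_count step is implied, not shown above,
    // and the file-writing step is elided.
    void collect_chunks(int me, int nprocs, double *buf, int maxrow, int ncol,
                        int sendrow, MPI_Comm world)
    {
      int tmp = 0, recvrow = 0;
      if (me == 0) {
        MPI_Status status;
        MPI_Request request;
        for (int iproc = 1; iproc < nprocs; iproc++) {
          MPI_Irecv(buf, maxrow*ncol, MPI_DOUBLE, iproc, 0, world, &request);
          MPI_Send(&tmp, 0, MPI_INT, iproc, 0, world);   // ping: ready to receive
          MPI_Wait(&request, &status);                   // here the status matters:
          MPI_Get_count(&status, MPI_DOUBLE, &recvrow);  // how much actually arrived
          recvrow /= ncol;
          // ... write recvrow rows of buf to the restart file ...
        }
      } else {
        MPI_Recv(&tmp, 0, MPI_INT, 0, 0, world, MPI_STATUS_IGNORE);  // wait for ping
        MPI_Rsend(buf, sendrow*ncol, MPI_DOUBLE, 0, 0, world);       // ready-send reply
      }
    }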
@ -2506,7 +2506,7 @@ void FixRigidSmall::write_restart_file(char *file)
|
|||
}
|
||||
|
||||
} else {
|
||||
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
|
||||
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
|
||||
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_DOUBLE,0,0,world);
|
||||
}
|
||||
|
||||
|
|
|
@ -741,7 +741,6 @@ void PairSNAP::load_balance()
|
|||
int* procgrid = comm->procgrid;
|
||||
|
||||
int nlocal_up,nlocal_down;
|
||||
MPI_Status status;
|
||||
MPI_Request request;
|
||||
|
||||
double sub_mid[3];
|
||||
|
@ -802,9 +801,9 @@ void PairSNAP::load_balance()
|
|||
// two stage process, first upstream movement, then downstream
|
||||
|
||||
MPI_Sendrecv(&nlocal,1,MPI_INT,sendproc,0,
|
||||
&nlocal_up,1,MPI_INT,recvproc,0,world,&status);
|
||||
&nlocal_up,1,MPI_INT,recvproc,0,world,MPI_STATUS_IGNORE);
|
||||
MPI_Sendrecv(&nlocal,1,MPI_INT,recvproc,0,
|
||||
&nlocal_down,1,MPI_INT,sendproc,0,world,&status);
|
||||
&nlocal_down,1,MPI_INT,sendproc,0,world,MPI_STATUS_IGNORE);
|
||||
nsend = 0;
|
||||
|
||||
// send upstream
|
||||
|
@ -835,7 +834,7 @@ void PairSNAP::load_balance()
|
|||
if (nlocal < nlocal_down-1) {
|
||||
nlocal++;
|
||||
int get_tag = -1;
|
||||
MPI_Recv(&get_tag,1,MPI_INT,sendproc,0,world,&status);
|
||||
MPI_Recv(&get_tag,1,MPI_INT,sendproc,0,world,MPI_STATUS_IGNORE);
|
||||
|
||||
// if get_tag -1 the other process didnt have local atoms to send
|
||||
|
||||
|
@ -907,9 +906,9 @@ void PairSNAP::load_balance()
|
|||
// second pass through the grid
|
||||
|
||||
MPI_Sendrecv(&nlocal,1,MPI_INT,sendproc,0,
|
||||
&nlocal_up,1,MPI_INT,recvproc,0,world,&status);
|
||||
&nlocal_up,1,MPI_INT,recvproc,0,world,MPI_STATUS_IGNORE);
|
||||
MPI_Sendrecv(&nlocal,1,MPI_INT,recvproc,0,
|
||||
&nlocal_down,1,MPI_INT,sendproc,0,world,&status);
|
||||
&nlocal_down,1,MPI_INT,sendproc,0,world,MPI_STATUS_IGNORE);
|
||||
|
||||
// send downstream
|
||||
|
||||
|
@ -938,7 +937,7 @@ void PairSNAP::load_balance()
|
|||
nlocal++;
|
||||
int get_tag = -1;
|
||||
|
||||
MPI_Recv(&get_tag,1,MPI_INT,recvproc,0,world,&status);
|
||||
MPI_Recv(&get_tag,1,MPI_INT,recvproc,0,world,MPI_STATUS_IGNORE);
|
||||
|
||||
if (get_tag >= 0) {
|
||||
if (ghostinum >= ghostilist_max) {
|
||||
|
|
|
@ -1061,7 +1061,6 @@ void FixSRD::vbin_comm(int ishift)
|
|||
{
|
||||
BinComm *bcomm1,*bcomm2;
|
||||
MPI_Request request1,request2;
|
||||
MPI_Status status;
|
||||
|
||||
// send/recv bins in both directions in each dimension
|
||||
// don't send if nsend = 0
|
||||
|
@ -1109,11 +1108,11 @@ void FixSRD::vbin_comm(int ishift)
|
|||
bcomm2->sendproc,0,world);
|
||||
}
|
||||
if (bcomm1->nrecv) {
|
||||
MPI_Wait(&request1,&status);
|
||||
MPI_Wait(&request1,MPI_STATUS_IGNORE);
|
||||
vbin_unpack(rbuf1,vbin,bcomm1->nrecv,bcomm1->recvlist);
|
||||
}
|
||||
if (bcomm2->nrecv) {
|
||||
MPI_Wait(&request2,&status);
|
||||
MPI_Wait(&request2,MPI_STATUS_IGNORE);
|
||||
vbin_unpack(rbuf2,vbin,bcomm2->nrecv,bcomm2->recvlist);
|
||||
}
|
||||
}
|
||||
|
@ -1164,7 +1163,6 @@ void FixSRD::xbin_comm(int ishift, int nval)
|
|||
{
|
||||
BinComm *bcomm1,*bcomm2;
|
||||
MPI_Request request1,request2;
|
||||
MPI_Status status;
|
||||
|
||||
// send/recv bins in both directions in each dimension
|
||||
// don't send if nsend = 0
|
||||
|
@ -1212,11 +1210,11 @@ void FixSRD::xbin_comm(int ishift, int nval)
|
|||
bcomm2->sendproc,0,world);
|
||||
}
|
||||
if (bcomm1->nrecv) {
|
||||
MPI_Wait(&request1,&status);
|
||||
MPI_Wait(&request1,MPI_STATUS_IGNORE);
|
||||
xbin_unpack(rbuf1,vbin,bcomm1->nrecv,bcomm1->recvlist,nval);
|
||||
}
|
||||
if (bcomm2->nrecv) {
|
||||
MPI_Wait(&request2,&status);
|
||||
MPI_Wait(&request2,MPI_STATUS_IGNORE);
|
||||
xbin_unpack(rbuf2,vbin,bcomm2->nrecv,bcomm2->recvlist,nval);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -397,17 +397,9 @@ int AtomVecAngleCuda::unpack_exchange(double *buf)
|
|||
if(cuda->oncpu)
|
||||
return AtomVecAngle::unpack_exchange(buf);
|
||||
|
||||
double *sublo,*subhi;
|
||||
int dim=cuda->shared_data.exchange_dim;
|
||||
if(domain->box_change)
|
||||
Cuda_AtomVecAngleCuda_Init(&cuda->shared_data);
|
||||
if (domain->triclinic == 0) {
|
||||
sublo = domain->sublo;
|
||||
subhi = domain->subhi;
|
||||
} else {
|
||||
sublo = domain->sublo_lamda;
|
||||
subhi = domain->subhi_lamda;
|
||||
}
|
||||
|
||||
int mfirst=0;
|
||||
for(int pi=0;pi<(comm->procgrid[dim]>2?2:1);pi++)
|
||||
|
|
|
@ -352,18 +352,9 @@ int AtomVecAtomicCuda::unpack_exchange(double *buf)
|
|||
if(cuda->oncpu)
|
||||
return AtomVecAtomic::unpack_exchange(buf);
|
||||
|
||||
double *sublo,*subhi;
|
||||
|
||||
int dim=cuda->shared_data.exchange_dim;
|
||||
if(domain->box_change)
|
||||
Cuda_AtomVecAtomicCuda_Init(&cuda->shared_data);
|
||||
if (domain->triclinic == 0) {
|
||||
sublo = domain->sublo;
|
||||
subhi = domain->subhi;
|
||||
} else {
|
||||
sublo = domain->sublo_lamda;
|
||||
subhi = domain->subhi_lamda;
|
||||
}
|
||||
|
||||
int mfirst=0;
|
||||
for(int pi=0;pi<(comm->procgrid[dim]>2?2:1);pi++)
|
||||
|
|
|
@ -355,18 +355,10 @@ int AtomVecChargeCuda::unpack_exchange(double *buf)
|
|||
{
|
||||
if(cuda->oncpu)
|
||||
return AtomVecCharge::unpack_exchange(buf);
|
||||
double *sublo,*subhi;
|
||||
|
||||
int dim=cuda->shared_data.exchange_dim;
|
||||
if(domain->box_change)
|
||||
Cuda_AtomVecChargeCuda_Init(&cuda->shared_data);
|
||||
if (domain->triclinic == 0) {
|
||||
sublo = domain->sublo;
|
||||
subhi = domain->subhi;
|
||||
} else {
|
||||
sublo = domain->sublo_lamda;
|
||||
subhi = domain->subhi_lamda;
|
||||
}
|
||||
|
||||
int mfirst=0;
|
||||
for(int pi=0;pi<(comm->procgrid[dim]>2?2:1);pi++)
|
||||
|
|
|
@ -420,17 +420,9 @@ int AtomVecFullCuda::unpack_exchange(double *buf)
|
|||
if(cuda->oncpu)
|
||||
return AtomVecFull::unpack_exchange(buf);
|
||||
|
||||
double *sublo,*subhi;
|
||||
int dim=cuda->shared_data.exchange_dim;
|
||||
if(domain->box_change)
|
||||
Cuda_AtomVecFullCuda_Init(&cuda->shared_data);
|
||||
if (domain->triclinic == 0) {
|
||||
sublo = domain->sublo;
|
||||
subhi = domain->subhi;
|
||||
} else {
|
||||
sublo = domain->sublo_lamda;
|
||||
subhi = domain->subhi_lamda;
|
||||
}
|
||||
|
||||
int mfirst=0;
|
||||
for(int pi=0;pi<(comm->procgrid[dim]>2?2:1);pi++)
|
||||
|
|
|
@ -102,8 +102,6 @@ CommCuda::~CommCuda()
|
|||
|
||||
void CommCuda::init()
|
||||
{
|
||||
int factor = 1;
|
||||
if(cuda->shared_data.overlap_comm) factor=maxswap;
|
||||
if(not buf_send)
|
||||
grow_send(maxsend,0);
|
||||
if(not buf_recv)
|
||||
|
@ -176,16 +174,11 @@ void CommCuda::forward_comm(int mode)
|
|||
|
||||
void CommCuda::forward_comm_cuda()
|
||||
{
|
||||
static int count=0;
|
||||
static double kerneltime=0.0;
|
||||
static double copytime=0.0;
|
||||
my_times time1,time2,time3;
|
||||
|
||||
int n;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
double **x = atom->x;
|
||||
|
||||
cuda->shared_data.domain.xy=domain->xy;
|
||||
cuda->shared_data.domain.xz=domain->xz;
|
||||
|
@ -231,7 +224,7 @@ my_gettime(CLOCK_REALTIME,&time2);
|
|||
|
||||
//printf("RecvSize: %i SendSize: %i\n",size_forward_recv_now,n);
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
|
||||
my_gettime(CLOCK_REALTIME,&time3);
|
||||
cuda->shared_data.cuda_timings.comm_forward_mpi_upper+=
|
||||
|
@ -255,7 +248,7 @@ cuda->shared_data.cuda_timings.comm_forward_mpi_lower+=
|
|||
buf_send,pbc_flag[iswap],pbc[iswap]);
|
||||
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
avec->unpack_comm_vel(recvnum[iswap],firstrecv[iswap],buf_recv);
|
||||
}
|
||||
else
|
||||
|
@ -271,7 +264,7 @@ cuda->shared_data.cuda_timings.comm_forward_mpi_lower+=
|
|||
buf_send,pbc_flag[iswap],pbc[iswap]);
|
||||
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
avec->unpack_comm(recvnum[iswap],firstrecv[iswap],buf_recv);
|
||||
}
|
||||
|
||||
|
@ -308,16 +301,11 @@ cuda->shared_data.cuda_timings.comm_forward_mpi_lower+=
|
|||
|
||||
void CommCuda::forward_comm_pack_cuda()
|
||||
{
|
||||
static int count=0;
|
||||
static double kerneltime=0.0;
|
||||
static double copytime=0.0;
|
||||
my_times time1,time2,time3;
|
||||
my_times time1,time2;
|
||||
int n; // initialize comm buffers & exchange memory
|
||||
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
double **x = atom->x;
|
||||
|
||||
cuda->shared_data.domain.xy=domain->xy;
|
||||
cuda->shared_data.domain.xz=domain->xz;
|
||||
|
@ -375,7 +363,7 @@ my_gettime(CLOCK_REALTIME,&time2);
|
|||
cuda->shared_data.comm.buf_send[iswap],pbc_flag[iswap],pbc[iswap]);
|
||||
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
avec->unpack_comm(recvnum[iswap],firstrecv[iswap],buf_recv);
|
||||
}
|
||||
|
||||
|
@ -411,15 +399,10 @@ my_gettime(CLOCK_REALTIME,&time2);
|
|||
|
||||
void CommCuda::forward_comm_transfer_cuda()
|
||||
{
|
||||
static int count=0;
|
||||
static double kerneltime=0.0;
|
||||
static double copytime=0.0;
|
||||
my_times time1,time2,time3;
|
||||
my_times time1,time2,time3;
|
||||
int n;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
double **x = atom->x;
|
||||
cuda->shared_data.domain.xy=domain->xy;
|
||||
cuda->shared_data.domain.xz=domain->xz;
|
||||
cuda->shared_data.domain.yz=domain->yz;
|
||||
|
@ -464,7 +447,7 @@ my_gettime(CLOCK_REALTIME,&time2);
|
|||
cuda->shared_data.cuda_timings.comm_forward_download+=
|
||||
time2.tv_sec-time1.tv_sec+1.0*(time2.tv_nsec-time1.tv_nsec)/1000000000;
|
||||
MPI_Send(buf_send,cuda->shared_data.comm.send_size[iswap],MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
//printf("D: %i \n",cuda->shared_data.comm.send_size[iswap]/1024*4);
|
||||
CudaWrapper_UploadCudaDataAsync((void*) buf_recv,cuda->shared_data.comm.buf_recv_dev[iswap], size_forward_recv_now*sizeof(double),2);
|
||||
my_gettime(CLOCK_REALTIME,&time1);
|
||||
|
@ -498,7 +481,7 @@ my_gettime(CLOCK_REALTIME,&time1);
|
|||
my_gettime(CLOCK_REALTIME,&time2);
|
||||
|
||||
MPI_Send(cuda->shared_data.comm.buf_send[iswap],cuda->shared_data.comm.send_size[iswap],MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
|
||||
my_gettime(CLOCK_REALTIME,&time3);
|
||||
cuda->shared_data.cuda_timings.comm_forward_mpi_upper+=
|
||||
|
@ -520,7 +503,7 @@ cuda->shared_data.cuda_timings.comm_forward_mpi_lower+=
|
|||
buf_send,pbc_flag[iswap],pbc[iswap]);
|
||||
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
avec->unpack_comm(recvnum[iswap],firstrecv[iswap],buf_recv);
|
||||
}
|
||||
|
||||
|
@ -549,15 +532,9 @@ cuda->shared_data.cuda_timings.comm_forward_mpi_lower+=
|
|||
|
||||
void CommCuda::forward_comm_unpack_cuda()
|
||||
{
|
||||
static int count=0;
|
||||
static double kerneltime=0.0;
|
||||
static double copytime=0.0;
|
||||
my_times time1,time2,time3;
|
||||
int n;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
double **x = atom->x;
|
||||
|
||||
cuda->shared_data.domain.xy=domain->xy;
|
||||
cuda->shared_data.domain.xz=domain->xz;
|
||||
|
@ -599,7 +576,7 @@ void CommCuda::forward_comm_unpack_cuda()
|
|||
buf_send,pbc_flag[iswap],pbc[iswap]);
|
||||
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
avec->unpack_comm(recvnum[iswap],firstrecv[iswap],buf_recv);
|
||||
}
|
||||
|
||||
|
@ -636,7 +613,6 @@ void CommCuda::forward_comm_pair(Pair *pair)
|
|||
int iswap,n;
|
||||
double *buf;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
|
||||
int nsize = pair->comm_forward;
|
||||
|
||||
|
@ -658,7 +634,7 @@ void CommCuda::forward_comm_pair(Pair *pair)
|
|||
MPI_Irecv(buf_recv,nrecv,MPI_DOUBLE,recvproc[iswap],0,
|
||||
world,&request);
|
||||
MPI_Send(buf_send,nsend,MPI_DOUBLE,sendproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
buf = buf_recv;
|
||||
} else buf = buf_send;
|
||||
|
||||
|
@ -677,9 +653,7 @@ void CommCuda::reverse_comm()
|
|||
{
|
||||
int n;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
double **f = atom->f;
|
||||
double *buf;
|
||||
|
||||
if(not comm_f_only && not avec->cudable) cuda->downloadAll(); //not yet implemented in CUDA but only needed for non standard atom styles
|
||||
|
@ -709,7 +683,7 @@ void CommCuda::reverse_comm()
|
|||
size_reverse_send_now=(size_reverse_send_now+1)*sizeof(F_CFLOAT)/sizeof(double);
|
||||
MPI_Send(buf,size_reverse_send_now,MPI_DOUBLE,
|
||||
recvproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
Cuda_CommCuda_UnpackReverse(&cuda->shared_data,sendnum[iswap],iswap,buf_recv);
|
||||
|
||||
} else {
|
||||
|
@ -717,7 +691,7 @@ void CommCuda::reverse_comm()
|
|||
sendproc[iswap],0,world,&request);
|
||||
n = avec->pack_reverse(recvnum[iswap],firstrecv[iswap],buf_send);
|
||||
MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
|
||||
avec->unpack_reverse(sendnum[iswap],sendlist[iswap],buf_recv);
|
||||
}
|
||||
|
@ -761,14 +735,11 @@ void CommCuda::exchange()
|
|||
|
||||
void CommCuda::exchange_cuda()
|
||||
{
|
||||
int i,m,nsend,nrecv,nrecv1,nrecv2,nlocal;
|
||||
double lo,hi,value;
|
||||
double **x;
|
||||
double *sublo,*subhi,*buf;
|
||||
int nsend,nrecv,nrecv1,nrecv2,nlocal;
|
||||
double *buf;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
AtomVec *avec = atom->avec;
|
||||
my_times time1,time2,time3;
|
||||
my_times time1,time2;
|
||||
|
||||
// clear global->local map for owned and ghost atoms
|
||||
// b/c atoms migrate to new procs in exchange() and
|
||||
|
@@ -780,23 +751,13 @@ void CommCuda::exchange_cuda()
if (map_style) atom->map_clear();
// subbox bounds for orthogonal or triclinic
if (triclinic == 0) {
sublo = domain->sublo;
subhi = domain->subhi;
} else {
sublo = domain->sublo_lamda;
subhi = domain->subhi_lamda;
}
// loop over dimensions
for (int dim = 0; dim < 3; dim++) {
// fill buffer with atoms leaving my box, using < and >=
// when atom is deleted, fill it in with last atom
cuda->shared_data.exchange_dim=dim;
cuda->shared_data.exchange_dim=dim;
nlocal = atom->nlocal;
avec->maxsend=&maxsend;
@@ -819,11 +780,11 @@ void CommCuda::exchange_cuda()
} else {
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,MPI_STATUS_IGNORE);
nrecv = nrecv1;
if (procgrid[dim] > 2) {
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,MPI_STATUS_IGNORE);
nrecv += nrecv2;
}
if (nrecv+1 > maxrecv) grow_recv(nrecv+1);
@@ -831,13 +792,13 @@ void CommCuda::exchange_cuda()
MPI_Irecv(buf_recv,nrecv1,MPI_DOUBLE,procneigh[dim][1],0,
world,&request);
MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
if (procgrid[dim] > 2) {
MPI_Irecv(&buf_recv[nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,
world,&request);
MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
if((nrecv1==0)||(nrecv2==0)) buf_recv[nrecv]=0;
}
@@ -899,16 +860,12 @@ void CommCuda::borders()
void CommCuda::borders_cuda()
{
int i,n,itype,iswap,dim,ineed,twoneed,smax,rmax;
int nsend,nrecv,nfirst,nlast,ngroup;
double lo,hi;
int *type;
double **x;
double *buf,*mlo,*mhi;
int n,iswap,dim,ineed,twoneed,smax,rmax;
int nsend,nrecv,nfirst,nlast;
double *buf;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
my_times time1,time2,time3;
my_times time1,time2;
// clear old ghosts
@@ -931,15 +888,6 @@ void CommCuda::borders_cuda()
// for later swaps in a dim, only check newly arrived ghosts
// store sent atom indices in list for use in future timesteps
x = atom->x;
if (style == SINGLE) {
lo = slablo[iswap];
hi = slabhi[iswap];
} else {
type = atom->type;
mlo = multilo[iswap];
mhi = multihi[iswap];
}
if (ineed % 2 == 0) {
nfirst = nlast;
nlast = atom->nlocal + atom->nghost;
@@ -975,13 +923,13 @@ void CommCuda::borders_cuda()
my_gettime(CLOCK_REALTIME,&time1);
if (sendproc[iswap] != me) {
MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
&nrecv,1,MPI_INT,recvproc[iswap],0,world,&status);
&nrecv,1,MPI_INT,recvproc[iswap],0,world,MPI_STATUS_IGNORE);
if (nrecv*size_border > maxrecv)
grow_recv(nrecv*size_border);
MPI_Irecv(buf_recv,nrecv*size_border,MPI_DOUBLE,
recvproc[iswap],0,world,&request);
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else {
nrecv = nsend;
@@ -1034,16 +982,12 @@ cuda->shared_data.cuda_timings.comm_border_mpi+=
void CommCuda::borders_cuda_overlap_forward_comm()
{
int i,n,itype,iswap,dim,ineed,twoneed,smax,rmax;
int nsend,nrecv,nfirst,nlast,ngroup;
double lo,hi;
int *type;
double **x;
double *buf,*mlo,*mhi;
int n,iswap,dim,ineed,twoneed,smax,rmax;
int nsend,nrecv,nfirst,nlast;
double *buf;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
my_times time1,time2,time3;
my_times time1,time2;
// clear old ghosts
@@ -1066,15 +1010,6 @@ void CommCuda::borders_cuda_overlap_forward_comm()
// for later swaps in a dim, only check newly arrived ghosts
// store sent atom indices in list for use in future timesteps
x = atom->x;
if (style == SINGLE) {
lo = slablo[iswap];
hi = slabhi[iswap];
} else {
type = atom->type;
mlo = multilo[iswap];
mhi = multihi[iswap];
}
if (ineed % 2 == 0) {
nfirst = nlast;
nlast = atom->nlocal + atom->nghost;
@@ -1111,13 +1046,13 @@ void CommCuda::borders_cuda_overlap_forward_comm()
my_gettime(CLOCK_REALTIME,&time1);
if (sendproc[iswap] != me) {
MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
&nrecv,1,MPI_INT,recvproc[iswap],0,world,&status);
&nrecv,1,MPI_INT,recvproc[iswap],0,world,MPI_STATUS_IGNORE);
if (nrecv*size_border > maxrecv)
grow_recv(nrecv*size_border);
MPI_Irecv(buf_recv,nrecv*size_border,MPI_DOUBLE,
recvproc[iswap],0,world,&request);
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else {
nrecv = nsend;
@@ -1171,12 +1106,11 @@ cuda->shared_data.cuda_timings.comm_border_mpi+=
void CommCuda::forward_comm_fix(Fix *fix)
void CommCuda::forward_comm_fix(Fix *fix, int size)
{
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = fix->comm_forward;
@@ -1206,7 +1140,7 @@ void CommCuda::forward_comm_fix(Fix *fix)
MPI_Irecv(buf_recv,nsize*recvnum[iswap],MPI_DOUBLE,recvproc[iswap],0,
world,&request);
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;
@@ -40,7 +40,7 @@ public:
virtual void borders(); // setup list of atoms to communicate
virtual void borders_cuda(); // setup list of atoms to communicate
virtual void borders_cuda_overlap_forward_comm();
virtual void forward_comm_fix(class Fix *); // forward comm from a Fix
virtual void forward_comm_fix(class Fix *, int size=0); // forward comm from a Fix
@ -63,29 +63,55 @@ Cuda::Cuda(LAMMPS* lmp) : Pointers(lmp)
|
|||
|
||||
Cuda_Cuda_GetCompileSettings(&shared_data);
|
||||
|
||||
if(shared_data.compile_settings.prec_glob != sizeof(CUDA_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: Global Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_glob, sizeof(CUDA_CFLOAT) / 4);
|
||||
if (universe->me == 0) {
|
||||
|
||||
if(shared_data.compile_settings.prec_x != sizeof(X_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: X Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_x, sizeof(X_CFLOAT) / 4);
|
||||
if(shared_data.compile_settings.prec_glob != sizeof(CUDA_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: Global Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_glob, (int) sizeof(CUDA_CFLOAT) / 4);
|
||||
|
||||
if(shared_data.compile_settings.prec_v != sizeof(V_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: V Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_v, sizeof(V_CFLOAT) / 4);
|
||||
if(shared_data.compile_settings.prec_x != sizeof(X_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: X Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_x, (int) sizeof(X_CFLOAT) / 4);
|
||||
|
||||
if(shared_data.compile_settings.prec_f != sizeof(F_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: F Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_f, sizeof(F_CFLOAT) / 4);
|
||||
if(shared_data.compile_settings.prec_v != sizeof(V_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: V Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_v, (int) sizeof(V_CFLOAT) / 4);
|
||||
|
||||
if(shared_data.compile_settings.prec_pppm != sizeof(PPPM_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: PPPM Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_pppm, sizeof(PPPM_CFLOAT) / 4);
|
||||
if(shared_data.compile_settings.prec_f != sizeof(F_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: F Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_f, (int) sizeof(F_CFLOAT) / 4);
|
||||
|
||||
if(shared_data.compile_settings.prec_fft != sizeof(FFT_CFLOAT) / 4) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: FFT Precision: cuda %i cpp %i\n\n", shared_data.compile_settings.prec_fft, sizeof(FFT_CFLOAT) / 4);
|
||||
if(shared_data.compile_settings.prec_pppm != sizeof(PPPM_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: PPPM Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_pppm, (int) sizeof(PPPM_CFLOAT) / 4);
|
||||
|
||||
if(shared_data.compile_settings.prec_fft != sizeof(FFT_CFLOAT) / 4)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: FFT Precision: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.prec_fft, (int) sizeof(FFT_CFLOAT) / 4);
|
||||
|
||||
#ifdef FFT_CUFFT
|
||||
|
||||
if(shared_data.compile_settings.cufft != 1) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: cufft: cuda %i cpp %i\n\n", shared_data.compile_settings.cufft, 1);
|
||||
|
||||
if(shared_data.compile_settings.cufft != 1)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: cufft: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.cufft, 1);
|
||||
#else
|
||||
|
||||
if(shared_data.compile_settings.cufft != 0) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: cufft: cuda %i cpp %i\n\n", shared_data.compile_settings.cufft, 0);
|
||||
|
||||
if(shared_data.compile_settings.cufft != 0)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: cufft: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.cufft, 0);
|
||||
#endif
|
||||
|
||||
if(shared_data.compile_settings.arch != CUDA_ARCH) printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n # CUDA WARNING: arch: cuda %i cpp %i\n\n", shared_data.compile_settings.cufft, CUDA_ARCH);
|
||||
if(shared_data.compile_settings.arch != CUDA_ARCH)
|
||||
printf("\n\n # CUDA WARNING: Compile Settings of cuda and cpp code differ! \n"
|
||||
" # CUDA WARNING: arch: cuda %i cpp %i\n\n",
|
||||
shared_data.compile_settings.cufft, CUDA_ARCH);
|
||||
}
|
||||
|
||||
cu_x = 0;
|
||||
cu_v = 0;
|
||||
|
@ -454,7 +480,6 @@ void Cuda::checkResize()
|
|||
{
|
||||
MYDBG(printf("# CUDA: Cuda::checkResize ...\n");)
|
||||
cuda_shared_atom* cu_atom = & shared_data.atom;
|
||||
cuda_shared_pair* cu_pair = & shared_data.pair;
|
||||
cu_atom->q_flag = atom->q_flag;
|
||||
cu_atom->rmass_flag = atom->rmass ? 1 : 0;
|
||||
cu_atom->nall = atom->nlocal + atom->nghost;
|
||||
|
|
|
@@ -29,8 +29,8 @@ class DomainCuda : public Domain {
void reset_box();
void pbc();
void lamda2x(int);
void x2lamda(int);
virtual void lamda2x(int);
virtual void x2lamda(int);
protected:
class Cuda *cuda;
@@ -170,10 +170,6 @@ void FixAveForceCuda::init()
if (xstyle == EQUAL || ystyle == EQUAL || zstyle == EQUAL) varflag = EQUAL;
else varflag = CONSTANT;
// ncount = total # of atoms in group
int *mask = atom->mask;
int nlocal = atom->nlocal;
}
/* ---------------------------------------------------------------------- */
@@ -833,8 +833,6 @@ void FixNHCuda::final_integrate()
void FixNHCuda::initial_integrate_respa(int vflag, int ilevel, int iloop)
{
int i;
// set timesteps by level
dtv = step_respa[ilevel];
@@ -1430,7 +1428,6 @@ double FixNHCuda::compute_vector(int n)
}
}
int i;
double volume;
double kt = boltz * t_target;
double lkt = tdof * kt;
@@ -1653,7 +1650,7 @@ void FixNHCuda::nhc_temp_integrate()
void FixNHCuda::nhc_press_integrate()
{
int ich,i;
double expfac,factor_etap,wmass,kecurrent;
double expfac,factor_etap,kecurrent;
double kt = boltz * t_target;
double lkt_press = kt;
@ -63,8 +63,6 @@ void NeighborCuda::full_bin_cuda(NeighList *list)
|
|||
}
|
||||
int bin_dim_tmp[3];
|
||||
int bin_nmax_tmp;
|
||||
//printf("Hallo\n");
|
||||
timespec starttime,endtime;
|
||||
do
|
||||
{
|
||||
do
|
||||
|
@ -98,8 +96,6 @@ void NeighborCuda::full_bin_cuda(NeighList *list)
|
|||
|
||||
// cuda->cu_debugdata->memset_device(0);
|
||||
int maxneighbors=slist->maxneighbors;
|
||||
int *ilist = list->ilist;
|
||||
int *numneigh = list->numneigh;
|
||||
|
||||
if((nex_type!=slist->nex_type)||
|
||||
(nex_group!=slist->nex_group)||
|
||||
|
@ -136,12 +132,8 @@ void NeighborCuda::full_bin_cuda(NeighList *list)
|
|||
//printf("C %i %i %i\n",nex_type,nex_group,nex_mol);
|
||||
}
|
||||
int overflow = 0;
|
||||
int inum = 0;
|
||||
int npnt = 0;
|
||||
do
|
||||
{
|
||||
npnt=0;
|
||||
inum=0;
|
||||
overflow=0;
|
||||
clist->grow_device();
|
||||
slist->cutneighsq=cutneighsq;
|
||||
|
|
|
@ -154,8 +154,8 @@ void PairEAMAlloyCuda::read_file(char *filename)
|
|||
|
||||
char **words = new char*[file->nelements+1];
|
||||
nwords = 0;
|
||||
char *first = strtok(line," \t\n\r\f");
|
||||
while (words[nwords++] = strtok(NULL," \t\n\r\f")) continue;
|
||||
strtok(line," \t\n\r\f");
|
||||
while ((words[nwords++] = strtok(NULL," \t\n\r\f"))) continue;
|
||||
|
||||
file->elements = new char*[file->nelements];
|
||||
for (int i = 0; i < file->nelements; i++) {
|
||||
|
|
|
@ -154,8 +154,8 @@ void PairEAMFSCuda::read_file(char *filename)
|
|||
|
||||
char **words = new char*[file->nelements+1];
|
||||
nwords = 0;
|
||||
char *first = strtok(line," \t\n\r\f");
|
||||
while (words[nwords++] = strtok(NULL," \t\n\r\f")) continue;
|
||||
strtok(line," \t\n\r\f");
|
||||
while ((words[nwords++] = strtok(NULL," \t\n\r\f"))) continue;
|
||||
|
||||
file->elements = new char*[file->nelements];
|
||||
for (int i = 0; i < file->nelements; i++) {
|
||||
|
|
|
@ -105,7 +105,7 @@ void PairTersoffZBLCuda::read_file(char *file)
|
|||
|
||||
// strip comment, skip line if blank
|
||||
|
||||
if (ptr = strchr(line,'#')) *ptr = '\0';
|
||||
if ((ptr = strchr(line,'#'))) *ptr = '\0';
|
||||
nwords = atom->count_words(line);
|
||||
if (nwords == 0) continue;
|
||||
|
||||
|
@ -124,7 +124,7 @@ void PairTersoffZBLCuda::read_file(char *file)
|
|||
if (eof) break;
|
||||
MPI_Bcast(&n,1,MPI_INT,0,world);
|
||||
MPI_Bcast(line,n,MPI_CHAR,0,world);
|
||||
if (ptr = strchr(line,'#')) *ptr = '\0';
|
||||
if ((ptr = strchr(line,'#'))) *ptr = '\0';
|
||||
nwords = atom->count_words(line);
|
||||
}
|
||||
|
||||
|
@ -135,7 +135,7 @@ void PairTersoffZBLCuda::read_file(char *file)
|
|||
|
||||
nwords = 0;
|
||||
words[nwords++] = strtok(line," \t\n\r\f");
|
||||
while (words[nwords++] = strtok(NULL," \t\n\r\f")) continue;
|
||||
while ((words[nwords++] = strtok(NULL," \t\n\r\f"))) continue;
|
||||
|
||||
// ielement,jelement,kelement = 1st args
|
||||
// if all 3 args are in element list, then parse this line
|
||||
|
|
|
@ -411,48 +411,47 @@ void PPPMCuda::init()
|
|||
// if no neighbor proc, value is from self since I have ghosts regardless
|
||||
|
||||
int nplanes;
|
||||
MPI_Status status;
|
||||
|
||||
nplanes = nxlo_in - nxlo_out;
|
||||
if (comm->procneigh[0][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[0][0],0,
|
||||
&nxhi_ghost,1,MPI_INT,comm->procneigh[0][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nxhi_ghost = nplanes;
|
||||
|
||||
nplanes = nxhi_out - nxhi_in;
|
||||
if (comm->procneigh[0][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[0][1],0,
|
||||
&nxlo_ghost,1,MPI_INT,comm->procneigh[0][0],
|
||||
0,world,&status);
|
||||
0,world,MPI_STATUS_IGNORE);
|
||||
else nxlo_ghost = nplanes;
|
||||
|
||||
nplanes = nylo_in - nylo_out;
|
||||
if (comm->procneigh[1][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[1][0],0,
|
||||
&nyhi_ghost,1,MPI_INT,comm->procneigh[1][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nyhi_ghost = nplanes;
|
||||
|
||||
nplanes = nyhi_out - nyhi_in;
|
||||
if (comm->procneigh[1][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[1][1],0,
|
||||
&nylo_ghost,1,MPI_INT,comm->procneigh[1][0],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nylo_ghost = nplanes;
|
||||
|
||||
nplanes = nzlo_in - nzlo_out;
|
||||
if (comm->procneigh[2][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[2][0],0,
|
||||
&nzhi_ghost,1,MPI_INT,comm->procneigh[2][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nzhi_ghost = nplanes;
|
||||
|
||||
nplanes = nzhi_out - nzhi_in;
|
||||
if (comm->procneigh[2][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[2][1],0,
|
||||
&nzlo_ghost,1,MPI_INT,comm->procneigh[2][0],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nzlo_ghost = nplanes;
|
||||
|
||||
// test that ghost overlap is not bigger than my sub-domain
|
||||
|
@ -1291,7 +1290,7 @@ void PPPMCuda::poisson(int eflag, int vflag)
|
|||
{
|
||||
|
||||
#ifndef FFT_CUFFT
|
||||
PPPMOld::poisson();
|
||||
PPPMOld::poisson(eflag,vflag);
|
||||
return;
|
||||
#endif
|
||||
#ifdef FFT_CUFFT
|
||||
|
@ -1413,10 +1412,6 @@ void PPPMCuda::slabcorr(int eflag)
|
|||
double dipole_all;
|
||||
MPI_Allreduce(&dipole,&dipole_all,1,MPI_DOUBLE,MPI_SUM,world);
|
||||
|
||||
// compute corrections
|
||||
|
||||
double e_slabcorr = 2.0*MY_PI*dipole_all*dipole_all/volume;
|
||||
|
||||
//if (eflag) energy += qqrd2e*scale * e_slabcorr;
|
||||
// need to add a correction to make non-neutral systems and per-atom energy translationally invariant
|
||||
if (eflag || fabs(qsum) > SMALL)
|
||||
|
|
|
@ -98,7 +98,7 @@ class PPPMCuda : public PPPMOld {
|
|||
|
||||
virtual void particle_map();
|
||||
virtual void make_rho();
|
||||
void poisson(int, int);
|
||||
virtual void poisson(int, int);
|
||||
virtual void fieldforce();
|
||||
virtual void slabcorr(int);
|
||||
double*** vdx_brick_tmp;
|
||||
|
|
|
@ -332,48 +332,47 @@ void PPPMOld::init()
|
|||
// if no neighbor proc, value is from self since I have ghosts regardless
|
||||
|
||||
int nplanes;
|
||||
MPI_Status status;
|
||||
|
||||
nplanes = nxlo_in - nxlo_out;
|
||||
if (comm->procneigh[0][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[0][0],0,
|
||||
&nxhi_ghost,1,MPI_INT,comm->procneigh[0][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nxhi_ghost = nplanes;
|
||||
|
||||
nplanes = nxhi_out - nxhi_in;
|
||||
if (comm->procneigh[0][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[0][1],0,
|
||||
&nxlo_ghost,1,MPI_INT,comm->procneigh[0][0],
|
||||
0,world,&status);
|
||||
0,world,MPI_STATUS_IGNORE);
|
||||
else nxlo_ghost = nplanes;
|
||||
|
||||
nplanes = nylo_in - nylo_out;
|
||||
if (comm->procneigh[1][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[1][0],0,
|
||||
&nyhi_ghost,1,MPI_INT,comm->procneigh[1][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nyhi_ghost = nplanes;
|
||||
|
||||
nplanes = nyhi_out - nyhi_in;
|
||||
if (comm->procneigh[1][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[1][1],0,
|
||||
&nylo_ghost,1,MPI_INT,comm->procneigh[1][0],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nylo_ghost = nplanes;
|
||||
|
||||
nplanes = nzlo_in - nzlo_out;
|
||||
if (comm->procneigh[2][0] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[2][0],0,
|
||||
&nzhi_ghost,1,MPI_INT,comm->procneigh[2][1],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nzhi_ghost = nplanes;
|
||||
|
||||
nplanes = nzhi_out - nzhi_in;
|
||||
if (comm->procneigh[2][1] != me)
|
||||
MPI_Sendrecv(&nplanes,1,MPI_INT,comm->procneigh[2][1],0,
|
||||
&nzlo_ghost,1,MPI_INT,comm->procneigh[2][0],0,
|
||||
world,&status);
|
||||
world,MPI_STATUS_IGNORE);
|
||||
else nzlo_ghost = nplanes;
|
||||
|
||||
// test that ghost overlap is not bigger than my sub-domain
|
||||
|
@ -704,7 +703,7 @@ void PPPMOld::compute(int eflag, int vflag)
|
|||
// return gradients (electric fields) in 3d brick decomposition
|
||||
// also performs per-atom calculations via poisson_peratom()
|
||||
|
||||
poisson();
|
||||
poisson(eflag,vflag);
|
||||
|
||||
// all procs communicate E-field values
|
||||
// to fill ghost cells surrounding their 3d bricks
|
||||
|
@ -1198,7 +1197,6 @@ void PPPMOld::brick2fft()
|
|||
{
|
||||
int i,n,ix,iy,iz;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
|
||||
// pack my ghosts for +x processor
|
||||
// pass data to self or +x processor
|
||||
|
@ -1215,7 +1213,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[0][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[0][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1239,7 +1237,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[0][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[0][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1263,7 +1261,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[1][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[1][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1287,7 +1285,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[1][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[1][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1311,7 +1309,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[2][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[2][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1335,7 +1333,7 @@ void PPPMOld::brick2fft()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[2][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[2][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1366,7 +1364,6 @@ void PPPMOld::fillbrick()
|
|||
{
|
||||
int i,n,ix,iy,iz;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
|
||||
// pack my real cells for +z processor
|
||||
// pass data to self or +z processor
|
||||
|
@ -1386,7 +1383,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[2][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[2][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1416,7 +1413,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[2][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[2][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1446,7 +1443,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[1][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[1][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1476,7 +1473,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[1][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[1][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1506,7 +1503,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[0][0],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[0][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1536,7 +1533,7 @@ void PPPMOld::fillbrick()
|
|||
else {
|
||||
MPI_Irecv(buf2,nbuf,MPI_FFT_SCALAR,comm->procneigh[0][1],0,world,&request);
|
||||
MPI_Send(buf1,n,MPI_FFT_SCALAR,comm->procneigh[0][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1557,7 +1554,6 @@ void PPPMOld::fillbrick_peratom()
|
|||
{
|
||||
int i,n,ix,iy,iz;
|
||||
MPI_Request request;
|
||||
MPI_Status status;
|
||||
|
||||
// pack my real cells for +z processor
|
||||
// pass data to self or +z processor
|
||||
|
@ -1584,7 +1580,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[2][0],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[2][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1627,7 +1623,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[2][1],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[2][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1670,7 +1666,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[1][0],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[1][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1713,7 +1709,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[1][1],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[1][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1756,7 +1752,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[0][0],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[0][1],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1799,7 +1795,7 @@ void PPPMOld::fillbrick_peratom()
|
|||
MPI_Irecv(buf4,nbuf_peratom,MPI_FFT_SCALAR,
|
||||
comm->procneigh[0][1],0,world,&request);
|
||||
MPI_Send(buf3,n,MPI_FFT_SCALAR,comm->procneigh[0][0],0,world);
|
||||
MPI_Wait(&request,&status);
|
||||
MPI_Wait(&request,MPI_STATUS_IGNORE);
|
||||
}
|
||||
|
||||
n = 0;
|
||||
|
@ -1914,7 +1910,7 @@ void PPPMOld::make_rho()
|
|||
FFT-based Poisson solver
|
||||
------------------------------------------------------------------------- */
|
||||
|
||||
void PPPMOld::poisson()
|
||||
void PPPMOld::poisson(int,int)
|
||||
{
|
||||
int i,j,k,n;
|
||||
double eng;
|
||||
|
@ -2245,7 +2241,6 @@ void PPPMOld::fieldforce_peratom()
|
|||
|
||||
double *q = atom->q;
|
||||
double **x = atom->x;
|
||||
double **f = atom->f;
|
||||
|
||||
int nlocal = atom->nlocal;
|
||||
|
||||
|
@ -2578,7 +2573,7 @@ void PPPMOld::compute_group_group(int groupbit_A, int groupbit_B, int BA_flag)
|
|||
error->all(FLERR,"Cannot (yet) use K-space slab "
|
||||
"correction with compute group/group");
|
||||
|
||||
int i,j;
|
||||
int i;
|
||||
|
||||
if (!group_allocate_flag) {
|
||||
allocate_groups();
|
||||
|
@ -2590,11 +2585,6 @@ void PPPMOld::compute_group_group(int groupbit_A, int groupbit_B, int BA_flag)
|
|||
f2group[1] = 0; //force in y-direction
|
||||
f2group[2] = 0; //force in z-direction
|
||||
|
||||
double *q = atom->q;
|
||||
int nlocal = atom->nlocal;
|
||||
int *mask = atom->mask;
|
||||
|
||||
|
||||
// map my particle charge onto my local 3d density grid
|
||||
|
||||
make_rho_groups(groupbit_A,groupbit_B,BA_flag);
|
||||
|
@ -2758,7 +2748,6 @@ void PPPMOld::make_rho_groups(int groupbit_A, int groupbit_B, int BA_flag)
|
|||
void PPPMOld::poisson_groups(int BA_flag)
|
||||
{
|
||||
int i,j,k,n;
|
||||
double eng;
|
||||
|
||||
// reuse memory (already declared)
|
||||
|
||||
|
|
|
@ -116,7 +116,7 @@ class PPPMOld : public KSpace {
|
|||
virtual void brick2fft();
|
||||
virtual void fillbrick();
|
||||
virtual void fillbrick_peratom();
|
||||
virtual void poisson();
|
||||
virtual void poisson(int,int);
|
||||
virtual void poisson_peratom();
|
||||
virtual void fieldforce();
|
||||
virtual void fieldforce_peratom();
|
||||
|
|
|
@ -105,8 +105,6 @@ void VerletCuda::setup()
|
|||
atom->setup();
|
||||
|
||||
cuda_shared_atom* cu_atom = & cuda->shared_data.atom;
|
||||
cuda_shared_domain* cu_domain = & cuda->shared_data.domain;
|
||||
cuda_shared_pair* cu_pair = & cuda->shared_data.pair;
|
||||
cu_atom->update_nlocal = 1;
|
||||
cu_atom->update_nmax = 1;
|
||||
|
||||
|
@ -408,8 +406,6 @@ void VerletCuda::setup_minimal(int flag)
|
|||
//cuda->allocate();
|
||||
|
||||
cuda_shared_atom* cu_atom = & cuda->shared_data.atom;
|
||||
cuda_shared_domain* cu_domain = & cuda->shared_data.domain;
|
||||
cuda_shared_pair* cu_pair = & cuda->shared_data.pair;
|
||||
cu_atom->update_nlocal = 1;
|
||||
cu_atom->update_nmax = 1;
|
||||
|
||||
|
@ -590,20 +586,9 @@ void VerletCuda::run(int n)
|
|||
|
||||
cuda->setTimingsZero();
|
||||
|
||||
static double testtime = 0.0;
|
||||
// my_gettime(CLOCK_REALTIME,&starttime);
|
||||
// my_gettime(CLOCK_REALTIME,&endtime);
|
||||
// testtime+=endtime.tv_sec-starttime.tv_sec+1.0*(endtime.tv_nsec-starttime.tv_nsec)/1000000000;
|
||||
// printf("Time: %lf\n",testtime);*/
|
||||
|
||||
|
||||
cuda_shared_domain* cu_domain = & cuda->shared_data.domain;
|
||||
|
||||
int nflag, ntimestep, sortflag;
|
||||
|
||||
int n_initial_integrate = modify_cuda->n_initial_integrate;
|
||||
int n_post_integrate = modify_cuda->n_post_integrate;
|
||||
int n_final_integrate = modify_cuda->n_final_integrate;
|
||||
int n_pre_exchange = modify_cuda->n_pre_exchange;
|
||||
int n_pre_neighbor = modify_cuda->n_pre_neighbor;
|
||||
int n_pre_force = modify_cuda->n_pre_force;
|
||||
|
@ -694,7 +679,7 @@ void VerletCuda::run(int n)
|
|||
my_gettime(CLOCK_REALTIME, &starttime);
|
||||
timer->stamp();
|
||||
comm->forward_comm(1);
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.comm_forward_total +=
|
||||
endtime.tv_sec - starttime.tv_sec + 1.0 * (endtime.tv_nsec - starttime.tv_nsec) / 1000000000;
|
||||
|
@ -714,7 +699,7 @@ void VerletCuda::run(int n)
|
|||
//start force calculation asynchronus
|
||||
cuda->shared_data.comm.comm_phase = 1;
|
||||
force->pair->compute(eflag, vflag);
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
//CudaWrapper_Sync();
|
||||
|
||||
//download comm buffers from GPU, perform MPI communication and upload buffers again
|
||||
|
@ -723,11 +708,11 @@ void VerletCuda::run(int n)
|
|||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.comm_forward_total +=
|
||||
endtime.tv_sec - starttime.tv_sec + 1.0 * (endtime.tv_nsec - starttime.tv_nsec) / 1000000000;
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
|
||||
//wait for force calculation
|
||||
CudaWrapper_Sync();
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
|
||||
//unpack communication buffers
|
||||
my_gettime(CLOCK_REALTIME, &starttime);
|
||||
|
@ -736,7 +721,7 @@ void VerletCuda::run(int n)
|
|||
cuda->shared_data.cuda_timings.comm_forward_total +=
|
||||
endtime.tv_sec - starttime.tv_sec + 1.0 * (endtime.tv_nsec - starttime.tv_nsec) / 1000000000;
|
||||
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
MYDBG(printf("# CUDA VerletCuda::iterate: communicate done\n");)
|
||||
cuda->shared_data.cuda_timings.test1 +=
|
||||
endtotal.tv_sec - starttotal.tv_sec + 1.0 * (endtotal.tv_nsec - starttotal.tv_nsec) / 1000000000;
|
||||
|
@ -747,7 +732,7 @@ void VerletCuda::run(int n)
|
|||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.comm_forward_total +=
|
||||
endtime.tv_sec - starttime.tv_sec + 1.0 * (endtime.tv_nsec - starttime.tv_nsec) / 1000000000;
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
MYDBG(printf("# CUDA VerletCuda::iterate: communicate done\n");)
|
||||
}
|
||||
} else {
|
||||
|
@ -837,7 +822,7 @@ void VerletCuda::run(int n)
|
|||
cuda->shared_data.buffer_new = 2;
|
||||
|
||||
MYDBG(printf("# CUDA VerletCuda::iterate: neighbor build\n");)
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.test2 +=
|
||||
endtime.tv_sec - starttime.tv_sec + 1.0 * (endtime.tv_nsec - starttime.tv_nsec) / 1000000000;
|
||||
|
@ -845,7 +830,7 @@ void VerletCuda::run(int n)
|
|||
//rebuild neighbor list
|
||||
test_atom(testatom, "Pre Neighbor");
|
||||
neighbor->build(0);
|
||||
timer->stamp(TIME_NEIGHBOR);
|
||||
timer->stamp(Timer::NEIGH);
|
||||
MYDBG(printf("# CUDA VerletCuda::iterate: neighbor done\n");)
|
||||
//if bonded interactions are used (in this case collect_forces_later is true), transfer data which only changes upon exchange/border routines from GPU to CPU
|
||||
if(cuda->shared_data.pair.collect_forces_later) {
|
||||
|
@ -932,7 +917,7 @@ void VerletCuda::run(int n)
|
|||
if(not cuda->shared_data.pair.collect_forces_later)
|
||||
CudaWrapper_Sync();
|
||||
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
}
|
||||
|
||||
//calculate bonded interactions
|
||||
|
@ -942,11 +927,11 @@ void VerletCuda::run(int n)
|
|||
if(n_pre_force == 0) Verlet::force_clear();
|
||||
else cuda->cu_f->downloadAsync(2);
|
||||
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
|
||||
if(neighbor->lastcall == update->ntimestep) {
|
||||
neighbor->build_topology();
|
||||
timer->stamp(TIME_NEIGHBOR);
|
||||
timer->stamp(Timer::NEIGH);
|
||||
}
|
||||
|
||||
test_atom(testatom, "pre bond force");
|
||||
|
@ -959,7 +944,7 @@ void VerletCuda::run(int n)
|
|||
|
||||
if(force->improper) force->improper->compute(eflag, vflag);
|
||||
|
||||
timer->stamp(TIME_BOND);
|
||||
timer->stamp(Timer::BOND);
|
||||
}
|
||||
|
||||
//collect forces in case pair force and bonded interactions were overlapped, and either no KSPACE or a GPU KSPACE style is used
|
||||
|
@ -984,7 +969,7 @@ void VerletCuda::run(int n)
|
|||
|
||||
if(vflag) cuda->cu_virial->download();
|
||||
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
|
||||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.pair_force_collection +=
|
||||
|
@ -1002,7 +987,7 @@ void VerletCuda::run(int n)
|
|||
if(n_pre_force == 0) Verlet::force_clear();
|
||||
else cuda->cu_f->downloadAsync(2);
|
||||
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
}
|
||||
|
||||
force->kspace->compute(eflag, vflag);
|
||||
|
@ -1010,7 +995,7 @@ void VerletCuda::run(int n)
|
|||
if((not cuda->shared_data.pppm.cudable_force) && (not cuda->shared_data.pair.collect_forces_later))
|
||||
cuda->uploadAll();
|
||||
|
||||
timer->stamp(TIME_KSPACE);
|
||||
timer->stamp(Timer::KSPACE);
|
||||
}
|
||||
|
||||
//collect forces in case pair forces and kspace was overlaped
|
||||
|
@ -1033,7 +1018,7 @@ void VerletCuda::run(int n)
|
|||
|
||||
if(vflag) cuda->cu_virial->download();
|
||||
|
||||
timer->stamp(TIME_PAIR);
|
||||
timer->stamp(Timer::PAIR);
|
||||
|
||||
my_gettime(CLOCK_REALTIME, &endtime);
|
||||
cuda->shared_data.cuda_timings.pair_force_collection +=
|
||||
|
@ -1043,7 +1028,7 @@ void VerletCuda::run(int n)
|
|||
//send forces on ghost atoms back to other GPU: THIS SHOULD NEVER HAPPEN
|
||||
if(force->newton) {
|
||||
comm->reverse_comm();
|
||||
timer->stamp(TIME_COMM);
|
||||
timer->stamp(Timer::COMM);
|
||||
}
|
||||
|
||||
test_atom(testatom, "post force");
|
||||
|
@ -1069,7 +1054,7 @@ void VerletCuda::run(int n)
|
|||
|
||||
timer->stamp();
|
||||
output->write(ntimestep);
|
||||
timer->stamp(TIME_OUTPUT);
|
||||
timer->stamp(Timer::OUTPUT);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1117,8 +1102,7 @@ void VerletCuda::force_clear()
|
|||
|
||||
if(cuda->cu_torque) cuda->cu_torque->memset_device(0);
|
||||
|
||||
return;
|
||||
|
||||
#if 0
|
||||
//The rest should not be necessary
|
||||
int i;
|
||||
|
||||
|
@ -1179,9 +1163,10 @@ void VerletCuda::force_clear()
|
|||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void VerletCuda::test_atom(int aatom, char* string) //printing properties of one atom for test purposes
|
||||
void VerletCuda::test_atom(int aatom, const char* string) //printing properties of one atom for test purposes
|
||||
{
|
||||
if(not dotestatom) return;
|
||||
|
||||
|
@ -1192,7 +1177,9 @@ void VerletCuda::test_atom(int aatom, char* string) //printing properties of on
|
|||
for(int i = 0; i < atom->nlocal + atom->nghost; i++) {
|
||||
if((atom->tag[i] == aatom) && (i < atom->nlocal)) {
|
||||
|
||||
printf("%i # CUDA %s: %i %i %e %e %e %i ", comm->me, string, update->ntimestep, atom->tag[i], atom->x[i][0], atom->v[i][0], atom->f[i][0], i);
|
||||
printf("%i # CUDA %s: " BIGINT_FORMAT " %i %e %e %e %i ",
|
||||
comm->me, string, update->ntimestep, atom->tag[i],
|
||||
atom->x[i][0], atom->v[i][0], atom->f[i][0], i);
|
||||
|
||||
if(atom->molecular && (i < atom->nlocal)) {
|
||||
printf(" // %i %i %i ", atom->num_bond[i], atom->num_angle[i], atom->num_dihedral[i]);
|
||||
|
|
|
@ -43,7 +43,7 @@ class VerletCuda : public Verlet
|
|||
void setup_minimal(int);
|
||||
void run(int);
|
||||
|
||||
void test_atom(int atom,char* astring); //debugging purpose
|
||||
void test_atom(int atom,const char* astring); //debugging purpose
|
||||
int dotestatom; //debugging purpose
|
||||
|
||||
protected:
|
||||
|
|
|
@@ -516,8 +516,6 @@ void VerletSplitIntel::rk_setup()
void VerletSplitIntel::r2k_comm()
{
MPI_Status status;
int n = 0;
if (master) n = atom->nlocal;
MPI_Gatherv(atom->x[0],n*3,MPI_DOUBLE,atom->x[0],xsize,xdisp,
@@ -531,7 +529,7 @@ void VerletSplitIntel::r2k_comm()
MPI_Send(flags,2,MPI_INT,0,0,block);
} else if (!master) {
int flags[2];
MPI_Recv(flags,2,MPI_DOUBLE,1,0,block,&status);
MPI_Recv(flags,2,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
eflag = flags[0]; vflag = flags[1];
}
@@ -542,8 +540,8 @@ void VerletSplitIntel::r2k_comm()
MPI_Send(domain->boxlo,3,MPI_DOUBLE,0,0,block);
MPI_Send(domain->boxhi,3,MPI_DOUBLE,0,0,block);
} else if (!master) {
MPI_Recv(domain->boxlo,3,MPI_DOUBLE,1,0,block,&status);
MPI_Recv(domain->boxhi,3,MPI_DOUBLE,1,0,block,&status);
MPI_Recv(domain->boxlo,3,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
MPI_Recv(domain->boxhi,3,MPI_DOUBLE,1,0,block,MPI_STATUS_IGNORE);
domain->set_global_box();
domain->set_local_box();
force->kspace->setup();
|
@ -802,7 +802,6 @@ void FixLbFluid::calc_fluidforce(void)
|
|||
double **x = atom->x;
|
||||
int i,j,k,m;
|
||||
MPI_Request requests[20];
|
||||
MPI_Status statuses[20];
|
||||
double forceloc[3],force[3];
|
||||
double torqueloc[3],torque[3];
|
||||
|
||||
|
@ -901,7 +900,7 @@ void FixLbFluid::calc_fluidforce(void)
|
|||
MPI_Irecv(&Fftempx[2][0][0][0],1,passxtemp,comm->procneigh[0][0],30,world,&requests[7]);
|
||||
MPI_Irecv(&Fftempx[3][0][0][0],1,passxtemp,comm->procneigh[0][0],40,world,&requests[8]);
|
||||
MPI_Irecv(&Fftempx[4][0][0][0],1,passxtemp,comm->procneigh[0][0],50,world,&requests[9]);
|
||||
MPI_Waitall(10,requests,statuses);
|
||||
MPI_Waitall(10,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(j=0; j<subNby+3; j++){
|
||||
for(k=0; k<subNbz+3; k++){
|
||||
|
@ -927,7 +926,7 @@ void FixLbFluid::calc_fluidforce(void)
|
|||
MPI_Irecv(&Fftempy[0][2][0][0],1,passytemp,comm->procneigh[1][0],30,world,&requests[7]);
|
||||
MPI_Irecv(&Fftempy[0][3][0][0],1,passytemp,comm->procneigh[1][0],40,world,&requests[8]);
|
||||
MPI_Irecv(&Fftempy[0][4][0][0],1,passytemp,comm->procneigh[1][0],50,world,&requests[9]);
|
||||
MPI_Waitall(10,requests,statuses);
|
||||
MPI_Waitall(10,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<subNbx+3; i++){
|
||||
for(k=0; k<subNbz+3; k++){
|
||||
|
@ -953,7 +952,7 @@ void FixLbFluid::calc_fluidforce(void)
|
|||
MPI_Irecv(&Fftempz[0][0][2][0],1,passztemp,comm->procneigh[2][0],30,world,&requests[7]);
|
||||
MPI_Irecv(&Fftempz[0][0][3][0],1,passztemp,comm->procneigh[2][0],40,world,&requests[8]);
|
||||
MPI_Irecv(&Fftempz[0][0][4][0],1,passztemp,comm->procneigh[2][0],50,world,&requests[9]);
|
||||
MPI_Waitall(10,requests,statuses);
|
||||
MPI_Waitall(10,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<subNbx+3; i++){
|
||||
for(j=0; j<subNby+3; j++){
|
||||
|
@ -1837,7 +1836,6 @@ void FixLbFluid::initialize_feq(void)
|
|||
{
|
||||
int i,j,k,p;
|
||||
MPI_Request requests[8];
|
||||
MPI_Status statuses[8];
|
||||
int numrequests;
|
||||
|
||||
// If using the standary LB integrator, do not need to send feqn.
|
||||
|
@ -1870,7 +1868,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Isend(&feqn[subNbx-2][1][1][0],1,passxf,comm->procneigh[0][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
}
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -1884,7 +1882,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Isend(&feqn[0][subNby-2][1][0],1,passyf,comm->procneigh[1][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqn[0][subNby-1][1][0],1,passyf,comm->procneigh[1][1],10,world,&requests[7]);
|
||||
}
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -1898,7 +1896,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Isend(&feqn[0][0][subNbz-2][0],1,passzf,comm->procneigh[2][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqn[0][0][subNbz-1][0],1,passzf,comm->procneigh[2][1],10,world,&requests[7]);
|
||||
}
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
//Save feqold.
|
||||
if(typeLB == 2){
|
||||
|
@ -1926,7 +1924,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Irecv(&feqoldn[0][1][1][0],1,passxf,comm->procneigh[0][0],20,world,&requests[5]);
|
||||
MPI_Isend(&feqoldn[subNbx-2][1][1][0],1,passxf,comm->procneigh[0][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqoldn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
MPI_Waitall(8,requests,statuses);
|
||||
MPI_Waitall(8,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<8; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -1938,7 +1936,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Irecv(&feqoldn[0][0][1][0],1,passyf,comm->procneigh[1][0],20,world,&requests[5]);
|
||||
MPI_Isend(&feqoldn[0][subNby-2][1][0],1,passyf,comm->procneigh[1][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqoldn[0][subNby-1][1][0],1,passyf,comm->procneigh[1][1],10,world,&requests[7]);
|
||||
MPI_Waitall(8,requests,statuses);
|
||||
MPI_Waitall(8,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<8; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -1950,7 +1948,7 @@ void FixLbFluid::initialize_feq(void)
|
|||
MPI_Irecv(&feqoldn[0][0][0][0],1,passzf,comm->procneigh[2][0],20,world,&requests[5]);
|
||||
MPI_Isend(&feqoldn[0][0][subNbz-2][0],1,passzf,comm->procneigh[2][1],20,world,&requests[6]);
|
||||
MPI_Irecv(&feqoldn[0][0][subNbz-1][0],1,passzf,comm->procneigh[2][1],10,world,&requests[7]);
|
||||
MPI_Waitall(8,requests,statuses);
|
||||
MPI_Waitall(8,requests,MPI_STATUS_IGNORE);
|
||||
}
|
||||
parametercalc_full();
|
||||
}
|
||||
|
@ -2325,9 +2323,7 @@ void FixLbFluid::equilibriumdist19(int xstart, int xend, int ystart, int yend, i
|
|||
void FixLbFluid::parametercalc_full(void)
|
||||
{
|
||||
MPI_Request requests[4];
|
||||
MPI_Status statuses[4];
|
||||
MPI_Request requests2[12];
|
||||
MPI_Status statuses2[12];
|
||||
int numrequests;
|
||||
int i;
|
||||
|
||||
|
@ -2343,7 +2339,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
MPI_Isend(&f_lb[subNbx-2][1][1][0],1,passxf,comm->procneigh[0][1],20,world,&requests[2]);
|
||||
MPI_Irecv(&f_lb[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[3]);
|
||||
parametercalc_part(1,subNbx-1,1,subNby-1,1,subNbz-1);
|
||||
MPI_Waitall(4,requests,statuses);
|
||||
MPI_Waitall(4,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<4; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2353,7 +2349,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
MPI_Irecv(&f_lb[0][subNby-1][1][0],1,passyf,comm->procneigh[1][1],10,world,&requests[3]);
|
||||
parametercalc_part(0,1,1,subNby-1,1,subNbz-1);
|
||||
parametercalc_part(subNbx-1,subNbx,1,subNby-1,1,subNbz-1);
|
||||
MPI_Waitall(4,requests,statuses);
|
||||
MPI_Waitall(4,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<4; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2363,7 +2359,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
MPI_Irecv(&f_lb[0][0][subNbz-1][0],1,passzf,comm->procneigh[2][1],10,world,&requests[3]);
|
||||
parametercalc_part(0,subNbx,0,1,1,subNbz-1);
|
||||
parametercalc_part(0,subNbx,subNby-1,subNby,1,subNbz-1);
|
||||
MPI_Waitall(4,requests,statuses);
|
||||
MPI_Waitall(4,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
parametercalc_part(0,subNbx,0,subNby,0,1);
|
||||
parametercalc_part(0,subNbx,0,subNby,subNbz-1,subNbz);
|
||||
|
@ -2391,7 +2387,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
MPI_Irecv(&density_lb[subNbx+1][0][0],1,passxrho,comm->procneigh[0][1],50,world,&requests2[10]);
|
||||
MPI_Irecv(&density_lb[subNbx+2][0][0],1,passxrho,comm->procneigh[0][0],60,world,&requests2[11]);
|
||||
}
|
||||
MPI_Waitall(numrequests,requests2,statuses2);
|
||||
MPI_Waitall(numrequests,requests2,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests2[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2409,7 +2405,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
MPI_Irecv(&density_lb[0][subNby+1][0],1,passyrho,comm->procneigh[1][1],50,world,&requests2[10]);
|
||||
MPI_Irecv(&density_lb[0][subNby+2][0],1,passyrho,comm->procneigh[1][0],60,world,&requests2[11]);
|
||||
}
|
||||
MPI_Waitall(numrequests,requests2,statuses2);
|
||||
MPI_Waitall(numrequests,requests2,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<12; i++)
|
||||
requests2[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2438,7 +2434,7 @@ void FixLbFluid::parametercalc_full(void)
|
|||
requestcount=requestcount+3;
|
||||
}
|
||||
}
|
||||
MPI_Waitall(requestcount,requests2,statuses2);
|
||||
MPI_Waitall(requestcount,requests2,MPI_STATUS_IGNORE);
|
||||
|
||||
}
|
||||
|
||||
|
@ -2670,7 +2666,6 @@ void FixLbFluid::update_full15(void)
|
|||
MPI_Request req_send15,req_recv15;
|
||||
MPI_Request req_send25,req_recv25;
|
||||
MPI_Request requests[8];
|
||||
MPI_Status statuses[8];
|
||||
int numrequests;
|
||||
double tmp1;
|
||||
MPI_Status status;
|
||||
|
@ -2706,7 +2701,7 @@ void FixLbFluid::update_full15(void)
|
|||
MPI_Irecv(&feqn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
}
|
||||
update_periodic(2,subNbx-2,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
|
@ -2723,7 +2718,7 @@ void FixLbFluid::update_full15(void)
|
|||
}
|
||||
update_periodic(1,2,2,subNby-2,2,subNbz-2);
|
||||
update_periodic(subNbx-2,subNbx-1,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2739,7 +2734,7 @@ void FixLbFluid::update_full15(void)
|
|||
}
|
||||
update_periodic(1,subNbx-1,1,2,2,subNbz-2);
|
||||
update_periodic(1,subNbx-1,subNby-2,subNby-1,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
if(typeLB==1){
|
||||
update_periodic(1,subNbx-1,1,subNby-1,1,2);
|
||||
|
@ -2981,7 +2976,7 @@ void FixLbFluid::update_full15(void)
|
|||
MPI_Irecv(&feqn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
}
|
||||
update_periodic(2,subNbx-2,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -2997,7 +2992,7 @@ void FixLbFluid::update_full15(void)
|
|||
}
|
||||
update_periodic(1,2,2,subNby-2,2,subNbz-2);
|
||||
update_periodic(subNbx-2,subNbx-1,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -3013,7 +3008,7 @@ void FixLbFluid::update_full15(void)
|
|||
}
|
||||
update_periodic(1,subNbx-1,1,2,2,subNbz-2);
|
||||
update_periodic(1,subNbx-1,subNby-2,subNby-1,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
update_periodic(1,subNbx-1,1,subNby-1,1,2);
|
||||
update_periodic(1,subNbx-1,1,subNby-1,subNbz-2,subNbz-1);
|
||||
|
@ -3031,7 +3026,6 @@ void FixLbFluid::update_full19(void)
|
|||
MPI_Request req_send15,req_recv15;
|
||||
MPI_Request req_send25,req_recv25;
|
||||
MPI_Request requests[8];
|
||||
MPI_Status statuses[8];
|
||||
int numrequests;
|
||||
double tmp1,tmp2,tmp3;
|
||||
MPI_Status status;
|
||||
|
@ -3067,7 +3061,7 @@ void FixLbFluid::update_full19(void)
|
|||
MPI_Irecv(&feqn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
}
|
||||
update_periodic(2,subNbx-2,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -3083,7 +3077,7 @@ void FixLbFluid::update_full19(void)
|
|||
}
|
||||
update_periodic(1,2,2,subNby-2,2,subNbz-2);
|
||||
update_periodic(subNbx-2,subNbx-1,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -3099,7 +3093,7 @@ void FixLbFluid::update_full19(void)
|
|||
}
|
||||
update_periodic(1,subNbx-1,1,2,2,subNbz-2);
|
||||
update_periodic(1,subNbx-1,subNby-2,subNby-1,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
if(typeLB==1){
|
||||
update_periodic(1,subNbx-1,1,subNby-1,1,2);
|
||||
|
@ -3331,7 +3325,7 @@ void FixLbFluid::update_full19(void)
|
|||
MPI_Irecv(&feqn[subNbx-1][1][1][0],1,passxf,comm->procneigh[0][1],10,world,&requests[7]);
|
||||
}
|
||||
update_periodic(2,subNbx-2,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -3347,7 +3341,7 @@ void FixLbFluid::update_full19(void)
|
|||
}
|
||||
update_periodic(1,2,2,subNby-2,2,subNbz-2);
|
||||
update_periodic(subNbx-2,subNbx-1,2,subNby-2,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
for(i=0; i<numrequests; i++)
|
||||
requests[i]=MPI_REQUEST_NULL;
|
||||
|
@ -3363,7 +3357,7 @@ void FixLbFluid::update_full19(void)
|
|||
}
|
||||
update_periodic(1,subNbx-1,1,2,2,subNbz-2);
|
||||
update_periodic(1,subNbx-1,subNby-2,subNby-1,2,subNbz-2);
|
||||
MPI_Waitall(numrequests,requests,statuses);
|
||||
MPI_Waitall(numrequests,requests,MPI_STATUS_IGNORE);
|
||||
|
||||
update_periodic(1,subNbx-1,1,subNby-1,1,2);
|
||||
update_periodic(1,subNbx-1,1,subNby-1,subNbz-2,subNbz-1);
|
||||
|
|
|
@ -602,7 +602,6 @@ void FixAveSpatialSphere::end_of_step()
}
//bin each atom
double **x= atom->x;
int *mask= atom->mask;
int nlocal= atom->nlocal;

@ -869,10 +868,8 @@ void FixAveSpatialSphere::setup_bins()
}
//set the bin coordinates
for(int i= 0; i < nbins; i++) {
for(int i= 0; i < nbins; i++)
coord[i]= r_min + (i+0.5)*deltar;
double temp_r= r_min + (i+1)*deltar;
}
set_bin_volumes();
}

@ -942,17 +939,13 @@ void FixAveSpatialSphere::bin_atoms()
int *mask = atom->mask;
int nlocal = atom->nlocal;
double *boxlo,*boxhi,*prd,*prd_half;
double *prd,*prd_half;
//deal with the periodic boundary conditions
if(domain->periodicity[0] || domain->periodicity[1] || domain->periodicity[2]) {
if(scaleflag == REDUCED) {
boxlo= domain->boxlo_lamda;
boxhi= domain->boxhi_lamda;
prd= domain->prd_lamda;
prd_half= domain->prd_half_lamda;
} else {
boxlo= domain->boxlo;
boxhi= domain->boxhi;
prd= domain->prd;
prd_half= domain->prd_half;
}

@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov

@ -713,12 +713,12 @@ void FixIMD::setup(int)
taginthash_init(hashtable, num_coords);
idmap = (void *)hashtable;
MPI_Status status;
MPI_Request request;
int tmp, ndata;
struct commdata *buf = static_cast<struct commdata *>(comm_buf);
if (me == 0) {
MPI_Status status;
MPI_Request request;
tagint *taglist = new tagint[num_coords];
int numtag=0; /* counter to map atom tags to a 0-based consecutive index list */

@ -763,7 +763,7 @@ void FixIMD::setup(int)
}
}
/* blocking receive to wait until it is our turn to send data. */
MPI_Recv(&tmp, 0, MPI_INT, 0, 0, world, &status);
MPI_Recv(&tmp, 0, MPI_INT, 0, 0, world, MPI_STATUS_IGNORE);
MPI_Rsend(comm_buf, nme*size_one, MPI_BYTE, 0, 0, world);
}

@ -1012,12 +1012,12 @@ void FixIMD::post_force(int vflag)
comm_buf = memory->smalloc(maxbuf,"imd:comm_buf");
}
MPI_Status status;
MPI_Request request;
int tmp, ndata;
buf = static_cast<struct commdata *>(comm_buf);
if (me == 0) {
MPI_Status status;
MPI_Request request;
/* collect data into new array. we bypass the IMD API to save
* us one extra copy of the data. */
msglen = 3*sizeof(float)*num_coords+IMDHEADERSIZE;

@ -1145,7 +1145,7 @@ void FixIMD::post_force(int vflag)
}
}
/* blocking receive to wait until it is our turn to send data. */
MPI_Recv(&tmp, 0, MPI_INT, 0, 0, world, &status);
MPI_Recv(&tmp, 0, MPI_INT, 0, 0, world, MPI_STATUS_IGNORE);
MPI_Rsend(comm_buf, nme*size_one, MPI_BYTE, 0, 0, world);
}
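FixIMD, and several files further down (DumpMolfile, Dump, ReadRestart, WriteData, WriteRestart), rely on the same rank-0 collection handshake: rank 0 posts an MPI_Irecv, pings the sender with a zero-length message, and the sender answers with MPI_Rsend once the ping arrives, which guarantees the matching receive is already posted. A stripped-down sketch of that handshake, with made-up buffer sizes, is:

// ping_rsend_sketch.cpp -- illustrative rank-0 gather handshake
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  const int n = 8;                 // per-rank payload size (illustrative)
  double buf[n];
  for (int i = 0; i < n; i++) buf[i] = me + 0.1 * i;

  if (me == 0) {
    int tmp;
    MPI_Request request;
    for (int iproc = 1; iproc < nprocs; iproc++) {
      MPI_Irecv(buf, n, MPI_DOUBLE, iproc, 0, MPI_COMM_WORLD, &request);  // post receive first
      MPI_Send(&tmp, 0, MPI_INT, iproc, 0, MPI_COMM_WORLD);               // ping: "your turn"
      MPI_Wait(&request, MPI_STATUS_IGNORE);                              // payload has arrived
      printf("rank 0 got chunk from %d, first value %g\n", iproc, buf[0]);
    }
  } else {
    int tmp;
    MPI_Recv(&tmp, 0, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);  // wait for our turn
    MPI_Rsend(buf, n, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);                  // receive is known to be posted
  }

  MPI_Finalize();
  return 0;
}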
@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov

@ -602,8 +602,6 @@ void FixPIMD::comm_init()
void FixPIMD::comm_exec(double **ptr)
{
MPI_Status status;
int nlocal = atom->nlocal;
if(nlocal > max_nlocal)

@ -629,7 +627,7 @@ void FixPIMD::comm_exec(double **ptr)
int nsend;
MPI_Sendrecv( &(nlocal), 1, MPI_INT, plan_send[iplan], 0,
&(nsend), 1, MPI_INT, plan_recv[iplan], 0, universe->uworld, &status);
&(nsend), 1, MPI_INT, plan_recv[iplan], 0, universe->uworld, MPI_STATUS_IGNORE);
// allocate arrays

@ -643,7 +641,7 @@ void FixPIMD::comm_exec(double **ptr)
// send tags
MPI_Sendrecv( atom->tag, nlocal, MPI_INT, plan_send[iplan], 0,
tag_send, nsend, MPI_INT, plan_recv[iplan], 0, universe->uworld, &status);
tag_send, nsend, MPI_INT, plan_recv[iplan], 0, universe->uworld, MPI_STATUS_IGNORE);
// wrap positions

@ -671,7 +669,7 @@ void FixPIMD::comm_exec(double **ptr)
// sendrecv x
MPI_Sendrecv( buf_send, nsend*3, MPI_DOUBLE, plan_recv[iplan], 0,
buf_recv, nlocal*3, MPI_DOUBLE, plan_send[iplan], 0, universe->uworld, &status);
buf_recv, nlocal*3, MPI_DOUBLE, plan_send[iplan], 0, universe->uworld, MPI_STATUS_IGNORE);
// copy x
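The FixPIMD::comm_exec hunks exchange a count and then a coordinate payload between neighbors with paired MPI_Sendrecv calls, ignoring the status in both. A compact, self-contained sketch of that two-step exchange follows; the neighbor ranks and buffer names are invented for illustration and the data flows in one fixed direction, which is a simplification of the actual plan-based exchange.

// sendrecv_sketch.cpp -- paired exchange of a count, then a payload
#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  int plan_send = (me + 1) % nprocs;           // neighbor I send to (illustrative)
  int plan_recv = (me - 1 + nprocs) % nprocs;  // neighbor I receive from

  // first trade the element counts, then size the buffer and trade the data
  int nlocal = 3 + me, nsend = 0;
  MPI_Sendrecv(&nlocal, 1, MPI_INT, plan_send, 0,
               &nsend,  1, MPI_INT, plan_recv, 0,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  std::vector<double> buf_send(3 * nlocal, 1.0), buf_recv(3 * nsend);
  MPI_Sendrecv(buf_send.data(), 3 * nlocal, MPI_DOUBLE, plan_send, 0,
               buf_recv.data(), 3 * nsend,  MPI_DOUBLE, plan_recv, 0,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  MPI_Finalize();
  return 0;
}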
@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov

@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov

@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov

@ -234,10 +234,10 @@ void DumpMolfile::write()
sort();
int tmp,nlines;
MPI_Status status;
MPI_Request request;
if (me == 0) {
MPI_Status status;
MPI_Request request;
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {
MPI_Irecv(buf,maxbuf*size_one,MPI_DOUBLE,iproc,0,world,&request);

@ -251,7 +251,7 @@ void DumpMolfile::write()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,nme*size_one,MPI_DOUBLE,0,0,world);
}
}

@ -395,8 +395,6 @@ void FixQMMM::exchange_positions()
}
if (qmmm_role == QMMM_ROLE_MASTER) {
MPI_Status status;
MPI_Request request;
int i,tmp;
/* check and potentially grow local communication buffers. */

@ -408,6 +406,8 @@ void FixQMMM::exchange_positions()
struct commdata *buf = static_cast<struct commdata *>(comm_buf);
if (comm->me == 0) {
MPI_Status status;
MPI_Request request;
// insert local atoms into comm buffer
for (i=0; i<nlocal; ++i) {
if (mask[i] & groupbit) {

@ -642,8 +642,6 @@ void FixQMMM::init()
taginthash_init(qm_hash, num_qm);
qm_idmap = (void *)qm_hash;
MPI_Status status;
MPI_Request request;
const int nlocal = atom->nlocal;
int i, j, tmp, ndata, qm_ntag;
tagint *tag = atom->tag;

@ -651,6 +649,8 @@ void FixQMMM::init()
struct commdata *buf = static_cast<struct commdata *>(comm_buf);
if (me == 0) {
MPI_Status status;
MPI_Request request;
tagint *qm_taglist = new tagint[num_qm];
qm_ntag = 0;
for (i=0; i < nlocal; ++i) {

@ -255,7 +255,6 @@ void FixReaxCBonds::RecvBuffer(double *buf, int nbuf, int nbuf_local,
double cutof3 = reaxc->control->bg_cut;
MPI_Request irequest, irequest2;
MPI_Status istatus;
if (me == 0 ){
fprintf(fp,"# Timestep " BIGINT_FORMAT " \n",ntimestep);

@ -275,7 +274,7 @@ void FixReaxCBonds::RecvBuffer(double *buf, int nbuf, int nbuf_local,
nlocal_tmp = nlocal;
} else {
MPI_Irecv(&buf[0],nbuf,MPI_DOUBLE,inode,0,world,&irequest);
MPI_Wait(&irequest,&istatus);
MPI_Wait(&irequest,MPI_STATUS_IGNORE);
nlocal_tmp = nint(buf[0]);
}
j = 2;

@ -308,7 +307,7 @@ void FixReaxCBonds::RecvBuffer(double *buf, int nbuf, int nbuf_local,
}
} else {
MPI_Isend(&buf[0],nbuf_local,MPI_DOUBLE,0,0,world,&irequest2);
MPI_Wait(&irequest2,&istatus);
MPI_Wait(&irequest2,MPI_STATUS_IGNORE);
}
if(me ==0) fprintf(fp,"# \n");

@ -264,7 +264,6 @@ int Write_Init_Desc( reax_system *system, control_params *control,
{
int i, me, np, cnt, buffer_len, buffer_req;
reax_atom *p_atom;
//MPI_Request request;
MPI_Status status;
me = system->my_rank;

@ -407,14 +407,13 @@ void Comm::set_proc_grid(int outflag)
// recv 3d proc grid of another partition if my 3d grid depends on it
if (recv_from_partition >= 0) {
MPI_Status status;
if (me == 0) {
MPI_Recv(other_procgrid,3,MPI_INT,
universe->root_proc[recv_from_partition],0,
universe->uworld,&status);
universe->uworld,MPI_STATUS_IGNORE);
MPI_Recv(other_coregrid,3,MPI_INT,
universe->root_proc[recv_from_partition],0,
universe->uworld,&status);
universe->uworld,MPI_STATUS_IGNORE);
}
MPI_Bcast(other_procgrid,3,MPI_INT,0,world);
MPI_Bcast(other_coregrid,3,MPI_INT,0,world);
@ -444,7 +444,6 @@ void CommBrick::forward_comm(int dummy)
{
int n;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
double **x = atom->x;
double *buf;

@ -465,7 +464,7 @@ void CommBrick::forward_comm(int dummy)
n = avec->pack_comm(sendnum[iswap],sendlist[iswap],
buf_send,pbc_flag[iswap],pbc[iswap]);
if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
} else if (ghost_velocity) {
if (size_forward_recv[iswap])
MPI_Irecv(buf_recv,size_forward_recv[iswap],MPI_DOUBLE,

@ -473,7 +472,7 @@ void CommBrick::forward_comm(int dummy)
n = avec->pack_comm_vel(sendnum[iswap],sendlist[iswap],
buf_send,pbc_flag[iswap],pbc[iswap]);
if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
avec->unpack_comm_vel(recvnum[iswap],firstrecv[iswap],buf_recv);
} else {
if (size_forward_recv[iswap])

@ -482,7 +481,7 @@ void CommBrick::forward_comm(int dummy)
n = avec->pack_comm(sendnum[iswap],sendlist[iswap],
buf_send,pbc_flag[iswap],pbc[iswap]);
if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (size_forward_recv[iswap]) MPI_Wait(&request,&status);
if (size_forward_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
avec->unpack_comm(recvnum[iswap],firstrecv[iswap],buf_recv);
}

@ -513,7 +512,6 @@ void CommBrick::reverse_comm()
{
int n;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
double **f = atom->f;
double *buf;

@ -534,14 +532,14 @@ void CommBrick::reverse_comm()
MPI_Send(buf,size_reverse_send[iswap],MPI_DOUBLE,
recvproc[iswap],0,world);
}
if (size_reverse_recv[iswap]) MPI_Wait(&request,&status);
if (size_reverse_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
} else {
if (size_reverse_recv[iswap])
MPI_Irecv(buf_recv,size_reverse_recv[iswap],MPI_DOUBLE,
sendproc[iswap],0,world,&request);
n = avec->pack_reverse(recvnum[iswap],firstrecv[iswap],buf_send);
if (n) MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
if (size_reverse_recv[iswap]) MPI_Wait(&request,&status);
if (size_reverse_recv[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
}
avec->unpack_reverse(sendnum[iswap],sendlist[iswap],buf_recv);

@ -576,7 +574,6 @@ void CommBrick::exchange()
double **x;
double *sublo,*subhi;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
// clear global->local map for owned and ghost atoms

@ -644,11 +641,11 @@ void CommBrick::exchange()
if (procgrid[dim] == 1) nrecv = 0;
else {
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,MPI_STATUS_IGNORE);
nrecv = nrecv1;
if (procgrid[dim] > 2) {
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,MPI_STATUS_IGNORE);
nrecv += nrecv2;
}
if (nrecv > maxrecv) grow_recv(nrecv);

@ -656,13 +653,13 @@ void CommBrick::exchange()
MPI_Irecv(buf_recv,nrecv1,MPI_DOUBLE,procneigh[dim][1],0,
world,&request);
MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
if (procgrid[dim] > 2) {
MPI_Irecv(&buf_recv[nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,
world,&request);
MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
}
}

@ -701,7 +698,6 @@ void CommBrick::borders()
double **x;
double *buf,*mlo,*mhi;
MPI_Request request;
MPI_Status status;
AtomVec *avec = atom->avec;
// do swaps over all 3 dimensions

@ -816,12 +812,12 @@ void CommBrick::borders()
if (sendproc[iswap] != me) {
MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
&nrecv,1,MPI_INT,recvproc[iswap],0,world,&status);
&nrecv,1,MPI_INT,recvproc[iswap],0,world,MPI_STATUS_IGNORE);
if (nrecv*size_border > maxrecv) grow_recv(nrecv*size_border);
if (nrecv) MPI_Irecv(buf_recv,nrecv*size_border,MPI_DOUBLE,
recvproc[iswap],0,world,&request);
if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (nrecv) MPI_Wait(&request,&status);
if (nrecv) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else {
nrecv = nsend;

@ -872,7 +868,6 @@ void CommBrick::forward_comm_pair(Pair *pair)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = pair->comm_forward;

@ -892,7 +887,7 @@ void CommBrick::forward_comm_pair(Pair *pair)
recvproc[iswap],0,world,&request);
if (sendnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (recvnum[iswap]) MPI_Wait(&request,&status);
if (recvnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -912,7 +907,6 @@ void CommBrick::reverse_comm_pair(Pair *pair)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = MAX(pair->comm_reverse,pair->comm_reverse_off);

@ -931,7 +925,7 @@ void CommBrick::reverse_comm_pair(Pair *pair)
world,&request);
if (recvnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
if (sendnum[iswap]) MPI_Wait(&request,&status);
if (sendnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -955,7 +949,6 @@ void CommBrick::forward_comm_fix(Fix *fix, int size)
int iswap,n,nsize;
double *buf;
MPI_Request request;
MPI_Status status;
if (size) nsize = size;
else nsize = fix->comm_forward;

@ -976,7 +969,7 @@ void CommBrick::forward_comm_fix(Fix *fix, int size)
world,&request);
if (sendnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (recvnum[iswap]) MPI_Wait(&request,&status);
if (recvnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1000,7 +993,6 @@ void CommBrick::reverse_comm_fix(Fix *fix, int size)
int iswap,n,nsize;
double *buf;
MPI_Request request;
MPI_Status status;
if (size) nsize = size;
else nsize = fix->comm_reverse;

@ -1020,7 +1012,7 @@ void CommBrick::reverse_comm_fix(Fix *fix, int size)
world,&request);
if (recvnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
if (sendnum[iswap]) MPI_Wait(&request,&status);
if (sendnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1040,7 +1032,6 @@ void CommBrick::forward_comm_compute(Compute *compute)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = compute->comm_forward;

@ -1060,7 +1051,7 @@ void CommBrick::forward_comm_compute(Compute *compute)
world,&request);
if (sendnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (recvnum[iswap]) MPI_Wait(&request,&status);
if (recvnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1080,7 +1071,6 @@ void CommBrick::reverse_comm_compute(Compute *compute)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = compute->comm_reverse;

@ -1099,7 +1089,7 @@ void CommBrick::reverse_comm_compute(Compute *compute)
world,&request);
if (recvnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
if (sendnum[iswap]) MPI_Wait(&request,&status);
if (sendnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1119,7 +1109,6 @@ void CommBrick::forward_comm_dump(Dump *dump)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = dump->comm_forward;

@ -1139,7 +1128,7 @@ void CommBrick::forward_comm_dump(Dump *dump)
world,&request);
if (sendnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
if (recvnum[iswap]) MPI_Wait(&request,&status);
if (recvnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1159,7 +1148,6 @@ void CommBrick::reverse_comm_dump(Dump *dump)
int iswap,n;
double *buf;
MPI_Request request;
MPI_Status status;
int nsize = dump->comm_reverse;

@ -1178,7 +1166,7 @@ void CommBrick::reverse_comm_dump(Dump *dump)
world,&request);
if (recvnum[iswap])
MPI_Send(buf_send,n,MPI_DOUBLE,recvproc[iswap],0,world);
if (sendnum[iswap]) MPI_Wait(&request,&status);
if (sendnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1197,7 +1185,6 @@ void CommBrick::forward_comm_array(int nsize, double **array)
int i,j,k,m,iswap,last;
double *buf;
MPI_Request request;
MPI_Status status;
// insure send/recv bufs are big enough for nsize
// based on smax/rmax from most recent borders() invocation

@ -1229,7 +1216,7 @@ void CommBrick::forward_comm_array(int nsize, double **array)
if (sendnum[iswap])
MPI_Send(buf_send,nsize*sendnum[iswap],MPI_DOUBLE,
sendproc[iswap],0,world);
if (recvnum[iswap]) MPI_Wait(&request,&status);
if (recvnum[iswap]) MPI_Wait(&request,MPI_STATUS_IGNORE);
buf = buf_recv;
} else buf = buf_send;

@ -1251,7 +1238,6 @@ int CommBrick::exchange_variable(int n, double *inbuf, double *&outbuf)
{
int nsend,nrecv,nrecv1,nrecv2;
MPI_Request request;
MPI_Status status;
nrecv = n;
if (nrecv > maxrecv) grow_recv(nrecv);

@ -1271,11 +1257,11 @@ int CommBrick::exchange_variable(int n, double *inbuf, double *&outbuf)
nsend = nrecv;
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,MPI_STATUS_IGNORE);
nrecv += nrecv1;
if (procgrid[dim] > 2) {
MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,MPI_STATUS_IGNORE);
nrecv += nrecv2;
} else nrecv2 = 0;

@ -1284,13 +1270,13 @@ int CommBrick::exchange_variable(int n, double *inbuf, double *&outbuf)
MPI_Irecv(&buf_recv[nsend],nrecv1,MPI_DOUBLE,procneigh[dim][1],0,
world,&request);
MPI_Send(buf_recv,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
if (procgrid[dim] > 2) {
MPI_Irecv(&buf_recv[nsend+nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,
world,&request);
MPI_Send(buf_recv,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
}
}
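The CommBrick routines above all share one shape: post the receive for the incoming ghost data, pack and send the owned data, then wait on the single request before unpacking; since the status is never inspected, MPI_STATUS_IGNORE is the natural third argument to MPI_Wait. A self-contained sketch of that forward-communication step is below; the swap partners, buffer sizes, and the pack loop are placeholders standing in for the AtomVec pack/unpack methods.

// forward_comm_sketch.cpp -- post-recv / pack-send / wait / unpack
#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  int sendproc = (me + 1) % nprocs;            // illustrative swap partners
  int recvproc = (me - 1 + nprocs) % nprocs;
  int nsend = 4, nrecv = 4;                    // sizes agreed on beforehand

  std::vector<double> buf_send(nsend), buf_recv(nrecv);

  MPI_Request request;
  if (nrecv) MPI_Irecv(buf_recv.data(), nrecv, MPI_DOUBLE, recvproc, 0,
                       MPI_COMM_WORLD, &request);

  for (int i = 0; i < nsend; i++) buf_send[i] = me + i;   // "pack"
  if (nsend) MPI_Send(buf_send.data(), nsend, MPI_DOUBLE, sendproc, 0, MPI_COMM_WORLD);

  if (nrecv) MPI_Wait(&request, MPI_STATUS_IGNORE);       // status not needed
  // ... "unpack" buf_recv into ghost storage here ...

  MPI_Finalize();
  return 0;
}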
@ -436,9 +436,7 @@ void CommTiled::setup()
if (nmax > maxreqstat) {
maxreqstat = nmax;
delete [] requests;
delete [] statuses;
requests = new MPI_Request[maxreqstat];
statuses = new MPI_Status[maxreqstat];
}
}

@ -450,7 +448,6 @@ void CommTiled::setup()
void CommTiled::forward_comm(int dummy)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
AtomVec *avec = atom->avec;
double **x = atom->x;

@ -483,7 +480,7 @@ void CommTiled::forward_comm(int dummy)
x[firstrecv[iswap][nrecv]],pbc_flag[iswap][nsend],
pbc[iswap][nsend]);
}
if (recvother[iswap]) MPI_Waitall(nrecv,requests,statuses);
if (recvother[iswap]) MPI_Waitall(nrecv,requests,MPI_STATUS_IGNORE);
} else if (ghost_velocity) {
if (recvother[iswap]) {

@ -507,7 +504,7 @@ void CommTiled::forward_comm(int dummy)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
avec->unpack_comm_vel(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[size_forward*
forward_recv_offset[iswap][irecv]]);

@ -536,7 +533,7 @@ void CommTiled::forward_comm(int dummy)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
avec->unpack_comm(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[size_forward*
forward_recv_offset[iswap][irecv]]);

@ -554,7 +551,6 @@ void CommTiled::forward_comm(int dummy)
void CommTiled::reverse_comm()
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
AtomVec *avec = atom->avec;
double **f = atom->f;

@ -588,7 +584,7 @@ void CommTiled::reverse_comm()
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
avec->unpack_reverse(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[size_reverse*
reverse_recv_offset[iswap][irecv]]);

@ -617,7 +613,7 @@ void CommTiled::reverse_comm()
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
avec->unpack_reverse(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[size_reverse*
reverse_recv_offset[iswap][irecv]]);

@ -736,7 +732,7 @@ void CommTiled::exchange()
exchproc[dim][m],0,world,&requests[m]);
for (m = 0; m < nexch; m++)
MPI_Send(&nsend,1,MPI_INT,exchproc[dim][m],0,world);
MPI_Waitall(nexch,requests,statuses);
MPI_Waitall(nexch,requests,MPI_STATUS_IGNORE);
nrecv = 0;
for (m = 0; m < nexch; m++) nrecv += exchnum[dim][m];

@ -750,7 +746,7 @@ void CommTiled::exchange()
}
for (m = 0; m < nexch; m++)
MPI_Send(buf_send,nsend,MPI_DOUBLE,exchproc[dim][m],0,world);
MPI_Waitall(nexch,requests,statuses);
MPI_Waitall(nexch,requests,MPI_STATUS_IGNORE);
// check incoming atoms to see if I own it and they are in my box
// if so, add to my list

@ -870,7 +866,7 @@ void CommTiled::borders()
for (m = 0; m < nsend; m++)
MPI_Send(&sendnum[iswap][m],1,MPI_INT,sendproc[iswap][m],0,world);
if (sendself[iswap]) recvnum[iswap][nrecv] = sendnum[iswap][nsend];
if (recvother[iswap]) MPI_Waitall(nrecv,requests,statuses);
if (recvother[iswap]) MPI_Waitall(nrecv,requests,MPI_STATUS_IGNORE);
// setup other per swap/proc values from sendnum and recvnum

@ -931,7 +927,7 @@ void CommTiled::borders()
buf_send);
}
if (recvother[iswap]) {
MPI_Waitall(nrecv,requests,statuses);
MPI_Waitall(nrecv,requests,MPI_STATUS_IGNORE);
for (m = 0; m < nrecv; m++)
avec->unpack_border_vel(recvnum[iswap][m],firstrecv[iswap][m],
&buf_recv[size_border*

@ -960,7 +956,7 @@ void CommTiled::borders()
buf_send);
}
if (recvother[iswap]) {
MPI_Waitall(nrecv,requests,statuses);
MPI_Waitall(nrecv,requests,MPI_STATUS_IGNORE);
for (m = 0; m < nrecv; m++)
avec->unpack_border(recvnum[iswap][m],firstrecv[iswap][m],
&buf_recv[size_border*

@ -997,7 +993,6 @@ void CommTiled::borders()
void CommTiled::forward_comm_pair(Pair *pair)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = pair->comm_forward;

@ -1029,7 +1024,7 @@ void CommTiled::forward_comm_pair(Pair *pair)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
pair->unpack_forward_comm(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[nsize*
forward_recv_offset[iswap][irecv]]);

@ -1046,7 +1041,6 @@ void CommTiled::forward_comm_pair(Pair *pair)
void CommTiled::reverse_comm_pair(Pair *pair)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = MAX(pair->comm_reverse,pair->comm_reverse_off);

@ -1075,7 +1069,7 @@ void CommTiled::reverse_comm_pair(Pair *pair)
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
pair->unpack_reverse_comm(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[nsize*
reverse_recv_offset[iswap][irecv]]);

@ -1096,7 +1090,6 @@ void CommTiled::reverse_comm_pair(Pair *pair)
void CommTiled::forward_comm_fix(Fix *fix, int size)
{
int i,irecv,n,nsize,nsend,nrecv;
MPI_Status status;
if (size) nsize = size;
else nsize = fix->comm_forward;

@ -1127,7 +1120,7 @@ void CommTiled::forward_comm_fix(Fix *fix, int size)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
fix->unpack_forward_comm(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[nsize*
forward_recv_offset[iswap][irecv]]);

@ -1148,7 +1141,6 @@ void CommTiled::forward_comm_fix(Fix *fix, int size)
void CommTiled::reverse_comm_fix(Fix *fix, int size)
{
int i,irecv,n,nsize,nsend,nrecv;
MPI_Status status;
if (size) nsize = size;
else nsize = fix->comm_reverse;

@ -1178,7 +1170,7 @@ void CommTiled::reverse_comm_fix(Fix *fix, int size)
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
fix->unpack_reverse_comm(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[nsize*
reverse_recv_offset[iswap][irecv]]);

@ -1195,7 +1187,6 @@ void CommTiled::reverse_comm_fix(Fix *fix, int size)
void CommTiled::forward_comm_compute(Compute *compute)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = compute->comm_forward;

@ -1226,7 +1217,7 @@ void CommTiled::forward_comm_compute(Compute *compute)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
compute->
unpack_forward_comm(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[nsize*

@ -1244,7 +1235,6 @@ void CommTiled::forward_comm_compute(Compute *compute)
void CommTiled::reverse_comm_compute(Compute *compute)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = compute->comm_reverse;

@ -1273,7 +1263,7 @@ void CommTiled::reverse_comm_compute(Compute *compute)
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
compute->
unpack_reverse_comm(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[nsize*

@ -1291,7 +1281,6 @@ void CommTiled::reverse_comm_compute(Compute *compute)
void CommTiled::forward_comm_dump(Dump *dump)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = dump->comm_forward;

@ -1322,7 +1311,7 @@ void CommTiled::forward_comm_dump(Dump *dump)
}
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
dump->unpack_forward_comm(recvnum[iswap][irecv],firstrecv[iswap][irecv],
&buf_recv[nsize*
forward_recv_offset[iswap][irecv]]);

@ -1339,7 +1328,6 @@ void CommTiled::forward_comm_dump(Dump *dump)
void CommTiled::reverse_comm_dump(Dump *dump)
{
int i,irecv,n,nsend,nrecv;
MPI_Status status;
int nsize = dump->comm_reverse;

@ -1368,7 +1356,7 @@ void CommTiled::reverse_comm_dump(Dump *dump)
}
if (sendother[iswap]) {
for (i = 0; i < nsend; i++) {
MPI_Waitany(nsend,requests,&irecv,&status);
MPI_Waitany(nsend,requests,&irecv,MPI_STATUS_IGNORE);
dump->unpack_reverse_comm(sendnum[iswap][irecv],sendlist[iswap][irecv],
&buf_recv[nsize*
reverse_recv_offset[iswap][irecv]]);

@ -1384,7 +1372,6 @@ void CommTiled::reverse_comm_dump(Dump *dump)
void CommTiled::forward_comm_array(int nsize, double **array)
{
int i,j,k,m,iatom,last,irecv,nsend,nrecv;
MPI_Status status;
// insure send/recv bufs are big enough for nsize
// based on smaxone/rmaxall from most recent borders() invocation

@ -1435,7 +1422,7 @@ void CommTiled::forward_comm_array(int nsize, double **array)
if (recvother[iswap]) {
for (i = 0; i < nrecv; i++) {
MPI_Waitany(nrecv,requests,&irecv,&status);
MPI_Waitany(nrecv,requests,&irecv,MPI_STATUS_IGNORE);
m = nsize*forward_recv_offset[iswap][irecv];
last = firstrecv[iswap][irecv] + recvnum[iswap][irecv];
for (iatom = firstrecv[iswap][irecv]; iatom < last; iatom++)

@ -1919,7 +1906,6 @@ void CommTiled::allocate_swap(int n)
maxreqstat = 0;
requests = NULL;
statuses = NULL;
for (int i = 0; i < n; i++) {
nprocmax[i] = DELTA_PROCS;

@ -2044,7 +2030,6 @@ void CommTiled::deallocate_swap(int n)
delete [] sendlist;
delete [] requests;
delete [] statuses;
delete [] nprocmax;

@ -92,7 +92,6 @@ class CommTiled : public Comm {
int maxreqstat; // max size of Request and Status vectors
MPI_Request *requests;
MPI_Status *statuses;
struct RCBinfo {
double mysplit[3][2]; // fractional RCB bounding box for one proc
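CommTiled can have several neighbors per swap, so it waits with MPI_Waitany and unpacks whichever message completed; when the completed index comes back through Waitany itself, the status argument can again be ignored. A small sketch of that completion loop follows; the all-to-all message pattern and the buffers are invented purely for illustration.

// waitany_sketch.cpp -- unpack messages in completion order
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  // every rank receives one int from every other rank and sends its own rank out
  int nrecv = nprocs - 1;
  std::vector<int> inbox(nrecv);
  std::vector<MPI_Request> requests(nrecv);

  int k = 0;
  for (int p = 0; p < nprocs; p++) {
    if (p == me) continue;
    MPI_Irecv(&inbox[k], 1, MPI_INT, p, 0, MPI_COMM_WORLD, &requests[k]);
    k++;
  }
  for (int p = 0; p < nprocs; p++)
    if (p != me) MPI_Send(&me, 1, MPI_INT, p, 0, MPI_COMM_WORLD);

  // process whichever receive finishes first; irecv indexes the request array
  for (int i = 0; i < nrecv; i++) {
    int irecv;
    MPI_Waitany(nrecv, requests.data(), &irecv, MPI_STATUS_IGNORE);
    printf("rank %d: message from rank %d arrived\n", me, inbox[irecv]);
  }

  MPI_Finalize();
  return 0;
}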
@ -97,7 +97,6 @@ void ComputeVCMMolecule::compute_array()
{
tagint imol;
double massone;
double unwrap[3];
invoked_array = update->ntimestep;

@ -108,7 +107,6 @@ void ComputeVCMMolecule::compute_array()
int *mask = atom->mask;
tagint *molecule = atom->molecule;
int *type = atom->type;
imageint *image = atom->image;
double *mass = atom->mass;
double *rmass = atom->rmass;
int nlocal = atom->nlocal;

@ -383,7 +383,7 @@ void Dump::write()
if (flush_flag) fflush(fp);
} else {
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,nme*size_one,MPI_DOUBLE,fileproc,0,world);
}

@ -404,7 +404,7 @@ void Dump::write()
if (flush_flag) fflush(fp);
} else {
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(sbuf,nsme,MPI_CHAR,fileproc,0,world);
}
}

@ -38,8 +38,8 @@ class FixRecenter : public Fix {
int group2bit,scaleflag;
int xflag,yflag,zflag;
int xinitflag,yinitflag,zinitflag;
double xcom,ycom,zcom,xinit,yinit,zinit,masstotal,distance,shift[3];
int nlevels_respa;
double xcom,ycom,zcom,xinit,yinit,zinit,masstotal,distance,shift[3];
};
}

@ -49,6 +49,7 @@ FixTempCSVR::FixTempCSVR(LAMMPS *lmp, int narg, char **arg) :
nevery = 1;
global_freq = nevery;
dynamic_group_allow = 1;
tstr = NULL;
if (strstr(arg[3],"v_") == arg[3]) {

@ -294,7 +294,6 @@ void Image::clear()
void Image::merge()
{
MPI_Request requests[3];
MPI_Status statuses[3];
int nhalf = 1;
while (nhalf < nprocs) nhalf *= 2;

@ -307,8 +306,8 @@ void Image::merge()
if (ssao)
MPI_Irecv(surfacecopy,npixels*2,MPI_DOUBLE,
me+nhalf,0,world,&requests[2]);
if (ssao) MPI_Waitall(3,requests,statuses);
else MPI_Waitall(2,requests,statuses);
if (ssao) MPI_Waitall(3,requests,MPI_STATUS_IGNORE);
else MPI_Waitall(2,requests,MPI_STATUS_IGNORE);
for (int i = 0; i < npixels; i++) {
if (depthBuffer[i] < 0 || (depthcopy[i] >= 0 &&
@ -679,7 +679,6 @@ void ProcMap::output(char *file, int *procgrid, int ***grid2proc)
int tmp;
int vec[6];
char procname[MPI_MAX_PROCESSOR_NAME+1];
MPI_Status status;
vec[0] = me;
vec[1] = universe->me;

@ -696,9 +695,9 @@ void ProcMap::output(char *file, int *procgrid, int ***grid2proc)
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {
MPI_Send(&tmp,0,MPI_INT,iproc,0,world);
MPI_Recv(vec,6,MPI_INT,iproc,0,world,&status);
MPI_Recv(vec,6,MPI_INT,iproc,0,world,MPI_STATUS_IGNORE);
MPI_Recv(procname,MPI_MAX_PROCESSOR_NAME+1,MPI_CHAR,
iproc,0,world,&status);
iproc,0,world,MPI_STATUS_IGNORE);
}
fprintf(fp,"%d %d %d: %d %d %d: %s\n",

@ -706,7 +705,7 @@ void ProcMap::output(char *file, int *procgrid, int ***grid2proc)
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Send(vec,6,MPI_INT,0,0,world);
MPI_Send(procname,strlen(procname)+1,MPI_CHAR,0,0,world);
}

src/rcb.cpp

@ -115,7 +115,6 @@ void RCB::compute(int dimension, int n, double **x, double *wt,
double tolerance;
MPI_Comm comm,comm_half;
MPI_Request request,request2;
MPI_Status status;
Median med,medme;
// create list of my Dots

@ -469,9 +468,9 @@ void RCB::compute(int dimension, int n, double **x, double *wt,
MPI_Send(&outgoing,1,MPI_INT,procpartner,0,world);
incoming = 0;
if (readnumber) {
MPI_Recv(&incoming,1,MPI_INT,procpartner,0,world,&status);
MPI_Recv(&incoming,1,MPI_INT,procpartner,0,world,MPI_STATUS_IGNORE);
if (readnumber == 2) {
MPI_Recv(&incoming2,1,MPI_INT,procpartner2,0,world,&status);
MPI_Recv(&incoming2,1,MPI_INT,procpartner2,0,world,MPI_STATUS_IGNORE);
incoming += incoming2;
}
}

@ -527,7 +526,7 @@ void RCB::compute(int dimension, int n, double **x, double *wt,
MPI_Send(NULL,0,MPI_INT,procpartner,0,world);
if (readnumber == 2) MPI_Send(NULL,0,MPI_INT,procpartner2,0,world);
}
MPI_Recv(NULL,0,MPI_INT,procpartner,0,world,&status);
MPI_Recv(NULL,0,MPI_INT,procpartner,0,world,MPI_STATUS_IGNORE);
// send dots to partner

@ -536,8 +535,8 @@ void RCB::compute(int dimension, int n, double **x, double *wt,
// wait until all dots are received
if (readnumber > 0) {
MPI_Wait(&request,&status);
if (readnumber == 2) MPI_Wait(&request2,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
if (readnumber == 2) MPI_Wait(&request2,MPI_STATUS_IGNORE);
}
ndot = ndotnew;
@ -369,7 +369,6 @@ void ReadRestart::command(int narg, char **arg)
MPI_Bcast(&procsperfile,1,MPI_INT,0,clustercomm);
int tmp,iproc;
MPI_Status status;
MPI_Request request;
for (int i = 0; i < procsperfile; i++) {

@ -389,12 +388,12 @@ void ReadRestart::command(int narg, char **arg)
if (i % nclusterprocs) {
iproc = me + (i % nclusterprocs);
MPI_Send(&n,1,MPI_INT,iproc,0,world);
MPI_Recv(&tmp,0,MPI_INT,iproc,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,iproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,n,MPI_DOUBLE,iproc,0,world);
}
} else if (i % nclusterprocs == me - fileproc) {
MPI_Recv(&n,1,MPI_INT,fileproc,0,world,&status);
MPI_Recv(&n,1,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
if (n > maxbuf) {
maxbuf = n;
memory->destroy(buf);

@ -402,7 +401,7 @@ void ReadRestart::command(int narg, char **arg)
}
MPI_Irecv(buf,n,MPI_DOUBLE,fileproc,0,world,&request);
MPI_Send(&tmp,0,MPI_INT,fileproc,0,world);
MPI_Wait(&request,&status);
MPI_Wait(&request,MPI_STATUS_IGNORE);
}
if (i % nclusterprocs == me - fileproc) {

@ -1,4 +1,4 @@
/* ----------------------------------------------------------------------
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
@ -328,10 +328,11 @@ void WriteData::atoms()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nAtoms # %s\n\n",atom->atom_style);
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -346,7 +347,7 @@ void WriteData::atoms()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_DOUBLE,0,0,world);
}

@ -381,10 +382,11 @@ void WriteData::velocities()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nVelocities\n\n");
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -399,7 +401,7 @@ void WriteData::velocities()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_DOUBLE,0,0,world);
}

@ -432,11 +434,12 @@ void WriteData::bonds()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
int index = 1;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nBonds\n\n");
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -452,7 +455,7 @@ void WriteData::bonds()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_LMP_TAGINT,0,0,world);
}

@ -485,11 +488,12 @@ void WriteData::angles()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
int index = 1;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nAngles\n\n");
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -505,7 +509,7 @@ void WriteData::angles()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_LMP_TAGINT,0,0,world);
}

@ -556,11 +560,12 @@ void WriteData::dihedrals()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
int index = 1;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nDihedrals\n\n");
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -576,7 +581,7 @@ void WriteData::dihedrals()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_LMP_TAGINT,0,0,world);
}

@ -627,11 +632,12 @@ void WriteData::impropers()
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
int index = 1;
if (me == 0) {
MPI_Status status;
MPI_Request request;
fprintf(fp,"\nImpropers\n\n");
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -647,7 +653,7 @@ void WriteData::impropers()
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_LMP_TAGINT,0,0,world);
}

@ -680,11 +686,12 @@ void WriteData::fix(int ifix, int mth)
// all other procs wait for ping, send their chunk to proc 0
int tmp,recvrow;
MPI_Status status;
MPI_Request request;
int index = 1;
if (me == 0) {
MPI_Status status;
MPI_Request request;
modify->fix[ifix]->write_data_section_keyword(mth,fp);
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {

@ -700,7 +707,7 @@ void WriteData::fix(int ifix, int mth)
}
} else {
MPI_Recv(&tmp,0,MPI_INT,0,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,0,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(&buf[0][0],sendrow*ncol,MPI_DOUBLE,0,0,world);
}
@ -393,10 +393,10 @@ void WriteRestart::write(char *file)
else {
int tmp,recv_size;
MPI_Status status;
MPI_Request request;
if (filewriter) {
MPI_Status status;
MPI_Request request;
for (int iproc = 0; iproc < nclusterprocs; iproc++) {
if (iproc) {
MPI_Irecv(buf,max_size,MPI_DOUBLE,me+iproc,0,world,&request);

@ -410,7 +410,7 @@ void WriteRestart::write(char *file)
fclose(fp);
} else {
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,&status);
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,send_size,MPI_DOUBLE,fileproc,0,world);
}
}