whitespace fixes

Axel Kohlmeyer 2022-12-01 10:12:14 -05:00
parent 7ce4b2eb68
commit 87a0833edd
15 changed files with 45 additions and 45 deletions

View File

@@ -305,7 +305,7 @@ The *setup_grid()* method is called after the first constructor
(above) to partition the grid across processors, which determines
which grid cells each processor owns. It also calculates how many
ghost grid cells in each dimension and each direction each processor
needs to store.
Note that this method is NOT called if the second constructor above is
used. In that case, the caller assigns owned and ghost cells to each
@@ -528,7 +528,7 @@ The *ghost_adjacent()* method returns a 1 if every processor can
perform the necessary owned/ghost communication with only its nearest
neighbor processors (4 in 2d, 6 in 3d). It returns a 0 if any
processor's ghost cells extend further than nearest neighbor
processors.
This can be checked by callers who have the option to change the
global grid size to ensure more efficient nearest-neighbor-only
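
As a minimal sketch (not part of this commit), a hypothetical caller of the two methods described above might look as follows, assuming the Grid3d interface used in the PPPMDipole hunks further down; the function name and local variables are illustrative only.

#include "grid3d.h"    // assumes a LAMMPS source tree

// hypothetical helper: partition the grid, then verify ghost-cell extents
void setup_kspace_grid(LAMMPS_NS::Grid3d *grid)
{
  int nxlo_in, nxhi_in, nylo_in, nyhi_in, nzlo_in, nzhi_in;          // owned cells
  int nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out;    // owned + ghost cells

  // partition the global grid across procs and compute ghost-cell extents
  grid->setup_grid(nxlo_in, nxhi_in, nylo_in, nyhi_in, nzlo_in, nzhi_in,
                   nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out);

  // ghost_adjacent() returns 0 if any proc's ghost cells extend past its
  // nearest-neighbor procs; the caller may then enlarge the global grid
  if (!grid->ghost_adjacent()) {
    // e.g. pick a larger global grid and repeat the setup
  }
}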

View File

@@ -50,7 +50,7 @@ grid <dump>` output files as input.
Nov 2022) to add a section in the :doc:`Developer <Developer>`
section of the manual with a detailed description of how to use
these classes.
----------
These are the commands which currently define or use distributed

View File

@@ -104,9 +104,9 @@ Restrictions
Related commands
""""""""""""""""
-:doc:`compute temp/partial <compute_temp_partial>`, :doc:`compute
-temp/region <compute_temp_region>`, :doc:`compute pressure
-<compute_pressure>`
+:doc:`compute temp/partial <compute_temp_partial>`,
+:doc:`compute temp/region <compute_temp_region>`,
+:doc:`compute pressure <compute_pressure>`
Default
"""""""

View File

@@ -868,7 +868,7 @@ void PPPM::deallocate()
memory->destroy(gc_buf2);
memory->destroy3d_offset(density_brick,nzlo_out,nylo_out,nxlo_out);
if (differentiation_flag == 1) {
memory->destroy3d_offset(u_brick,nzlo_out,nylo_out,nxlo_out);
memory->destroy(sf_precoeff1);
@@ -882,7 +882,7 @@ void PPPM::deallocate()
memory->destroy3d_offset(vdy_brick,nzlo_out,nylo_out,nxlo_out);
memory->destroy3d_offset(vdz_brick,nzlo_out,nylo_out,nxlo_out);
}
memory->destroy(density_fft);
memory->destroy(greensfn);
memory->destroy(work1);

View File

@@ -205,7 +205,7 @@ void PPPMDipole::init()
gc_dipole->set_stencil_atom(-nlower,nupper);
gc_dipole->set_shift_atom(shiftatom_lo,shiftatom_hi);
gc_dipole->set_zfactor(slab_volfactor);
gc_dipole->setup_grid(nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);
@@ -541,7 +541,7 @@ void PPPMDipole::allocate()
gc_dipole->set_stencil_atom(-nlower,nupper);
gc_dipole->set_shift_atom(shiftatom_lo,shiftatom_hi);
gc_dipole->set_zfactor(slab_volfactor);
gc_dipole->setup_grid(nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

View File

@@ -409,7 +409,7 @@ void PPPMDisp::init()
double acc;
double acc_6,acc_real_6,acc_kspace_6;
int iteration = 0;
if (function[0]) {
@@ -524,7 +524,7 @@ void PPPMDisp::init()
final_accuracy_6(acc_6,acc_real_6,acc_kspace_6);
}
// allocate K-space dependent memory
allocate();
@@ -559,7 +559,7 @@ void PPPMDisp::init()
int ngrid_max,nfft_both_max;
MPI_Allreduce(&ngrid,&ngrid_max,1,MPI_INT,MPI_MAX,world);
MPI_Allreduce(&nfft_both,&nfft_both_max,1,MPI_INT,MPI_MAX,world);
if (me == 0) {
std::string mesg = fmt::format(" Coulomb G vector (1/distance)= {:.16g}\n",
g_ewald);
@@ -576,14 +576,14 @@ void PPPMDisp::init()
utils::logmesg(lmp,mesg);
}
}
// print dispersion stats
if (function[1] + function[2] + function[3]) {
int ngrid_6_max,nfft_both_6_max;
MPI_Allreduce(&ngrid_6,&ngrid_6_max,1,MPI_INT,MPI_MAX,world);
MPI_Allreduce(&nfft_both_6,&nfft_both_6_max,1,MPI_INT,MPI_MAX,world);
if (me == 0) {
std::string mesg = fmt::format(" Dispersion G vector (1/distance)= "
"{:.16}\n",g_ewald_6);

View File

@@ -78,9 +78,9 @@ DumpGrid::DumpGrid(LAMMPS *lmp, int narg, char **arg) :
dimension = domain->dimension;
// for 2d, set nzgrid = 1 for dump grid and grid/vtk files
if (dimension == 2) nzgrid = 1;
// computes and fixes which the dump accesses
ncompute = 0;
@@ -661,11 +661,11 @@ int DumpGrid::parse_fields(int narg, char **arg)
// arg is not a valid Grid reference
// assume it's an additional dump grid option and return
if (iflag < 0) return iarg;
// grid reference is to a compute or fix
if (iflag == ArgInfo::COMPUTE) {
auto icompute = lmp->modify->get_compute_by_id(id);
field2index[iarg] = add_compute(id,icompute);

View File

@@ -79,7 +79,7 @@ void DumpGridVTK::write_header(bigint ndump)
if (me) return;
xyz_grid();
fprintf(fp,"<?xml version=\"1.0\"\?>\n");
fprintf(fp,"<VTKFile type=\"RectilinearGrid\">\n");
fprintf(fp,"<RectilinearGrid WholeExtent=\"0 %d 0 %d 0 %d\" "
@@ -90,7 +90,7 @@ void DumpGridVTK::write_header(bigint ndump)
fprintf(fp,"<Coordinates>\n");
// coords of center point of grid cells in each of xyz dimensions
fprintf(fp,"<DataArray type=\"Float32\" format=\"ascii\">\n");
for (int i = 0; i <= nxgrid; i++)
fprintf(fp,"%g ",xcoord[i]);
@@ -103,9 +103,9 @@ void DumpGridVTK::write_header(bigint ndump)
for (int i = 0; i <= nzgrid; i++)
fprintf(fp,"%g ",zcoord[i]);
fprintf(fp,"\n</DataArray>\n");
fprintf(fp,"</Coordinates>\n");
fprintf(fp,"<CellData>\n");
if (mode == SCALAR)
fprintf(fp,"<DataArray type=\"Float32\" Name=\"Scalar\" format=\"ascii\">\n");
@@ -155,10 +155,10 @@ void DumpGridVTK::xyz_grid()
double dx = domain->prd[0] / nxgrid;
double dy = domain->prd[1] / nygrid;
double dz = domain->prd[2] / nzgrid;
for (int ix = 0; ix <= nxgrid; ix++)
xcoord[ix] = boxlo[0] + ix*dx;
for (int iy = 0; iy <= nygrid; iy++)
ycoord[iy] = boxlo[1] + iy*dy;

View File

@@ -32,7 +32,7 @@ class DumpGridVTK : public DumpGrid {
protected:
int mode;
double *xcoord,*ycoord,*zcoord;
// methods
void init_style() override;

View File

@@ -126,7 +126,7 @@ DumpImage::DumpImage(LAMMPS *lmp, int narg, char **arg) :
gridflag = NO;
lineflag = triflag = bodyflag = fixflag = NO;
id_grid_compute = id_grid_fix = nullptr;
if (atom->nbondtypes == 0) bondflag = NO;
else {
bondflag = YES;
@@ -490,7 +490,7 @@ void DumpImage::init_style()
error->all(FLERR,"Dump image and grid fix not computed at compatible times");
}
}
// check image variables
if (thetastr) {
@@ -625,7 +625,7 @@ void DumpImage::write()
// pack grid gbuf with grid cell values
// ngrid = # of grid cells this proc owns
if (gridflag) {
if (domain->dimension == 2) {
if (grid_compute)
@@ -700,7 +700,7 @@ void DumpImage::write()
for (int ix = nxlo_in; ix <= nxhi_in; ix++)
gbuf[n++] = array2d[iy][ix][index];
}
} else if (domain->dimension == 3) {
if (grid_index == 0) {
double ***vec3d;
@@ -715,7 +715,7 @@ void DumpImage::write()
for (int iy = nylo_in; iy <= nyhi_in; iy++)
for (int ix = nxlo_in; ix <= nxhi_in; ix++)
gbuf[n++] = vec3d[iz][iy][ix];
}
} else {
double ****array3d;
@@ -919,7 +919,7 @@ void DumpImage::create_image()
// 2 triangles for 2d rectangle, 12 triangles for 3d cube surface
// grid_cell_corners_2d/3d calculates orthogonal vs triclinic corner pts
// for 3d, outward normals on all 6 faces
if (gridflag) {
int n = 0;
if (domain->dimension == 2) {
@@ -1358,7 +1358,7 @@ void DumpImage::grid_cell_corners_2d(int ix, int iy)
{
double *boxlo = domain->boxlo;
double *prd = domain->prd;
if (!domain->triclinic) {
double xdelta = prd[0] / nxgrid;
double ydelta = prd[1] / nygrid;
@@ -1389,12 +1389,12 @@ void DumpImage::grid_cell_corners_2d(int ix, int iy)
}
/* ---------------------------------------------------------------------- */
void DumpImage::grid_cell_corners_3d(int ix, int iy, int iz)
{
double *boxlo = domain->boxlo;
double *prd = domain->prd;
if (!domain->triclinic) {
double xdelta = prd[0] / nxgrid;
double ydelta = prd[1] / nygrid;

View File

@@ -91,7 +91,7 @@ class DumpImage : public DumpCustom {
double *gbuf;
int ngrid,maxgrid;
double gcorners[8][3];
class AtomVecLine *avec_line; // ptrs to atom style (sub)classes
class AtomVecTri *avec_tri;
class AtomVecBody *avec_body;

View File

@@ -143,13 +143,13 @@ FixAveGrid::FixAveGrid(LAMMPS *lmp, int narg, char **arg) :
// if arg is not a per-atom or per-grid value
// then it's an optional arg after the values
ArgInfo argi(arg[iarg]);
if (argi.get_type() == ArgInfo::NONE || argi.get_type() == ArgInfo::UNKNOWN) break;
if (argi.get_dim() > 1) error->all(FLERR,"Invalid fix ave/grid command");
// atom value has no colon
if (!strchr(arg[iarg],':')) {
modeatom = 1;
ids[nvalues] = argi.copy_name();

View File

@@ -537,7 +537,7 @@ void Grid3d::ghost_grid()
// OFFSET allows generation of negative indices with static_cast
// out xyz lo/hi = index range of owned + ghost cells
// if zextra, nz and effective prd[2] are both larger, so dzinv is the same
double dxinv = nx / prd[0];
double dyinv = ny / prd[1];
double dzinv = nz / prd[2];
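
As an aside on the OFFSET comment in this hunk: static_cast<int> truncates toward zero, so a slightly negative scaled coordinate would be mapped to cell 0 instead of cell -1. Adding a large positive OFFSET before the cast and subtracting it afterwards gives floor-like behavior. A small self-contained illustration (the OFFSET value here is assumed, not taken from this commit):

#include <cstdio>

int main()
{
  const int OFFSET = 16384;   // assumed constant, large enough to keep the sum positive
  double x = -0.3;            // scaled grid coordinate just below a cell boundary

  int truncated = static_cast<int>(x);                    // 0  : truncation toward zero
  int floored   = static_cast<int>(x + OFFSET) - OFFSET;  // -1 : floor-like result

  printf("truncated = %d, floored = %d\n", truncated, floored);
  return 0;
}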

View File

@@ -887,27 +887,27 @@ int utils::check_grid_reference(char *errstr, char *ref, int nevery,
auto name = argi.get_name();
switch (argi.get_type()) {
case ArgInfo::UNKNOWN: {
lmp->error->all(FLERR,"%s grid reference %s is invalid",errstr,ref);
} break;
// compute value = c_ID
case ArgInfo::COMPUTE: {
// split name = idcompute:gname:dname into 3 strings
auto words = parse_grid_id(FLERR,name,lmp->error);
const auto &idcompute = words[0];
const auto &gname = words[1];
const auto &dname = words[2];
auto icompute = lmp->modify->get_compute_by_id(idcompute);
if (!icompute) lmp->error->all(FLERR,"{} compute ID {} not found",errstr,idcompute);
if (icompute->pergrid_flag == 0)
lmp->error->all(FLERR,"{} compute {} does not compute per-grid info",errstr,idcompute);
int dim;
igrid = icompute->get_grid_by_name(gname,dim);
if (igrid < 0)

View File

@@ -401,7 +401,7 @@ namespace utils {
int check_grid_reference(char *errstr, char *ref, int nevery,
char *&id, int &igrid, int &idata, int &index, LAMMPS *lmp);
/*! Parse grid reference into 3 sub-strings
*
* Format of grid ID reference = id:gname:dname
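
A hedged sketch (assumed call site, not part of this commit) of how a reference in this id:gname:dname format is split, mirroring the parse_grid_id() call in the utils::check_grid_reference() hunk above; the compute ID and grid/data names are made up:

// split a per-grid reference "myCompute:mygrid:mydata" into its 3 parts
auto words = utils::parse_grid_id(FLERR, "myCompute:mygrid:mydata", lmp->error);
const auto &idcompute = words[0];   // compute or fix ID   -> "myCompute"
const auto &gname     = words[1];   // grid name           -> "mygrid"
const auto &dname     = words[2];   // per-grid data name  -> "mydata"

This assumes the surrounding LAMMPS context (FLERR, lmp) that the hunk above already uses.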