git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@8605 f3b2605a-c512-4ea7-a41b-209d697bcdaa

This commit is contained in:
sjplimp 2012-08-11 19:06:25 +00:00
parent 58443251c3
commit 01b7ac9e20
56 changed files with 116 additions and 6388 deletions

View File

@ -1,38 +0,0 @@
This directory has examples of how to use LAMMPS as a library, either
by itself or in tandem with another code or library.
These examples are meant to illustrate what is possible when coupling
codes or calling LAMMPS as a library. The examples are provided for
demonstration purposes. The physics they calculate is too simple to
model a realistic problem.
See these sections of the LAMMPS manual for details:
2.4 Building LAMMPS as a library (doc/Section_start.html#2_4)
4.10 Coupling LAMMPS to other codes (doc/Section_howto.html#4_10)
In all of the examples included here, LAMMPS must first be built as a
library. Basically, you type something like
make makelib
make -f Makefile.lib g++
in the LAMMPS src directory to create liblmp_g++.a
The library interface to LAMMPS is in src/library.cpp. Routines can
be easily added to this file so an external program can perform the
LAMMPS tasks desired.
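As a rough illustration (not one of the files in this directory), a
minimal C driver that calls LAMMPS through this interface could look
like the sketch below, assuming LAMMPS has been built as a library and
src/library.h is on the include path:

#include <mpi.h>
#include "library.h"                  /* LAMMPS library API (src/library.h) */

int main(int argc, char **argv)
{
  MPI_Init(&argc,&argv);

  void *lmp;                                /* opaque handle to a LAMMPS instance */
  lammps_open(0,NULL,MPI_COMM_WORLD,&lmp);  /* create the instance on all procs */
  lammps_file(lmp,"in.lammps");             /* read an input script */
  lammps_command(lmp,"run 100");            /* issue one more command */
  lammps_close(lmp);                        /* destroy the instance */

  MPI_Finalize();
  return 0;
}

Such a driver would be linked against liblmp_g++.a (plus MPI and FFTW),
in the same way the Makefiles in the sub-directories below do.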
-------------------------------------------------------------------
These are the sub-directories included in this directory:
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
library collection of useful inter-code communication routines
simple simple example of driver code calling LAMMPS as library
fortran a wrapper on the LAMMPS library API that
can be called from Fortran
Each sub-directory has its own README.

View File

@ -1,9 +0,0 @@
libfwrapper.c is a C file that wraps the LAMMPS library API
in src/library.h so that it can be called from Fortran.
See the couple/simple/simple.f90 program for an example
of a Fortran code that does this.
See the README file in that dir for instructions
on how to build a Fortran code that uses this
wrapper and links to the LAMMPS library.
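For illustration only (not a file in this directory), a tiny C test
harness can drive the same Fortran-callable entry points, mimicking
what a Fortran caller does: it passes the communicator as an MPI_Fint
and passes the string length as an explicit argument:

#include <mpi.h>
#include <stdint.h>
#include <string.h>

/* prototypes matching the wrapper routines in libfwrapper.c (shown below) */
void lammps_open_(MPI_Fint *comm, int64_t *ptr);
void lammps_command_(int64_t *ptr, char *line, MPI_Fint *len);
void lammps_close_(int64_t *ptr);

int main(int argc, char **argv)
{
  MPI_Init(&argc,&argv);

  MPI_Fint fcomm = MPI_Comm_c2f(MPI_COMM_WORLD);  /* what Fortran would pass */
  int64_t handle;                          /* LAMMPS pointer stored as a 64-bit int */
  lammps_open_(&fcomm,&handle);

  char line[] = "units lj";
  MPI_Fint len = (MPI_Fint) strlen(line);  /* length passed explicitly */
  lammps_command_(&handle,line,&len);

  lammps_close_(&handle);
  MPI_Finalize();
  return 0;
}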

View File

@ -1,129 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
www.cs.sandia.gov/~sjplimp/lammps.html
Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* libwrapper = fortran wrappers for LAMMPS library functions.
See README for compilation instructions */
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "stdint.h"
#include "library.h" /* this is a LAMMPS include file */
/* wrapper for creating a lammps instance from fortran.
since fortran has no simple way to emit a c-compatible
argument array, we don't support it. for simplicity,
the address of the pointer to the lammps object is
stored in a 64-bit integer on all platforms. */
void lammps_open_(MPI_Fint *comm, int64_t *ptr)
{
void *obj;
MPI_Comm ccomm;
/* convert MPI communicator from fortran to c */
ccomm = MPI_Comm_f2c(*comm);
lammps_open(0,NULL,ccomm,&obj);
*ptr = (int64_t) obj;
}
/* no-MPI version of the wrapper from above. */
void lammps_open_no_mpi_(int64_t *ptr)
{
void *obj;
lammps_open_no_mpi(0,NULL,&obj);
*ptr = (int64_t) obj;
}
/* wrapper for shutting down a lammps instance from fortran. */
void lammps_close_(int64_t *ptr)
{
void *obj;
obj = (void *) *ptr;
lammps_close(obj);
}
/* wrapper for passing an input file to lammps from fortran.
since fortran strings are not zero terminated, we have
to pass the length explicitly and make a copy that is. */
void lammps_file_(int64_t *ptr, char *fname, MPI_Fint *len)
{
void *obj;
char *cpy;
obj = (void *) *ptr;
cpy = (char *)calloc(*len + 1,sizeof(char));
memcpy(cpy,fname,*len);
lammps_file(obj,cpy);
free(cpy);
}
/* wrapper for passing a line input to lammps from fortran.
since fortran strings are not zero terminated, we have
to pass the length explicitly and make a copy that is. */
void lammps_command_(int64_t *ptr, char *line, MPI_Fint *len)
{
void *obj;
char *cpy;
obj = (void *) *ptr;
cpy = (char *)calloc(*len + 1,sizeof(char));
memcpy(cpy,line,*len);
lammps_command(obj,cpy);
free(cpy);
}
/* fortran wrapper to get the number of atoms from lammps.
return values require an interface in fortran, so we
make the wrapper into a procedure. */
void lammps_get_natoms_(int64_t *ptr, MPI_Fint *natoms)
{
void *obj;
obj = (void *) *ptr;
*natoms = lammps_get_natoms(obj);
}
/* wrapper to copy coordinates from lammps to fortran */
void lammps_get_coords_(int64_t *ptr, double *coords)
{
void *obj;
obj = (void *) *ptr;
lammps_get_coords(obj,coords);
}
/* wrapper to copy coordinates from fortran to lammps */
void lammps_put_coords_(int64_t *ptr, double *coords)
{
void *obj;
obj = (void *) *ptr;
lammps_put_coords(obj,coords);
}

View File

@ -1,47 +0,0 @@
# Makefile for MD with quantum forces via LAMMPS <-> Quest coupling
SHELL = /bin/sh
# System-specific settings
LAMMPS = /home/sjplimp/lammps
CC = g++
CCFLAGS = -g -O -DMPICH_IGNORE_CXX_SEEK -I../library
DEPFLAGS = -M
LINK = g++
LINKFLAGS = -g -O -L../library -L${LAMMPS}/src
USRLIB = -lcouple -llmp_g++
SYSLIB = -lfftw -lmpich -lpthread
ARCHIVE = ar
ARFLAGS = -rc
SIZE = size
# Files
EXE = lmpqst
SRC = $(wildcard *.cpp)
INC = $(wildcard *.h)
OBJ = $(SRC:.cpp=.o)
# Targets
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(OBJ) $(USRLIB) $(SYSLIB) -o $(EXE)
$(SIZE) $(EXE)
clean:
rm $(EXE) *.o
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
include $(DEPENDS)

View File

@ -1,64 +0,0 @@
This directory has an application that runs classical MD via LAMMPS,
but uses quantum forces calculated by the Quest DFT (density
functional) code in place of the usual classical MD forces calculated
by a pair style in LAMMPS.
lmpqst.cpp main program
it links LAMMPS as a library
it invokes Quest as an executable
in.lammps LAMMPS input script, without the run command
si_111.in Quest input script for an 8-atom Si unit cell
lmppath.h contains path to LAMMPS home directory
qstexe.h contains full pathname to Quest executable
After editing the Makefile, lmppath.h, and qstexe.h to make them
suitable for your box, type:
make -f Makefile.g++
and you should get the lmpqst executable.
NOTE: To run this coupled application, you must, of course, have Quest
built on your system. Its WWW site is http://dft.sandia.gov/Quest.
It is not an open-source code, but you can contact its authors to
obtain a copy.
You can run lmpqst in serial or parallel as:
% lmpqst Niter in.lammps in.quest
% mpirun -np 4 lmpqst Niter in.lammps in.quest
where
Niter = # of MD iterations
in.lammps = LAMMPS input script
in.quest = Quest input script
The log files in this directory are for this run:
% lmpqst 10 in.lammps si_111.in
This application is an example of a coupling where the driver code
(lmpqst) runs one code (LAMMPS) as the outer code and lets it call
the other code (Quest) as an inner code. Specifically, the
driver (lmpqst) invokes one code (LAMMPS) to perform its timestep
loop, and grabs information from the other code (Quest) during its
timestep. This is done in LAMMPS using the fix external command,
which makes a "callback" to the driver application (lmpqst), which in
turn invokes Quest with new atom coordinates, lets Quest compute
forces, and returns those forces to the LAMMPS fix external.
The driver code launches LAMMPS in parallel. But Quest is only run on
a single processor. It would be possible to change this by using a
parallel build of Quest.
Since Quest does not currently have a library interface, the driver
code interfaces with Quest via input and output files.
Note that essentially 100% of the run time for this coupled
application is spent in Quest, as the quantum calculation of forces
dominates the calculation.
You can look at the log files in the directory to see sample LAMMPS
output for this simulation. Dump files produced by LAMMPS are stored
as dump.md.
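For reference, a callback registered with fix external has the
signature used by quest_callback() in lmpqst.cpp below. A bare-bones
sketch, with a made-up force rule standing in for the Quest call, is:

/* sketch of a fix external callback; quest_callback() in lmpqst.cpp
   has the same signature but fills f from a Quest run instead */
void my_callback(void *ptr, int ntimestep, int nlocal,
                 int *id, double **x, double **f)
{
  /* x[i][0..2] = coords of the nlocal atoms owned by this proc
     f[i][0..2] = forces the callback must supply (f may be NULL if nlocal = 0) */
  for (int i = 0; i < nlocal; i++) {
    f[i][0] = -0.1*x[i][0];   /* placeholder harmonic tether, not real physics */
    f[i][1] = -0.1*x[i][1];
    f[i][2] = -0.1*x[i][2];
  }
}

The callback is attached with FixExternal::set_callback(), as lmpqst.cpp
does after finding the fix by its ID.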

View File

@ -1,20 +0,0 @@
# LAMMPS input for coupling MD/Quantum
units metal
dimension 3
atom_style atomic
atom_modify sort 0 0.0
lattice diamond 5.43
region box block 0 1 0 1 0 1
create_box 1 box
create_atoms 1 box
mass 1 28.08
velocity all create 300.0 87293 loop geom
fix 1 all nve
fix 2 all external
dump 1 all custom 1 dump.md id type x y z fx fy fz
thermo 1

View File

@ -1 +0,0 @@
#define LMPPATH /home/sjplimp/lammps

View File

@ -1,262 +0,0 @@
// lmpqst = umbrella driver to couple LAMMPS + Quest
// for MD using quantum forces
// Syntax: lmpqst Niter in.lammps in.quest
// Niter = # of MD iterations
// in.lammps = LAMMPS input script
// in.quest = Quest input script
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "many2one.h"
#include "one2many.h"
#include "files.h"
#include "memory.h"
#include "error.h"
#define QUOTE_(x) #x
#define QUOTE(x) QUOTE_(x)
#include "lmppath.h"
#include QUOTE(LMPPATH/src/lammps.h)
#include QUOTE(LMPPATH/src/library.h)
#include QUOTE(LMPPATH/src/input.h)
#include QUOTE(LMPPATH/src/modify.h)
#include QUOTE(LMPPATH/src/fix.h)
#include QUOTE(LMPPATH/src/fix_external.h)
#include "qstexe.h"
using namespace LAMMPS_NS;
#define ANGSTROM_per_BOHR 0.529
#define EV_per_RYDBERG 13.6056923
void quest_callback(void *, int, int, int *, double **, double **);
struct Info {
int me;
Memory *memory;
LAMMPS *lmp;
char *quest_input;
};
/* ---------------------------------------------------------------------- */
int main(int narg, char **arg)
{
int n;
char str[128];
// setup MPI
MPI_Init(&narg,&arg);
MPI_Comm comm = MPI_COMM_WORLD;
int me,nprocs;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
Memory *memory = new Memory(comm);
Error *error = new Error(comm);
// command-line args
if (narg != 4) error->all("Syntax: lmpqst Niter in.lammps in.quest");
int niter = atoi(arg[1]);
n = strlen(arg[2]) + 1;
char *lammps_input = new char[n];
strcpy(lammps_input,arg[2]);
n = strlen(arg[3]) + 1;
char *quest_input = new char[n];
strcpy(quest_input,arg[3]);
// instantiate LAMMPS
LAMMPS *lmp = new LAMMPS(0,NULL,MPI_COMM_WORLD);
// create simulation in LAMMPS from in.lammps
lmp->input->file(lammps_input);
// make info available to callback function
Info info;
info.me = me;
info.memory = memory;
info.lmp = lmp;
info.quest_input = quest_input;
// set callback to Quest inside fix external
int ifix = lmp->modify->find_fix("2");
FixExternal *fix = (FixExternal *) lmp->modify->fix[ifix];
fix->set_callback(quest_callback,&info);
// run LAMMPS for Niter
// each time it needs forces, it will invoke quest_callback
sprintf(str,"run %d",niter);
lmp->input->one(str);
// clean up
delete lmp;
delete memory;
delete error;
delete [] lammps_input;
delete [] quest_input;
MPI_Finalize();
}
/* ----------------------------------------------------------------------
callback to Quest with atom IDs and coords from each proc
invoke Quest to compute forces, load them into f for LAMMPS to use
f can be NULL if proc owns no atoms
------------------------------------------------------------------------- */
void quest_callback(void *ptr, int ntimestep, int nlocal,
int *id, double **x, double **f)
{
int i,j;
char str[128];
Info *info = (Info *) ptr;
// boxlines = LAMMPS box size converted into Quest lattice vectors
char **boxlines = NULL;
if (info->me == 0) {
boxlines = new char*[3];
for (i = 0; i < 3; i++) boxlines[i] = new char[128];
}
double boxxlo = *((double *) lammps_extract_global(info->lmp,"boxxlo"));
double boxxhi = *((double *) lammps_extract_global(info->lmp,"boxxhi"));
double boxylo = *((double *) lammps_extract_global(info->lmp,"boxylo"));
double boxyhi = *((double *) lammps_extract_global(info->lmp,"boxyhi"));
double boxzlo = *((double *) lammps_extract_global(info->lmp,"boxzlo"));
double boxzhi = *((double *) lammps_extract_global(info->lmp,"boxzhi"));
double xprd = (boxxhi-boxxlo)/ANGSTROM_per_BOHR;
double yprd = (boxyhi-boxylo)/ANGSTROM_per_BOHR;
double zprd = (boxzhi-boxzlo)/ANGSTROM_per_BOHR;
if (info->me == 0) {
sprintf(boxlines[0],"%g %g %g\n",xprd,0.0,0.0);
sprintf(boxlines[1],"%g %g %g\n",0.0,yprd,0.0);
sprintf(boxlines[2],"%g %g %g\n",0.0,0.0,zprd);
}
// xlines = x for atoms on each proc converted to text lines
// xlines is suitable for insertion into Quest input file
// convert LAMMPS Angstroms to Quest bohr
int natoms;
MPI_Allreduce(&nlocal,&natoms,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
Many2One *lmp2qst = new Many2One(MPI_COMM_WORLD);
lmp2qst->setup(nlocal,id,natoms);
char **xlines = NULL;
double **xquest = NULL;
if (info->me == 0) {
xquest = info->memory->create_2d_double_array(natoms,3,"lmpqst:xquest");
xlines = new char*[natoms];
for (i = 0; i < natoms; i++) xlines[i] = new char[128];
}
if (info->me == 0) lmp2qst->gather(&x[0][0],3,&xquest[0][0]);
else lmp2qst->gather(&x[0][0],3,NULL);
if (info->me == 0) {
for (i = 0; i < natoms; i++) {
xquest[i][0] /= ANGSTROM_per_BOHR;
xquest[i][1] /= ANGSTROM_per_BOHR;
xquest[i][2] /= ANGSTROM_per_BOHR;
}
for (i = 0; i < natoms; i++) {
sprintf(xlines[i],"%d %d %g %g %g\n",i+1,1,
xquest[i][0],xquest[i][1],xquest[i][2]);
}
}
// one-processor tasks:
// whack all lcao.* files
// cp quest_input to lcao.in
// replace atom coords section of lcao.in with new atom coords
// run Quest on one proc, save screen output to file
// flines = atom forces extracted from Quest screen file
// fquest = atom forces
// convert Quest Ryd/bohr to LAMMPS eV/Angstrom
char **flines = NULL;
double **fquest = NULL;
if (info->me == 0) {
fquest = info->memory->create_2d_double_array(natoms,3,"lmpqst:fquest");
flines = new char*[natoms];
for (i = 0; i < natoms; i++) flines[i] = new char[128];
}
if (info->me == 0) {
system("rm lcao.*");
sprintf(str,"cp %s lcao.in",info->quest_input);
system(str);
sprintf(str,"cp %s lcao.x",QUOTE(QUEST));
system(str);
replace("lcao.in","primitive lattice vectors",3,boxlines);
replace("lcao.in","atom, type, position vector",natoms,xlines);
system("lcao.x > lcao.screen");
extract("lcao.screen","atom x force "
"y force z force",natoms,flines);
int itmp;
for (i = 0; i < natoms; i++)
sscanf(flines[i],"%d %lg %lg %lg",&itmp,
&fquest[i][0],&fquest[i][1],&fquest[i][2]);
for (i = 0; i < natoms; i++) {
fquest[i][0] *= EV_per_RYDBERG / ANGSTROM_per_BOHR;
fquest[i][1] *= EV_per_RYDBERG / ANGSTROM_per_BOHR;
fquest[i][2] *= EV_per_RYDBERG / ANGSTROM_per_BOHR;
}
}
// convert fquest on one proc into f for atoms on each proc
One2Many *qst2lmp = new One2Many(MPI_COMM_WORLD);
qst2lmp->setup(natoms,nlocal,id);
double *fvec = NULL;
if (f) fvec = &f[0][0];
if (info->me == 0) qst2lmp->scatter(&fquest[0][0],3,fvec);
else qst2lmp->scatter(NULL,3,fvec);
// clean up
// some data only exists on proc 0
delete lmp2qst;
delete qst2lmp;
info->memory->destroy_2d_double_array(xquest);
info->memory->destroy_2d_double_array(fquest);
if (boxlines) {
for (i = 0; i < 3; i++) delete [] boxlines[i];
delete [] boxlines;
}
if (xlines) {
for (i = 0; i < natoms; i++) delete [] xlines[i];
delete [] xlines;
}
if (flines) {
for (i = 0; i < natoms; i++) delete [] flines[i];
delete [] flines;
}
}

View File

@ -1,58 +0,0 @@
LAMMPS (20 Sep 2010)
# LAMMPS input for coupling MD/Quantum
units metal
dimension 3
atom_style atomic
atom_modify sort 0 0.0
lattice diamond 5.43
Lattice spacing in x,y,z = 5.43 5.43 5.43
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (5.43 5.43 5.43)
1 by 1 by 1 processor grid
create_atoms 1 box
Created 8 atoms
mass 1 28.08
velocity all create 300.0 87293 loop geom
fix 1 all nve
fix 2 all external
dump 1 all custom 1 dump.md id type x y z fx fy fz
thermo 1
run 10
Memory usage per processor = 1.25982 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 0.2714463 1810.9378
1 298.22165 0 0 0.26983722 1800.2029
2 293.2839 0 0 0.26536943 1770.3964
3 286.18537 0 0 0.25894654 1727.5464
4 277.61576 0 0 0.25119258 1675.8163
5 267.3325 0 0 0.24188807 1613.7418
6 254.94702 0 0 0.23068142 1538.9774
7 240.91176 0 0 0.21798202 1454.2541
8 226.27996 0 0 0.20474287 1365.9298
9 212.1059 0 0 0.19191788 1280.3687
10 199.27609 0 0 0.18030919 1202.922
Loop time of 80.663 on 1 procs for 10 steps with 8 atoms
Pair time (%) = 0 (0)
Neigh time (%) = 0 (0)
Comm time (%) = 4.91142e-05 (6.08882e-05)
Outpt time (%) = 0.00111485 (0.0013821)
Other time (%) = 80.6618 (99.9986)
Nlocal: 8 ave 8 max 8 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 10 ave 10 max 10 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0

View File

@ -1,58 +0,0 @@
LAMMPS (20 Sep 2010)
# LAMMPS input for coupling MD/Quantum
units metal
dimension 3
atom_style atomic
atom_modify sort 0 0.0
lattice diamond 5.43
Lattice spacing in x,y,z = 5.43 5.43 5.43
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (5.43 5.43 5.43)
1 by 2 by 2 processor grid
create_atoms 1 box
Created 8 atoms
mass 1 28.08
velocity all create 300.0 87293 loop geom
fix 1 all nve
fix 2 all external
dump 1 all custom 1 dump.md id type x y z fx fy fz
thermo 1
run 10
Memory usage per processor = 1.25928 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 0.2714463 1810.9378
1 298.22166 0 0 0.26983722 1800.2029
2 293.28391 0 0 0.26536944 1770.3964
3 286.18538 0 0 0.25894655 1727.5464
4 277.61578 0 0 0.25119259 1675.8164
5 267.33252 0 0 0.24188809 1613.7419
6 254.94703 0 0 0.23068143 1538.9774
7 240.91175 0 0 0.21798202 1454.254
8 226.27997 0 0 0.20474287 1365.9299
9 212.10594 0 0 0.19191791 1280.3689
10 199.27613 0 0 0.18030923 1202.9223
Loop time of 79.8256 on 4 procs for 10 steps with 8 atoms
Pair time (%) = 0 (0)
Neigh time (%) = 0 (0)
Comm time (%) = 0.000365376 (0.000457718)
Outpt time (%) = 0.00169969 (0.00212925)
Other time (%) = 79.8236 (99.9974)
Nlocal: 2 ave 2 max 2 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5 ave 5 max 5 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0

View File

@ -1 +0,0 @@
#define QUEST /home/sjplimp/csrf/quest/src/lcao.x

View File

@ -1,161 +0,0 @@
do setup
do iters
do force
no relax
setup data
title
Si 1x1x1 unit cell
functional
PBE
dimensions of system (0=cluster ... 3=bulk)
3
primitive lattice vectors
10.261212 0.000000 0.000000
0.000000 10.261212 0.000000
0.000000 0.000000 10.261212
grid dimensions
10 10 10
atom types
1
type number, label:
1 Si_pbe
notes5
Originally constructed by Peter A. Schultz, 12Apr01
potential generated by new Hamann program PUNSLDX
Cite use with: D.R. Hamann, unpublished.
Potential: "standard" setting out to l=2
Basis: amended Jun05 for better (2d/1d not 1d/1d) d-function
effective nuclear charge (s2p2 to 10.0)
4.00000000d+00
pseudopotentials: Lmax, and effective gaussian range
2 0.86000000d+00
functional type used in generating potential:
PBE
radial mesh: number of points for local and non-local pot integrals
80 67
mesh points for nuclear potential; ham2dh
0.02500000 0.02696978 0.02909477 0.03138719 0.03386023 0.03652812
0.03940622 0.04251109 0.04586060 0.04947402 0.05337215 0.05757741
0.06211402 0.06700807 0.07228773 0.07798338 0.08412779 0.09075634
0.09790716 0.10562140 0.11394345 0.12292121 0.13260635 0.14305458
0.15432605 0.16648562 0.17960325 0.19375443 0.20902061 0.22548964
0.24325628 0.26242278 0.28309943 0.30540522 0.32946852 0.35542780
0.38343245 0.41364362 0.44623518 0.48139466 0.51932441 0.56024270
0.60438500 0.65200533 0.70337773 0.75879783 0.81858456 0.88308197
0.95266121 1.02772271 1.10869840 1.19605428 1.29029305 1.39195702
1.50163124 1.61994684 1.74758469 1.88527930 2.03382306 2.19407079
2.36694466 2.55343950 2.75462852 2.97166951 3.20581145 3.45840177
3.73089402 4.02485632 4.34198031 4.68409093 5.05315693 5.45130215
5.88081777 6.34417553 6.84404189 7.38329340 7.96503329 8.59260927
9.26963282 10.00000000
radwts: weights for radial points
0.00189603 0.00204542 0.00220659 0.00238045 0.00256800 0.00277034
0.00298862 0.00322410 0.00347813 0.00375218 0.00404781 0.00436675
0.00471081 0.00508198 0.00548240 0.00591436 0.00638036 0.00688308
0.00742541 0.00801047 0.00864162 0.00932251 0.01005704 0.01084945
0.01170429 0.01262649 0.01362135 0.01469459 0.01585240 0.01710143
0.01844888 0.01990249 0.02147064 0.02316234 0.02498733 0.02695611
0.02908002 0.03137128 0.03384307 0.03650961 0.03938625 0.04248955
0.04583736 0.04944895 0.05334510 0.05754823 0.06208254 0.06697411
0.07225109 0.07794385 0.08408515 0.09071034 0.09785753 0.10556786
0.11388570 0.12285891 0.13253914 0.14298208 0.15424783 0.16640123
0.17951222 0.19365623 0.20891467 0.22537535 0.24313298 0.26228977
0.28295594 0.30525043 0.32930153 0.35524766 0.38323811 0.41343397
0.44600900 0.48115067 0.51906119 0.55995874 0.60407867 0.65167486
0.70302122 0.75841323
non-local potential: l,potential*integration weight
0 0.62022930 0.62128855 0.62243016 0.62366033 0.62498568 0.62641328
0.62795061 0.62960563 0.63138673 0.63330275 0.63536294 0.63757692
0.63995464 0.64250630 0.64524218 0.64817253 0.65130735 0.65465605
0.65822713 0.66202767 0.66606269 0.67033437 0.67484108 0.67957602
0.68452576 0.68966817 0.69497006 0.70038419 0.70584566 0.71126756
0.71653578 0.72150290 0.72598113 0.72973436 0.73246932 0.73382636
0.73337030 0.73058243 0.72485505 0.71549107 0.70171167 0.68267654
0.65752236 0.62542611 0.58570073 0.53792896 0.48213811 0.41900888
0.35009536 0.27800640 0.20646172 0.14009458 0.08384960 0.04186877
0.01596164 0.00423035 0.00115036 0.00066636 0.00047879 0.00029939
0.00016329 0.00007995 0.00003517 0.00001362 0.00000445 0.00000111
0.00000016 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000
non-local potential: l,potential*integration weight
1 0.59551624 0.59463303 0.59368033 0.59265268 0.59154422 0.59034862
0.58905906 0.58766819 0.58616811 0.58455033 0.58280567 0.58092430
0.57889565 0.57670833 0.57435015 0.57180802 0.56906791 0.56611482
0.56293268 0.55950435 0.55581158 0.55183493 0.54755377 0.54294628
0.53798942 0.53265896 0.52692951 0.52077458 0.51416671 0.50707751
0.49947790 0.49133817 0.48262822 0.47331766 0.46337588 0.45277197
0.44147437 0.42945016 0.41666374 0.40307468 0.38863443 0.37328165
0.35693601 0.33949042 0.32080256 0.30068740 0.27891443 0.25521609
0.22931791 0.20100526 0.17024474 0.13737521 0.10336405 0.07007167
0.04035673 0.01767907 0.00470635 0.00076638 0.00047880 0.00029939
0.00016329 0.00007995 0.00003517 0.00001362 0.00000445 0.00000111
0.00000016 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000
non-local potential: l,potential*integration weight
2 0.56305372 0.55961728 0.55591134 0.55191498 0.54760572 0.54295941
0.53795013 0.53255008 0.52672947 0.52045641 0.51369682 0.50641433
0.49857022 0.49012333 0.48103004 0.47124429 0.46071759 0.44939919
0.43723624 0.42417413 0.41015690 0.39512792 0.37903070 0.36181001
0.34341340 0.32379300 0.30290805 0.28072780 0.25723539 0.23243242
0.20634465 0.17902876 0.15058041 0.12114359 0.09092117 0.06018665
0.02929636 -0.00129833 -0.03104046 -0.05926034 -0.08517498 -0.10789810
-0.12646610 -0.13988656 -0.14721657 -0.14767751 -0.14080976 -0.12666296
-0.10600305 -0.08049270 -0.05276798 -0.02629475 -0.00486427 0.00837657
0.01228139 0.00892332 0.00342796 0.00074936 0.00047880 0.00029939
0.00016329 0.00007995 0.00003517 0.00001362 0.00000445 0.00000111
0.00000016 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000
number of radial functions **** Si PBE Ham-II basis 20Feb01-PAS ****
5
angular momentum, number of alphas
0 4
alphas - s - 4s/2/4s407 (bulk Si dzp Eopt+reopt c1)
0.10460000d+00 0.27226300d+00 1.30050800d+00 2.60103000d+00
wave function coefficients
0.20995300d+00 0.55978200d+00 -0.99128200d+00 0.33487100d+00
angular momentum, number of alphas
1 3
alphas - p - 3p/2/3p492 (bulk Si dzp Eopt + reopt c1)
0.09424100d+00 0.31767900d+00 1.56114500d+00
wave function coefficients
0.06761600d+00 0.31821200d+00 -0.06638300d+00
angular momentum, number of alphas
0 1
alphas - s - second zeta s polarization
0.10460000d+00
wave function coefficients
1.00000000d+00
angular momentum, number of alphas
1 1
alphas - p - second zeta p polarization
0.09424100d+00
wave function coefficients
1.00000000d+00
angular momentum, number of alphas
2 2
alphas - d - angular polarization (dzp Eopt)
0.32000000d+00 1.40000000d+00
wave function coefficients
0.31557000d+00 1.00000000d+00
shell occupancies for this silicon, Si: s(2.00)p(2.00)
2.00000000 2.00000000 0.00000000 0.00000000 0.00000000 0.00000000
end atom file
number of atoms in unit cell
8
atom, type, position vector
1 1 0.0000000000 0.0000000000 0.0000000000
2 1 5.1306060590 5.1306060590 0.0000000000
3 1 5.1306060590 0.0000000000 5.1306060590
4 1 0.0000000000 5.1306060590 5.1306060590
5 1 2.5653030295 2.5653030295 2.5653030295
6 1 7.6959090885 7.6959090885 2.5653030295
7 1 7.6959090885 2.5653030295 7.6959090885
8 1 2.5653030295 7.6959090885 7.6959090885
kgrid
0 0 0
end setup phase data
run phase input data
end of run phase data

View File

@ -1,48 +0,0 @@
# Makefile for grain growth via LAMMPS <-> SPPARKS coupling
SHELL = /bin/sh
# System-specific settings
LAMMPS = /home/sjplimp/lammps
SPPARKS = /home/sjplimp/spparks
CC = g++
CCFLAGS = -g -O -DMPICH_IGNORE_CXX_SEEK -I../library
DEPFLAGS = -M
LINK = g++
LINKFLAGS = -g -O -L../library -L${LAMMPS}/src -L${SPPARKS}/src
USRLIB = -lcouple -llmp_g++ -lspk_g++
SYSLIB = -lfftw -lmpich -lpthread
ARCHIVE = ar
ARFLAGS = -rc
SIZE = size
# Files
EXE = lmpspk
SRC = $(wildcard *.cpp)
INC = $(wildcard *.h)
OBJ = $(SRC:.cpp=.o)
# Targets
lmpspk: $(OBJ)
$(LINK) $(LINKFLAGS) $(OBJ) $(USRLIB) $(SYSLIB) -o $(EXE)
$(SIZE) $(EXE)
clean:
rm $(EXE) *.o
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
include $(DEPENDS)

View File

@ -1,71 +0,0 @@
This directory has an application that models grain growth in the
presence of strain. The grain growth is simulated by a Potts model in
a kinetic Monte Carlo code SPPARKS. Clusters of like spins on a
lattice represent grains. The Hamiltonian for the energy of a
collection of spins includes a strain term and is described on this
page in the SPPARKS documentation:
http://www.sandia.gov/~sjplimp/spparks/doc/app_potts_strain.html.
The strain is computed by LAMMPS from particle displacements: pairs of
atoms across a grain boundary are of different types and thus push off
from each other, because the Lennard-Jones sigma between particles of
different types is larger than the sigma between particles of the same
type (interior to grains).
lmpspk.cpp main program
it links LAMMPS and SPPARKS as libraries
in.spparks SPPARKS input script, without the run command
lmppath.h contains path to LAMMPS home directory
spkpath.h contains path to SPPARKS home directory
After editing the Makefile, lmppath.h, and spkpath.h to make them
suitable for your box, type:
make -f Makefile.g++
and you should get the lmpspk executable.
NOTE: To build and run this coupled application, you must, of course,
have SPPARKS built on your system. Its WWW site is
http://www.sandia.gov/~sjplimp/spparks.html. It is an open-source
code, written by two of the LAMMPS authors.
You can run lmpspk in serial or parallel as:
% lmpspk Niter Ndelta Sfactor in.spparks
% mpirun -np 4 lmpspk Niter Ndelta Sfactor in.spparks
where
Niter = # of outer iterations
Ndelta = time to run MC in each iteration
Sfactor = multiplier on strain effect
in.spparks = SPPARKS input script
The log files in this directory are for this run:
% lmpspk 20 10.0 1 in.spparks
This application is an example of a coupling where the driver code
(lmpspk) alternates back and forth between the 2 applications (LAMMPS
and SPPARKS). In each outer timestep of the driver code, the following
tasks are performed. One code (SPPARKS) is invoked for a few Monte
Carlo steps. Some of its output (spin state) is passed to the other
code (LAMMPS) as input (atom type). Then the other code (LAMMPS) is
invoked for a few timesteps. Some of its output (atom coords) is
massaged to become an input (per-atom strain) for the original code
(SPPARKS).
The driver code launches both SPPARKS and LAMMPS in parallel and they
both decompose their spatial domains in the same manner. The datums
in SPPARKS (lattice sites) are the same as the datums in LAMMPS
(coarse-grained particles). If this were not the case, more
sophisticated inter-code communication would be required.
You can look at the log files in the directory to see sample LAMMPS
and SPPARKS output for this simulation. Dump files produced by the
run are stored as dump.mc and dump.md. The image*.png files show
snapshots from both the LAMMPS and SPPARKS output. Note that the
in.lammps and data.lammps files are not inputs; they are generated by
the lmpspk driver.
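As a rough sketch of the LAMMPS half of one outer iteration (the real
driver, lmpspk.cpp below, uses the Many2Many helper to move data between
the two codes), the exchange could be written with the C-style library
interface as follows. The helper name is made up, and it assumes the
caller has already rearranged spins[] and strain[] onto the LAMMPS atom
ordering, which is exactly what Many2Many handles in the real driver:

#include "library.h"    /* LAMMPS C-style library API (src/library.h) */

/* hypothetical helper: push MC spins in as atom types, minimize,
   pull per-atom displacement magnitudes back out as strain */
void md_relax_step(void *lmp, int *spins, double *strain, double sfactor)
{
  int nlocal = *((int *) lammps_extract_global(lmp,"nlocal"));
  int *type = (int *) lammps_extract_atom(lmp,"type");
  for (int i = 0; i < nlocal; i++) type[i] = spins[i];      /* spins -> atom types */

  lammps_command(lmp,"minimize 0.001 0.001 10 1000");

  nlocal = *((int *) lammps_extract_global(lmp,"nlocal"));  /* may change after a run */
  double **disp = (double **) lammps_extract_compute(lmp,"da",1,2);
  for (int i = 0; i < nlocal; i++)
    strain[i] = sfactor*disp[i][3];     /* column 3 = displacement magnitude */
}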

Binary files not shown (6 images: 280, 321, 329, 260, 229, 261 KiB)

View File

@ -1,23 +0,0 @@
# SPPARKS input for coupling MD/MC
seed 56789
app_style potts/strain 100
dimension 2
lattice sq/8n 1.0
region box block 0 50 0 50 -0.5 0.5
create_box box
create_sites box
set site range 1 100
set d1 value 0.0
sector yes
solve_style tree
diag_style energy
temperature 1.0
stats 10.0
dump 1 10.0 dump.mc

View File

@ -1 +0,0 @@
#define LMPPATH /home/sjplimp/lammps

View File

@ -1,223 +0,0 @@
// lmpspk = umbrella driver to couple LAMMPS + SPPARKS
// for a strain-induced grain growth model
// Syntax: lmpspk Niter Ndelta Sfactor in.spparks
// Niter = # of outer iterations
// Ndelta = time to run MC in each iteration
// Sfactor = multiplier on strain effect
// in.spparks = SPPARKS input script
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "lammps_data_write.h"
#include "many2many.h"
#include "memory.h"
#include "error.h"
#define QUOTE_(x) #x
#define QUOTE(x) QUOTE_(x)
#include "spkpath.h"
#include QUOTE(SPKPATH/src/spparks.h)
#include QUOTE(SPKPATH/src/library.h)
#include QUOTE(SPKPATH/src/input.h)
#include "lmppath.h"
#include QUOTE(LMPPATH/src/lammps.h)
#include QUOTE(LMPPATH/src/library.h)
#include QUOTE(LMPPATH/src/input.h)
#include QUOTE(LMPPATH/src/modify.h)
#include QUOTE(LMPPATH/src/compute.h)
using namespace SPPARKS_NS;
using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
int main(int narg, char **arg)
{
int i,n;
char str[128];
// setup MPI
MPI_Init(&narg,&arg);
MPI_Comm comm = MPI_COMM_WORLD;
int me,nprocs;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
Memory *memory = new Memory(comm);
Error *error = new Error(comm);
// command-line args
if (narg != 5)
error->all("Syntax: lmpspk Niter Ndelta Sfactor in.spparks");
int niter = atoi(arg[1]);
double delta = atof(arg[2]);
double sfactor = atof(arg[3]);
n = strlen(arg[4]) + 1;
char *spparks_input = new char[n];
strcpy(spparks_input,arg[4]);
// instantiate LAMMPS and SPPARKS
SPPARKS *spk = new SPPARKS(0,NULL,MPI_COMM_WORLD);
LAMMPS *lmp = new LAMMPS(0,NULL,MPI_COMM_WORLD);
// create simulation in SPPARKS from in.spparks
spk->input->file(spparks_input);
// extract permanent info from SPPARKS
int dimension,nglobal,nlocal_spparks,nspins;
double boxxlo,boxxhi,boxylo,boxyhi,boxzlo,boxzhi;
int *id_spparks,*spins;
double **xyz;
double *strain;
dimension = *((int *) spparks_extract(spk,"dimension"));
nglobal = *((int *) spparks_extract(spk,"nglobal"));
nlocal_spparks = *((int *) spparks_extract(spk,"nlocal"));
boxxlo = *((double *) spparks_extract(spk,"boxxlo"));
boxxhi = *((double *) spparks_extract(spk,"boxxhi"));
boxylo = *((double *) spparks_extract(spk,"boxylo"));
boxyhi = *((double *) spparks_extract(spk,"boxyhi"));
if (dimension == 3) {
boxzlo = *((double *) spparks_extract(spk,"boxzlo"));
boxzhi = *((double *) spparks_extract(spk,"boxzhi"));
} else {
boxzlo = -0.5;
boxzhi = 0.5;
}
id_spparks = (int *) spparks_extract(spk,"id");
spins = (int *) spparks_extract(spk,"site");
xyz = (double **) spparks_extract(spk,"xyz");
nspins = *((int *) spparks_extract(spk,"nspins"));
strain = (double *) spparks_extract(spk,"strain");
// write a LAMMPS input script using SPPARKS params
if (me == 0) {
FILE *fp = fopen("in.lammps","w");
if (fp == NULL) error->one("Could not create LAMMPS input script");
fprintf(fp,"units lj\n");
sprintf(str,"dimension %d\n",dimension);
fprintf(fp,str);
fprintf(fp,"atom_style atomic\n\n");
fprintf(fp,"read_data data.lammps\n");
fprintf(fp,"mass * 1.0\n\n");
fprintf(fp,"pair_style lj/cut 2.5\n");
fprintf(fp,"pair_coeff * * 1.0 1.2\n");
for (i = 0; i < nspins; i++) {
sprintf(str,"pair_coeff %d %d 1.0 1.0\n",i+1,i+1);
fprintf(fp,str);
}
fprintf(fp,"\n");
fprintf(fp,"compute da all displace/atom\n\n");
fprintf(fp,"dump 1 all atom 10 dump.md\n");
fprintf(fp,"thermo 1\n");
fclose(fp);
}
// write a LAMMPS data file using SPPARKS data
LAMMPSDataWrite *lwd = new LAMMPSDataWrite(MPI_COMM_WORLD);
lwd->file("data.lammps");
lwd->header("%d atoms",nglobal);
lwd->header("%d atom types",nspins);
lwd->header("%g %g xlo xhi",boxxlo,boxxhi);
lwd->header("%g %g ylo yhi",boxylo,boxyhi);
lwd->header("%g %g zlo zhi",boxzlo,boxzhi);
lwd->atoms(nlocal_spparks);
lwd->atoms(id_spparks);
lwd->atoms(spins);
lwd->atoms(3,xyz);
lwd->execute();
delete lwd;
// create simulation in LAMMPS from created input script
lmp->input->file("in.lammps");
// create transfer operators
Many2Many *spk2lmp = new Many2Many(MPI_COMM_WORLD);
Many2Many *lmp2spk = new Many2Many(MPI_COMM_WORLD);
// timestep loop
// run SPPARKS for delta time
// use SPPARKS spins to reset LAMMPS atom types
// perform LAMMPS minimization
// use atom displacements to reset strain values in SPPARKS
// realloc displace as necessary since nlocal_lammps may change
// re-create both xfers every iteration since LAMMPS may migrate atoms
int nmax = 0;
double *displace = NULL;
int nlocal_lammps;
int *id_lammps,*type;
double **displace_lammps;
for (int iter = 0; iter < niter; iter++) {
sprintf(str,"run %g",delta);
spk->input->one(str);
nlocal_lammps = *((int *) lammps_extract_global(lmp,"nlocal"));
id_lammps = (int *) lammps_extract_atom(lmp,"id");
type = (int *) lammps_extract_atom(lmp,"type");
spk2lmp->setup(nlocal_spparks,id_spparks,nlocal_lammps,id_lammps);
spk2lmp->exchange(spins,type);
lmp->input->one("minimize 0.001 0.001 10 1000");
nlocal_lammps = *((int *) lammps_extract_global(lmp,"nlocal"));
id_lammps = (int *) lammps_extract_atom(lmp,"id");
displace_lammps = (double **) lammps_extract_compute(lmp,"da",1,2);
if (nlocal_lammps > nmax) {
memory->sfree(displace);
nmax = nlocal_lammps;
displace = (double *)
memory->smalloc(nmax*sizeof(double),"lmpspk:displace");
}
for (i = 0; i < nlocal_lammps; i++)
displace[i] = sfactor*displace_lammps[i][3];
lmp2spk->setup(nlocal_lammps,id_lammps,nlocal_spparks,id_spparks);
lmp2spk->exchange(displace,strain);
}
memory->sfree(displace);
// clean up
delete spk2lmp;
delete lmp2spk;
delete spk;
delete lmp;
delete [] spparks_input;
delete memory;
delete error;
MPI_Finalize();
}

View File

@ -1,978 +0,0 @@
LAMMPS (20 Sep 2010)
units lj
dimension 2
atom_style atomic
read_data data.lammps
orthogonal box = (0 0 -0.5) to (50 50 0.5)
1 by 1 by 1 processor grid
2500 atoms
mass * 1.0
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.2
pair_coeff 1 1 1.0 1.0
pair_coeff 2 2 1.0 1.0
pair_coeff 3 3 1.0 1.0
pair_coeff 4 4 1.0 1.0
pair_coeff 5 5 1.0 1.0
pair_coeff 6 6 1.0 1.0
pair_coeff 7 7 1.0 1.0
pair_coeff 8 8 1.0 1.0
pair_coeff 9 9 1.0 1.0
pair_coeff 10 10 1.0 1.0
pair_coeff 11 11 1.0 1.0
pair_coeff 12 12 1.0 1.0
pair_coeff 13 13 1.0 1.0
pair_coeff 14 14 1.0 1.0
pair_coeff 15 15 1.0 1.0
pair_coeff 16 16 1.0 1.0
pair_coeff 17 17 1.0 1.0
pair_coeff 18 18 1.0 1.0
pair_coeff 19 19 1.0 1.0
pair_coeff 20 20 1.0 1.0
pair_coeff 21 21 1.0 1.0
pair_coeff 22 22 1.0 1.0
pair_coeff 23 23 1.0 1.0
pair_coeff 24 24 1.0 1.0
pair_coeff 25 25 1.0 1.0
pair_coeff 26 26 1.0 1.0
pair_coeff 27 27 1.0 1.0
pair_coeff 28 28 1.0 1.0
pair_coeff 29 29 1.0 1.0
pair_coeff 30 30 1.0 1.0
pair_coeff 31 31 1.0 1.0
pair_coeff 32 32 1.0 1.0
pair_coeff 33 33 1.0 1.0
pair_coeff 34 34 1.0 1.0
pair_coeff 35 35 1.0 1.0
pair_coeff 36 36 1.0 1.0
pair_coeff 37 37 1.0 1.0
pair_coeff 38 38 1.0 1.0
pair_coeff 39 39 1.0 1.0
pair_coeff 40 40 1.0 1.0
pair_coeff 41 41 1.0 1.0
pair_coeff 42 42 1.0 1.0
pair_coeff 43 43 1.0 1.0
pair_coeff 44 44 1.0 1.0
pair_coeff 45 45 1.0 1.0
pair_coeff 46 46 1.0 1.0
pair_coeff 47 47 1.0 1.0
pair_coeff 48 48 1.0 1.0
pair_coeff 49 49 1.0 1.0
pair_coeff 50 50 1.0 1.0
pair_coeff 51 51 1.0 1.0
pair_coeff 52 52 1.0 1.0
pair_coeff 53 53 1.0 1.0
pair_coeff 54 54 1.0 1.0
pair_coeff 55 55 1.0 1.0
pair_coeff 56 56 1.0 1.0
pair_coeff 57 57 1.0 1.0
pair_coeff 58 58 1.0 1.0
pair_coeff 59 59 1.0 1.0
pair_coeff 60 60 1.0 1.0
pair_coeff 61 61 1.0 1.0
pair_coeff 62 62 1.0 1.0
pair_coeff 63 63 1.0 1.0
pair_coeff 64 64 1.0 1.0
pair_coeff 65 65 1.0 1.0
pair_coeff 66 66 1.0 1.0
pair_coeff 67 67 1.0 1.0
pair_coeff 68 68 1.0 1.0
pair_coeff 69 69 1.0 1.0
pair_coeff 70 70 1.0 1.0
pair_coeff 71 71 1.0 1.0
pair_coeff 72 72 1.0 1.0
pair_coeff 73 73 1.0 1.0
pair_coeff 74 74 1.0 1.0
pair_coeff 75 75 1.0 1.0
pair_coeff 76 76 1.0 1.0
pair_coeff 77 77 1.0 1.0
pair_coeff 78 78 1.0 1.0
pair_coeff 79 79 1.0 1.0
pair_coeff 80 80 1.0 1.0
pair_coeff 81 81 1.0 1.0
pair_coeff 82 82 1.0 1.0
pair_coeff 83 83 1.0 1.0
pair_coeff 84 84 1.0 1.0
pair_coeff 85 85 1.0 1.0
pair_coeff 86 86 1.0 1.0
pair_coeff 87 87 1.0 1.0
pair_coeff 88 88 1.0 1.0
pair_coeff 89 89 1.0 1.0
pair_coeff 90 90 1.0 1.0
pair_coeff 91 91 1.0 1.0
pair_coeff 92 92 1.0 1.0
pair_coeff 93 93 1.0 1.0
pair_coeff 94 94 1.0 1.0
pair_coeff 95 95 1.0 1.0
pair_coeff 96 96 1.0 1.0
pair_coeff 97 97 1.0 1.0
pair_coeff 98 98 1.0 1.0
pair_coeff 99 99 1.0 1.0
pair_coeff 100 100 1.0 1.0
compute da all displace/atom
dump 1 all atom 10 dump.md
thermo 1
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.85036 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 9.3150455 0 9.3150455 97.103602
1 0 3.0727032 0 3.0727032 56.391395
2 0 2.4831365 0 2.4831365 52.450383
3 0 2.1851987 0 2.1851987 50.430755
4 0 1.9671681 0 1.9671681 48.939108
5 0 1.8292753 0 1.8292753 47.969182
6 0 1.7386325 0 1.7386325 47.341991
7 0 1.6672907 0 1.6672907 46.865385
8 0 1.6046164 0 1.6046164 46.429986
9 0 1.5099314 0 1.5099314 45.748095
10 0 1.4106898 0 1.4106898 45.035856
Loop time of 0.035866 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
9.31504545511 1.50993136661 1.41068980202
Force two-norm initial, final = 14635.9 857.658
Force max component initial, final = 332.921 80.6474
Final line search alpha, max atom move = 0.0003705 0.0298798
Iterations, force evaluations = 10 17
Pair time (%) = 0.0250154 (69.7467)
Neigh time (%) = 0.00361013 (10.0656)
Comm time (%) = 0.000285625 (0.796368)
Outpt time (%) = 0.00396585 (11.0574)
Other time (%) = 0.00298905 (8.33394)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 526 ave 526 max 526 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27247 ave 27247 max 27247 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27247
Ave neighs/atom = 10.8988
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
10 0 12.002101 0 12.002101 114.02687
11 0 5.3220163 0 5.3220163 70.950301
12 0 4.1162407 0 4.1162407 62.953059
13 0 3.2208482 0 3.2208482 56.887942
14 0 2.8397825 0 2.8397825 54.301623
15 0 2.019668 0 2.019668 48.619305
16 0 1.8710381 0 1.8710381 47.601138
17 0 1.5160328 0 1.5160328 45.108872
18 0 1.3630386 0 1.3630386 44.05177
19 0 1.1924745 0 1.1924745 42.849287
20 0 1.0523828 0 1.0523828 41.861394
Loop time of 0.0252161 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
12.0021008911 1.1924745171 1.0523827917
Force two-norm initial, final = 21524.6 1259.91
Force max component initial, final = 1527.37 241.935
Final line search alpha, max atom move = 0.000346619 0.0838594
Iterations, force evaluations = 10 10
Pair time (%) = 0.0147347 (58.4339)
Neigh time (%) = 0.00360608 (14.3007)
Comm time (%) = 0.000231743 (0.919027)
Outpt time (%) = 0.00401402 (15.9185)
Other time (%) = 0.00262952 (10.4279)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 528 ave 528 max 528 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27366 ave 27366 max 27366 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27366
Ave neighs/atom = 10.9464
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
20 0 10.274114 0 10.274114 102.04597
21 0 4.2587067 0 4.2587067 63.257085
22 0 3.4579606 0 3.4579606 57.918783
23 0 2.6091259 0 2.6091259 52.140871
24 0 2.2414512 0 2.2414512 49.626887
25 0 1.4257571 0 1.4257571 43.928929
26 0 1.2604412 0 1.2604412 42.77986
27 0 0.93381201 0 0.93381201 40.441383
28 0 0.77305388 0 0.77305388 39.333891
29 0 0.66272332 0 0.66272332 38.57176
30 0 0.57357831 0 0.57357831 37.947153
Loop time of 0.026665 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
10.2741143789 0.662723315031 0.57357831099
Force two-norm initial, final = 20598.4 860.344
Force max component initial, final = 1387.12 253.617
Final line search alpha, max atom move = 0.00039155 0.0993038
Iterations, force evaluations = 10 11
Pair time (%) = 0.0160739 (60.2811)
Neigh time (%) = 0.00360513 (13.5201)
Comm time (%) = 0.000238419 (0.894126)
Outpt time (%) = 0.00400496 (15.0195)
Other time (%) = 0.00274253 (10.2851)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 533 ave 533 max 533 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27534 ave 27534 max 27534 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27534
Ave neighs/atom = 11.0136
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
30 0 8.9362793 0 8.9362793 92.757153
31 0 3.9724633 0 3.9724633 60.7212
32 0 2.6325511 0 2.6325511 51.784932
33 0 1.9937863 0 1.9937863 47.422917
34 0 1.6064073 0 1.6064073 44.757737
35 0 1.1546808 0 1.1546808 41.600404
36 0 0.93474074 0 0.93474074 40.067859
37 0 0.67833269 0 0.67833269 38.264916
38 0 0.51906207 0 0.51906207 37.145018
39 0 0.31997654 0 0.31997654 35.736062
40 0 0.18467885 0 0.18467885 34.785873
Loop time of 0.0250409 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
8.93627934206 0.319976542343 0.184678853337
Force two-norm initial, final = 18864.2 753.682
Force max component initial, final = 1617.71 221.622
Final line search alpha, max atom move = 0.000384932 0.0853095
Iterations, force evaluations = 10 10
Pair time (%) = 0.0145993 (58.302)
Neigh time (%) = 0.00361705 (14.4446)
Comm time (%) = 0.000240088 (0.958783)
Outpt time (%) = 0.00393724 (15.7233)
Other time (%) = 0.00264716 (10.5714)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 531 ave 531 max 531 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27582 ave 27582 max 27582 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27582
Ave neighs/atom = 11.0328
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
40 0 7.767847 0 7.767847 84.925704
41 0 3.0582618 0 3.0582618 54.426036
42 0 2.3262167 0 2.3262167 49.551755
43 0 1.5424972 0 1.5424972 44.209493
44 0 1.1584069 0 1.1584069 41.573231
45 0 0.64653104 0 0.64653104 37.962296
46 0 0.40723134 0 0.40723134 36.303826
47 0 0.22801463 0 0.22801463 35.027621
48 0 0.083516845 0 0.083516845 34.013608
49 0 0.0031254794 0 0.0031254794 33.45763
50 0 -0.081313313 0 -0.081313313 32.838812
Loop time of 0.0265579 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
7.76784700214 0.00312547942455 -0.0813133129634
Force two-norm initial, final = 17487.9 1815.23
Force max component initial, final = 1273.66 876.61
Final line search alpha, max atom move = 0.00073525 0.644528
Iterations, force evaluations = 10 11
Pair time (%) = 0.016026 (60.3437)
Neigh time (%) = 0.003613 (13.6042)
Comm time (%) = 0.000234604 (0.883367)
Outpt time (%) = 0.00397921 (14.9831)
Other time (%) = 0.0027051 (10.1857)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 535 ave 535 max 535 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27865 ave 27865 max 27865 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27865
Ave neighs/atom = 11.146
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
50 0 7.6174014 0 7.6174014 83.803569
51 0 6.2032266 0 6.2032266 74.797688
52 0 2.5148735 0 2.5148735 50.744231
53 0 1.6582304 0 1.6582304 44.949256
54 0 1.1103519 0 1.1103519 41.160347
55 0 0.69940704 0 0.69940704 38.296984
56 0 0.31006967 0 0.31006967 35.549044
57 0 0.081622161 0 0.081622161 33.934922
58 0 -0.090832662 0 -0.090832662 32.712284
59 0 -0.21786156 0 -0.21786156 31.812246
60 0 -0.32169958 0 -0.32169958 31.078298
Loop time of 0.0236218 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
7.6174013995 -0.217861563426 -0.321699583013
Force two-norm initial, final = 21782.8 679.792
Force max component initial, final = 8739.6 189.661
Final line search alpha, max atom move = 0.000489439 0.0928273
Iterations, force evaluations = 10 10
Pair time (%) = 0.0145402 (61.5541)
Neigh time (%) = 0.00240993 (10.2022)
Comm time (%) = 0.000175714 (0.743866)
Outpt time (%) = 0.00391126 (16.5578)
Other time (%) = 0.0025847 (10.942)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 541 ave 541 max 541 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 27986 ave 27986 max 27986 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 27986
Ave neighs/atom = 11.1944
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
60 0 5.8294051 0 5.8294051 71.780312
61 0 2.2883695 0 2.2883695 48.754944
62 0 0.81882056 0 0.81882056 38.788188
63 0 0.50349389 0 0.50349389 36.603591
64 0 0.13140705 0 0.13140705 33.961812
65 0 -0.085091833 0 -0.085091833 32.438467
66 0 -0.28867774 0 -0.28867774 30.97302
67 0 -0.42572348 0 -0.42572348 30.002898
68 0 -0.52736622 0 -0.52736622 29.258456
69 0 -0.66178936 0 -0.66178936 28.29934
70 0 -0.72324173 0 -0.72324173 27.862878
Loop time of 0.0249891 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
5.82940513955 -0.661789361841 -0.7232417307
Force two-norm initial, final = 15029.7 727.267
Force max component initial, final = 1482.04 154.38
Final line search alpha, max atom move = 0.000666311 0.102865
Iterations, force evaluations = 10 10
Pair time (%) = 0.0145392 (58.1823)
Neigh time (%) = 0.00362301 (14.4983)
Comm time (%) = 0.000231981 (0.928329)
Outpt time (%) = 0.00396943 (15.8846)
Other time (%) = 0.00262547 (10.5064)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 552 ave 552 max 552 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28290 ave 28290 max 28290 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28290
Ave neighs/atom = 11.316
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
70 0 4.9285377 0 4.9285377 65.515598
71 0 1.5586603 0 1.5586603 43.561052
72 0 0.44697741 0 0.44697741 35.981964
73 0 0.069754802 0 0.069754802 33.298965
74 0 -0.2107147 0 -0.2107147 31.327263
75 0 -0.35830772 0 -0.35830772 30.28328
76 0 -0.57195013 0 -0.57195013 28.739001
77 0 -0.68409153 0 -0.68409153 27.931128
78 0 -0.79228314 0 -0.79228314 27.146502
79 0 -0.86504711 0 -0.86504711 26.626848
80 0 -0.91680491 0 -0.91680491 26.257842
Loop time of 0.0249 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.92853774479 -0.865047109705 -0.916804912211
Force two-norm initial, final = 14034.9 476.228
Force max component initial, final = 1270.06 91.1396
Final line search alpha, max atom move = 0.00036629 0.0333835
Iterations, force evaluations = 10 10
Pair time (%) = 0.0144863 (58.1781)
Neigh time (%) = 0.003613 (14.51)
Comm time (%) = 0.000246048 (0.988146)
Outpt time (%) = 0.00392914 (15.7797)
Other time (%) = 0.00262547 (10.5441)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 551 ave 551 max 551 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28409 ave 28409 max 28409 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28409
Ave neighs/atom = 11.3636
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
80 0 4.7702655 0 4.7702655 64.412746
81 0 1.4138967 0 1.4138967 42.494974
82 0 0.39709508 0 0.39709508 35.562808
83 0 0.091357616 0 0.091357616 33.433831
84 0 -0.14995689 0 -0.14995689 31.744545
85 0 -0.34070771 0 -0.34070771 30.384745
86 0 -0.56311713 0 -0.56311713 28.778505
87 0 -0.69122357 0 -0.69122357 27.855817
88 0 -0.77324301 0 -0.77324301 27.264482
89 0 -0.84548243 0 -0.84548243 26.734692
90 0 -0.90609547 0 -0.90609547 26.289732
Loop time of 0.0249319 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.77026550795 -0.845482429321 -0.906095466828
Force two-norm initial, final = 13429.1 828.371
Force max component initial, final = 1191.96 285.981
Final line search alpha, max atom move = 0.000318774 0.0911633
Iterations, force evaluations = 10 10
Pair time (%) = 0.014478 (58.07)
Neigh time (%) = 0.00361109 (14.4838)
Comm time (%) = 0.000230074 (0.922809)
Outpt time (%) = 0.00393724 (15.792)
Other time (%) = 0.00267553 (10.7314)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 557 ave 557 max 557 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28482 ave 28482 max 28482 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28482
Ave neighs/atom = 11.3928
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
90 0 4.6721167 0 4.6721167 63.632364
91 0 1.6951784 0 1.6951784 44.238331
92 0 0.44841999 0 0.44841999 35.811459
93 0 0.10249086 0 0.10249086 33.388633
94 0 -0.30440803 0 -0.30440803 30.509055
95 0 -0.58377427 0 -0.58377427 28.505707
96 0 -0.79655365 0 -0.79655365 26.960847
97 0 -0.90735594 0 -0.90735594 26.157017
98 0 -0.99343895 0 -0.99343895 25.51798
99 0 -1.0302142 0 -1.0302142 25.251301
100 0 -1.0696142 0 -1.0696142 24.971979
Loop time of 0.0263698 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.67211666559 -1.03021419494 -1.06961424119
Force two-norm initial, final = 13689.6 576.187
Force max component initial, final = 1523.72 245.904
Final line search alpha, max atom move = 0.000449346 0.110496
Iterations, force evaluations = 10 11
Pair time (%) = 0.0158815 (60.2262)
Neigh time (%) = 0.00362802 (13.7582)
Comm time (%) = 0.000239134 (0.906847)
Outpt time (%) = 0.00393391 (14.9182)
Other time (%) = 0.00268722 (10.1905)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 553 ave 553 max 553 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28522 ave 28522 max 28522 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28522
Ave neighs/atom = 11.4088
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0 4.0553768 0 4.0553768 59.538
101 0 0.97860768 0 0.97860768 39.398542
102 0 0.011581326 0 0.011581326 32.777939
103 0 -0.29467441 0 -0.29467441 30.628112
104 0 -0.49830972 0 -0.49830972 29.178225
105 0 -0.7295283 0 -0.7295283 27.468245
106 0 -0.83934397 0 -0.83934397 26.687019
107 0 -0.90122046 0 -0.90122046 26.263579
108 0 -0.96663619 0 -0.96663619 25.786343
109 0 -1.0417159 0 -1.0417159 25.23983
110 0 -1.122473 0 -1.122473 24.64702
Loop time of 0.0294309 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.05537676723 -1.04171594687 -1.12247296867
Force two-norm initial, final = 12331.6 860.308
Force max component initial, final = 1058.89 313.708
Final line search alpha, max atom move = 0.000932331 0.292479
Iterations, force evaluations = 10 13
Pair time (%) = 0.0187619 (63.749)
Neigh time (%) = 0.00360394 (12.2454)
Comm time (%) = 0.000266552 (0.905689)
Outpt time (%) = 0.0039897 (13.5562)
Other time (%) = 0.00280881 (9.54375)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 550 ave 550 max 550 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28527 ave 28527 max 28527 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28527
Ave neighs/atom = 11.4108
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
110 0 4.0088675 0 4.0088675 59.056837
111 0 2.2995428 0 2.2995428 47.949107
112 0 0.99828672 0 0.99828672 39.337343
113 0 0.13527687 0 0.13527687 33.474345
114 0 -0.20926445 0 -0.20926445 31.05957
115 0 -0.47575259 0 -0.47575259 29.164559
116 0 -0.67474214 0 -0.67474214 27.723517
117 0 -0.8468153 0 -0.8468153 26.486294
118 0 -0.95515511 0 -0.95515511 25.685323
119 0 -1.0564735 0 -1.0564735 24.951805
120 0 -1.0948823 0 -1.0948823 24.677001
Loop time of 0.027849 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.00886751896 -1.05647353137 -1.09488226118
Force two-norm initial, final = 12395.7 461.929
Force max component initial, final = 2663.69 76.5856
Final line search alpha, max atom move = 0.000326804 0.0250284
Iterations, force evaluations = 10 12
Pair time (%) = 0.017323 (62.2035)
Neigh time (%) = 0.003613 (12.9735)
Comm time (%) = 0.000246763 (0.886077)
Outpt time (%) = 0.0039196 (14.0745)
Other time (%) = 0.00274658 (9.86242)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 554 ave 554 max 554 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28631 ave 28631 max 28631 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28631
Ave neighs/atom = 11.4524
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
120 0 3.6078219 0 3.6078219 56.132202
121 0 1.14077 0 1.14077 40.040613
122 0 -0.10482013 0 -0.10482013 31.597603
123 0 -0.42488667 0 -0.42488667 29.333621
124 0 -0.65936545 0 -0.65936545 27.682524
125 0 -0.84075312 0 -0.84075312 26.378534
126 0 -0.96129843 0 -0.96129843 25.497027
127 0 -1.0968129 0 -1.0968129 24.512431
128 0 -1.1694506 0 -1.1694506 23.977989
129 0 -1.2126457 0 -1.2126457 23.660862
130 0 -1.2583205 0 -1.2583205 23.335303
Loop time of 0.0279129 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
3.60782185256 -1.21264568701 -1.25832049648
Force two-norm initial, final = 12338.7 511.335
Force max component initial, final = 1518.5 147.585
Final line search alpha, max atom move = 0.000294687 0.0434913
Iterations, force evaluations = 10 12
Pair time (%) = 0.0173075 (62.0056)
Neigh time (%) = 0.00363278 (13.0147)
Comm time (%) = 0.000241041 (0.863549)
Outpt time (%) = 0.00393629 (14.1021)
Other time (%) = 0.00279522 (10.0141)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 558 ave 558 max 558 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28611 ave 28611 max 28611 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28611
Ave neighs/atom = 11.4444
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
130 0 3.1243455 0 3.1243455 52.793945
131 0 0.71565075 0 0.71565075 37.056227
132 0 -0.27217454 0 -0.27217454 30.343304
133 0 -0.61618257 0 -0.61618257 27.874909
134 0 -0.7988002 0 -0.7988002 26.598662
135 0 -0.91256484 0 -0.91256484 25.789333
136 0 -1.0890032 0 -1.0890032 24.463878
137 0 -1.1980686 0 -1.1980686 23.681987
138 0 -1.2567103 0 -1.2567103 23.266253
139 0 -1.3060322 0 -1.3060322 22.899986
140 0 -1.359869 0 -1.359869 22.502063
Loop time of 0.0308802 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
3.12434547963 -1.30603224583 -1.35986897103
Force two-norm initial, final = 11522.7 423.596
Force max component initial, final = 1304.48 166.359
Final line search alpha, max atom move = 0.000640699 0.106586
Iterations, force evaluations = 10 14
Pair time (%) = 0.02019 (65.3817)
Neigh time (%) = 0.00362086 (11.7255)
Comm time (%) = 0.000261784 (0.847739)
Outpt time (%) = 0.00396395 (12.8365)
Other time (%) = 0.00284362 (9.20855)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 560 ave 560 max 560 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28662 ave 28662 max 28662 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28662
Ave neighs/atom = 11.4648
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
140 0 2.5370527 0 2.5370527 48.829908
141 0 0.17606803 0 0.17606803 33.286136
142 0 -0.47155579 0 -0.47155579 28.843352
143 0 -0.78220993 0 -0.78220993 26.612272
144 0 -0.99344036 0 -0.99344036 25.130357
145 0 -1.0847272 0 -1.0847272 24.47808
146 0 -1.186227 0 -1.186227 23.697138
147 0 -1.2660315 0 -1.2660315 23.115022
148 0 -1.3035228 0 -1.3035228 22.861414
149 0 -1.3383511 0 -1.3383511 22.607288
150 0 -1.3718092 0 -1.3718092 22.359069
Loop time of 0.032371 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.53705265492 -1.33835106775 -1.37180924768
Force two-norm initial, final = 10219.1 550.71
Force max component initial, final = 936.507 270.239
Final line search alpha, max atom move = 0.000417128 0.112724
Iterations, force evaluations = 10 15
Pair time (%) = 0.0216274 (66.811)
Neigh time (%) = 0.00362229 (11.1899)
Comm time (%) = 0.000267029 (0.8249)
Outpt time (%) = 0.00395203 (12.2085)
Other time (%) = 0.00290227 (8.96563)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 560 ave 560 max 560 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28716 ave 28716 max 28716 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28716
Ave neighs/atom = 11.4864
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
150 0 2.6511871 0 2.6511871 49.579345
151 0 0.33433869 0 0.33433869 34.351907
152 0 -0.60798579 0 -0.60798579 27.870213
153 0 -0.90621268 0 -0.90621268 25.71559
154 0 -1.1119344 0 -1.1119344 24.25877
155 0 -1.2040479 0 -1.2040479 23.599805
156 0 -1.2883766 0 -1.2883766 22.946371
157 0 -1.3343075 0 -1.3343075 22.614259
158 0 -1.3847759 0 -1.3847759 22.268602
159 0 -1.4145217 0 -1.4145217 22.049306
160 0 -1.4380243 0 -1.4380243 21.876406
Loop time of 0.0337121 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.65118714313 -1.41452169876 -1.4380243142
Force two-norm initial, final = 10492.4 416.256
Force max component initial, final = 1101.81 185.859
Final line search alpha, max atom move = 0.000596367 0.11084
Iterations, force evaluations = 10 15
Pair time (%) = 0.0216587 (64.2459)
Neigh time (%) = 0.00483203 (14.3332)
Comm time (%) = 0.000329733 (0.978083)
Outpt time (%) = 0.00391579 (11.6154)
Other time (%) = 0.00297594 (8.8275)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 562 ave 562 max 562 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28825 ave 28825 max 28825 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28825
Ave neighs/atom = 11.53
Neighbor list builds = 4
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
160 0 2.445485 0 2.445485 48.062267
161 0 0.13902608 0 0.13902608 32.876214
162 0 -0.75396797 0 -0.75396797 26.718868
163 0 -1.0071611 0 -1.0071611 24.885772
164 0 -1.1819905 0 -1.1819905 23.629123
165 0 -1.2798339 0 -1.2798339 22.901472
166 0 -1.3806808 0 -1.3806808 22.158356
167 0 -1.4560709 0 -1.4560709 21.602465
168 0 -1.5015178 0 -1.5015178 21.265259
169 0 -1.5465541 0 -1.5465541 20.930655
170 0 -1.5820884 0 -1.5820884 20.657365
Loop time of 0.023509 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.44548496469 -1.54655408621 -1.58208836572
Force two-norm initial, final = 10220.9 661.104
Force max component initial, final = 1044.13 354.281
Final line search alpha, max atom move = 0.0011882 0.420955
Iterations, force evaluations = 10 10
Pair time (%) = 0.014415 (61.317)
Neigh time (%) = 0.00240922 (10.2481)
Comm time (%) = 0.000192642 (0.819439)
Outpt time (%) = 0.00393844 (16.7529)
Other time (%) = 0.0025537 (10.8626)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 561 ave 561 max 561 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28706 ave 28706 max 28706 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28706
Ave neighs/atom = 11.4824
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
170 0 2.5358609 0 2.5358609 48.866435
171 0 0.063716631 0 0.063716631 32.5423
172 0 -0.53442893 0 -0.53442893 28.41204
173 0 -0.80688389 0 -0.80688389 26.509346
174 0 -1.0715541 0 -1.0715541 24.564023
175 0 -1.2254796 0 -1.2254796 23.462464
176 0 -1.3022249 0 -1.3022249 22.903025
177 0 -1.3771632 0 -1.3771632 22.333129
178 0 -1.4452028 0 -1.4452028 21.833035
179 0 -1.4781077 0 -1.4781077 21.596868
180 0 -1.5024534 0 -1.5024534 21.415156
Loop time of 0.0293849 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.53586089291 -1.47810766266 -1.50245335744
Force two-norm initial, final = 9891.64 448.294
Force max component initial, final = 808.125 120.102
Final line search alpha, max atom move = 0.00131125 0.157484
Iterations, force evaluations = 10 13
Pair time (%) = 0.0187562 (63.8293)
Neigh time (%) = 0.00361609 (12.306)
Comm time (%) = 0.000254869 (0.86735)
Outpt time (%) = 0.00398111 (13.5482)
Other time (%) = 0.00277662 (9.44916)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 567 ave 567 max 567 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28775 ave 28775 max 28775 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28775
Ave neighs/atom = 11.51
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
180 0 2.7212223 0 2.7212223 50.026136
181 0 0.2721017 0 0.2721017 33.925894
182 0 -0.42116779 0 -0.42116779 29.157245
183 0 -0.62932201 0 -0.62932201 27.728916
184 0 -0.84115646 0 -0.84115646 26.20046
185 0 -1.0503044 0 -1.0503044 24.685348
186 0 -1.1625169 0 -1.1625169 23.879002
187 0 -1.2479996 0 -1.2479996 23.257233
188 0 -1.359242 0 -1.359242 22.442562
189 0 -1.4157422 0 -1.4157422 22.034518
190 0 -1.4675347 0 -1.4675347 21.652723
Loop time of 0.024842 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.72122227536 -1.41574216788 -1.46753473448
Force two-norm initial, final = 10306.2 415.803
Force max component initial, final = 951.75 103.59
Final line search alpha, max atom move = 0.00089633 0.0928507
Iterations, force evaluations = 10 10
Pair time (%) = 0.0144215 (58.0527)
Neigh time (%) = 0.00360799 (14.5237)
Comm time (%) = 0.00022912 (0.922309)
Outpt time (%) = 0.00392914 (15.8165)
Other time (%) = 0.00265431 (10.6848)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 570 ave 570 max 570 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28849 ave 28849 max 28849 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28849
Ave neighs/atom = 11.5396
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 3.23183 Mbytes
Step Temp E_pair E_mol TotEng Press
190 0 2.0041133 0 2.0041133 44.903366
191 0 -0.13802828 0 -0.13802828 30.796936
192 0 -0.65552855 0 -0.65552855 27.241047
193 0 -0.8720148 0 -0.8720148 25.714407
194 0 -1.0652883 0 -1.0652883 24.338022
195 0 -1.2191873 0 -1.2191873 23.225018
196 0 -1.3593543 0 -1.3593543 22.192069
197 0 -1.4397147 0 -1.4397147 21.604815
198 0 -1.5433079 0 -1.5433079 20.81796
199 0 -1.6127297 0 -1.6127297 20.314889
200 0 -1.6540467 0 -1.6540467 19.995791
Loop time of 0.024853 on 1 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.00411330956 -1.61272973671 -1.65404673311
Force two-norm initial, final = 9931.07 717.032
Force max component initial, final = 976.09 300.507
Final line search alpha, max atom move = 0.00121807 0.366038
Iterations, force evaluations = 10 10
Pair time (%) = 0.0144005 (57.9427)
Neigh time (%) = 0.00363111 (14.6104)
Comm time (%) = 0.000238895 (0.961234)
Outpt time (%) = 0.00396919 (15.9707)
Other time (%) = 0.00261331 (10.5151)
Nlocal: 2500 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 572 ave 572 max 572 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 28888 ave 28888 max 28888 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 28888
Ave neighs/atom = 11.5552
Neighbor list builds = 3
Dangerous builds = 0

View File

@@ -1,978 +0,0 @@
LAMMPS (20 Sep 2010)
units lj
dimension 2
atom_style atomic
read_data data.lammps
orthogonal box = (0 0 -0.5) to (50 50 0.5)
2 by 2 by 1 processor grid
2500 atoms
mass * 1.0
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.2
pair_coeff 1 1 1.0 1.0
pair_coeff 2 2 1.0 1.0
pair_coeff 3 3 1.0 1.0
pair_coeff 4 4 1.0 1.0
pair_coeff 5 5 1.0 1.0
pair_coeff 6 6 1.0 1.0
pair_coeff 7 7 1.0 1.0
pair_coeff 8 8 1.0 1.0
pair_coeff 9 9 1.0 1.0
pair_coeff 10 10 1.0 1.0
pair_coeff 11 11 1.0 1.0
pair_coeff 12 12 1.0 1.0
pair_coeff 13 13 1.0 1.0
pair_coeff 14 14 1.0 1.0
pair_coeff 15 15 1.0 1.0
pair_coeff 16 16 1.0 1.0
pair_coeff 17 17 1.0 1.0
pair_coeff 18 18 1.0 1.0
pair_coeff 19 19 1.0 1.0
pair_coeff 20 20 1.0 1.0
pair_coeff 21 21 1.0 1.0
pair_coeff 22 22 1.0 1.0
pair_coeff 23 23 1.0 1.0
pair_coeff 24 24 1.0 1.0
pair_coeff 25 25 1.0 1.0
pair_coeff 26 26 1.0 1.0
pair_coeff 27 27 1.0 1.0
pair_coeff 28 28 1.0 1.0
pair_coeff 29 29 1.0 1.0
pair_coeff 30 30 1.0 1.0
pair_coeff 31 31 1.0 1.0
pair_coeff 32 32 1.0 1.0
pair_coeff 33 33 1.0 1.0
pair_coeff 34 34 1.0 1.0
pair_coeff 35 35 1.0 1.0
pair_coeff 36 36 1.0 1.0
pair_coeff 37 37 1.0 1.0
pair_coeff 38 38 1.0 1.0
pair_coeff 39 39 1.0 1.0
pair_coeff 40 40 1.0 1.0
pair_coeff 41 41 1.0 1.0
pair_coeff 42 42 1.0 1.0
pair_coeff 43 43 1.0 1.0
pair_coeff 44 44 1.0 1.0
pair_coeff 45 45 1.0 1.0
pair_coeff 46 46 1.0 1.0
pair_coeff 47 47 1.0 1.0
pair_coeff 48 48 1.0 1.0
pair_coeff 49 49 1.0 1.0
pair_coeff 50 50 1.0 1.0
pair_coeff 51 51 1.0 1.0
pair_coeff 52 52 1.0 1.0
pair_coeff 53 53 1.0 1.0
pair_coeff 54 54 1.0 1.0
pair_coeff 55 55 1.0 1.0
pair_coeff 56 56 1.0 1.0
pair_coeff 57 57 1.0 1.0
pair_coeff 58 58 1.0 1.0
pair_coeff 59 59 1.0 1.0
pair_coeff 60 60 1.0 1.0
pair_coeff 61 61 1.0 1.0
pair_coeff 62 62 1.0 1.0
pair_coeff 63 63 1.0 1.0
pair_coeff 64 64 1.0 1.0
pair_coeff 65 65 1.0 1.0
pair_coeff 66 66 1.0 1.0
pair_coeff 67 67 1.0 1.0
pair_coeff 68 68 1.0 1.0
pair_coeff 69 69 1.0 1.0
pair_coeff 70 70 1.0 1.0
pair_coeff 71 71 1.0 1.0
pair_coeff 72 72 1.0 1.0
pair_coeff 73 73 1.0 1.0
pair_coeff 74 74 1.0 1.0
pair_coeff 75 75 1.0 1.0
pair_coeff 76 76 1.0 1.0
pair_coeff 77 77 1.0 1.0
pair_coeff 78 78 1.0 1.0
pair_coeff 79 79 1.0 1.0
pair_coeff 80 80 1.0 1.0
pair_coeff 81 81 1.0 1.0
pair_coeff 82 82 1.0 1.0
pair_coeff 83 83 1.0 1.0
pair_coeff 84 84 1.0 1.0
pair_coeff 85 85 1.0 1.0
pair_coeff 86 86 1.0 1.0
pair_coeff 87 87 1.0 1.0
pair_coeff 88 88 1.0 1.0
pair_coeff 89 89 1.0 1.0
pair_coeff 90 90 1.0 1.0
pair_coeff 91 91 1.0 1.0
pair_coeff 92 92 1.0 1.0
pair_coeff 93 93 1.0 1.0
pair_coeff 94 94 1.0 1.0
pair_coeff 95 95 1.0 1.0
pair_coeff 96 96 1.0 1.0
pair_coeff 97 97 1.0 1.0
pair_coeff 98 98 1.0 1.0
pair_coeff 99 99 1.0 1.0
pair_coeff 100 100 1.0 1.0
compute da all displace/atom
dump 1 all atom 10 dump.md
thermo 1
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.38041 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 10.131397 0 10.131397 102.99334
1 0 3.6579372 0 3.6579372 60.806825
2 0 3.0194933 0 3.0194933 56.563578
3 0 2.6605525 0 2.6605525 54.146441
4 0 2.3447848 0 2.3447848 51.988246
5 0 2.1836205 0 2.1836205 50.890593
6 0 2.0595616 0 2.0595616 50.04605
7 0 1.9519103 0 1.9519103 49.305129
8 0 1.8797312 0 1.8797312 48.806422
9 0 1.8230663 0 1.8230663 48.416594
10 0 1.7618168 0 1.7618168 47.996459
Loop time of 0.0110788 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
10.1313968863 1.82306625671 1.76181675684
Force two-norm initial, final = 14853.2 526.892
Force max component initial, final = 333.003 154.237
Final line search alpha, max atom move = 0.000395826 0.0610509
Iterations, force evaluations = 10 13
Pair time (%) = 0.00475216 (42.894)
Neigh time (%) = 0.000601947 (5.43331)
Comm time (%) = 0.000704825 (6.3619)
Outpt time (%) = 0.00406402 (36.6828)
Other time (%) = 0.00095588 (8.62798)
Nlocal: 625 ave 637 max 614 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Nghost: 275 ave 286 max 263 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 6779.25 ave 6929 max 6631 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 27117
Ave neighs/atom = 10.8468
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
10 0 13.855576 0 13.855576 126.15022
11 0 5.7386317 0 5.7386317 73.911894
12 0 4.0429707 0 4.0429707 62.56527
13 0 3.238614 0 3.238614 57.109892
14 0 2.4884651 0 2.4884651 51.940745
15 0 2.0958214 0 2.0958214 49.224795
16 0 1.7591268 0 1.7591268 46.897029
17 0 1.5877871 0 1.5877871 45.70491
18 0 1.4739685 0 1.4739685 44.914787
19 0 1.3756748 0 1.3756748 44.231775
20 0 1.2785795 0 1.2785795 43.556465
Loop time of 0.00972104 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
13.8555761196 1.37567476183 1.27857945482
Force two-norm initial, final = 24008.6 733.281
Force max component initial, final = 1448.98 310.85
Final line search alpha, max atom move = 0.000383264 0.119137
Iterations, force evaluations = 10 10
Pair time (%) = 0.00366753 (37.7278)
Neigh time (%) = 0.00060153 (6.18792)
Comm time (%) = 0.000533044 (5.48341)
Outpt time (%) = 0.00406796 (41.8469)
Other time (%) = 0.000850976 (8.75395)
Nlocal: 625 ave 632 max 615 min
Histogram: 1 0 0 0 0 1 0 0 1 1
Nghost: 275.25 ave 286 max 268 min
Histogram: 1 1 0 0 1 0 0 0 0 1
Neighs: 6797.25 ave 6867 max 6672 min
Histogram: 1 0 0 0 0 0 0 1 1 1
Total # of neighbors = 27189
Ave neighs/atom = 10.8756
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
20 0 10.453807 0 10.453807 103.11677
21 0 4.5433221 0 4.5433221 65.082297
22 0 3.5146935 0 3.5146935 58.258548
23 0 2.7050903 0 2.7050903 52.787155
24 0 2.2715623 0 2.2715623 49.814091
25 0 1.8851407 0 1.8851407 47.159818
26 0 1.5708258 0 1.5708258 44.974429
27 0 1.3890196 0 1.3890196 43.713996
28 0 1.1909213 0 1.1909213 42.334449
29 0 1.0181817 0 1.0181817 41.121861
30 0 0.89407363 0 0.89407363 40.258139
Loop time of 0.00972056 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
10.4538071086 1.01818167882 0.894073631386
Force two-norm initial, final = 20855.2 876.637
Force max component initial, final = 1585.58 289.452
Final line search alpha, max atom move = 0.000300637 0.0870201
Iterations, force evaluations = 10 10
Pair time (%) = 0.0036599 (37.6511)
Neigh time (%) = 0.000605106 (6.22501)
Comm time (%) = 0.000532508 (5.47816)
Outpt time (%) = 0.00407463 (41.9177)
Other time (%) = 0.000848413 (8.72802)
Nlocal: 625 ave 633 max 617 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 276.5 ave 285 max 267 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Neighs: 6819 ave 6928 max 6722 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 27276
Ave neighs/atom = 10.9104
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
30 0 9.1773214 0 9.1773214 94.19218
31 0 3.780095 0 3.780095 59.351334
32 0 2.1364679 0 2.1364679 48.310641
33 0 1.5981147 0 1.5981147 44.596478
34 0 1.1616454 0 1.1616454 41.559729
35 0 0.82858726 0 0.82858726 39.203145
36 0 0.57784722 0 0.57784722 37.453531
37 0 0.42911635 0 0.42911635 36.369814
38 0 0.32649469 0 0.32649469 35.641902
39 0 0.21091983 0 0.21091983 34.858374
40 0 0.13674971 0 0.13674971 34.342788
Loop time of 0.0113723 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
9.17732138819 0.210919834207 0.136749713408
Force two-norm initial, final = 19618.6 632.262
Force max component initial, final = 1558.39 105.69
Final line search alpha, max atom move = 0.000486832 0.0514532
Iterations, force evaluations = 10 13
Pair time (%) = 0.00474131 (41.6916)
Neigh time (%) = 0.000910223 (8.00384)
Comm time (%) = 0.000718117 (6.3146)
Outpt time (%) = 0.00406992 (35.788)
Other time (%) = 0.000932753 (8.20195)
Nlocal: 625 ave 632 max 610 min
Histogram: 1 0 0 0 0 0 0 1 0 2
Nghost: 279 ave 294 max 271 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Neighs: 6869.75 ave 6965 max 6677 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Total # of neighbors = 27479
Ave neighs/atom = 10.9916
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
40 0 7.1357354 0 7.1357354 80.465664
41 0 3.1864291 0 3.1864291 54.920554
42 0 1.4411303 0 1.4411303 43.175349
43 0 1.0616102 0 1.0616102 40.558071
44 0 0.76898007 0 0.76898007 38.515614
45 0 0.47634294 0 0.47634294 36.448833
46 0 0.26562782 0 0.26562782 34.961948
47 0 0.084628043 0 0.084628043 33.672837
48 0 -0.075634194 0 -0.075634194 32.547959
49 0 -0.21169535 0 -0.21169535 31.582919
50 0 -0.32991627 0 -0.32991627 30.759427
Loop time of 0.0101295 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
7.13573543411 -0.211695347593 -0.329916269499
Force two-norm initial, final = 16348.9 558.411
Force max component initial, final = 1619.91 103.265
Final line search alpha, max atom move = 0.000372042 0.038419
Iterations, force evaluations = 10 10
Pair time (%) = 0.00363976 (35.9324)
Neigh time (%) = 0.000923395 (9.11594)
Comm time (%) = 0.000634491 (6.26383)
Outpt time (%) = 0.00405943 (40.0756)
Other time (%) = 0.000872374 (8.61225)
Nlocal: 625 ave 635 max 614 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Nghost: 280.25 ave 290 max 270 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 6920.75 ave 7090 max 6722 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Total # of neighbors = 27683
Ave neighs/atom = 11.0732
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
50 0 5.7268723 0 5.7268723 70.877358
51 0 2.3773675 0 2.3773675 49.119056
52 0 0.80808652 0 0.80808652 38.523549
53 0 0.44195199 0 0.44195199 35.94379
54 0 0.15030268 0 0.15030268 33.910057
55 0 -0.022616619 0 -0.022616619 32.66055
56 0 -0.16415793 0 -0.16415793 31.640965
57 0 -0.29891652 0 -0.29891652 30.708285
58 0 -0.39214086 0 -0.39214086 30.03762
59 0 -0.53733622 0 -0.53733622 28.975916
60 0 -0.63338854 0 -0.63338854 28.306058
Loop time of 0.0101259 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
5.72687232351 -0.537336224163 -0.633388542189
Force two-norm initial, final = 14623.1 1106.47
Force max component initial, final = 1563.21 227.768
Final line search alpha, max atom move = 0.000202951 0.0462258
Iterations, force evaluations = 10 10
Pair time (%) = 0.00362068 (35.7568)
Neigh time (%) = 0.000916481 (9.05088)
Comm time (%) = 0.000623763 (6.16009)
Outpt time (%) = 0.00407392 (40.2327)
Other time (%) = 0.00089103 (8.79953)
Nlocal: 625 ave 634 max 620 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Nghost: 282.75 ave 289 max 274 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 6997.75 ave 7188 max 6858 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Total # of neighbors = 27991
Ave neighs/atom = 11.1964
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
60 0 4.8209421 0 4.8209421 64.493854
61 0 2.5903199 0 2.5903199 50.078603
62 0 0.62828159 0 0.62828159 36.984599
63 0 0.28949259 0 0.28949259 34.641614
64 0 -0.031290153 0 -0.031290153 32.384853
65 0 -0.31225362 0 -0.31225362 30.379734
66 0 -0.50896819 0 -0.50896819 28.973548
67 0 -0.6583808 0 -0.6583808 27.894862
68 0 -0.79610742 0 -0.79610742 26.903809
69 0 -0.90468984 0 -0.90468984 26.119699
70 0 -0.97482926 0 -0.97482926 25.617752
Loop time of 0.0100566 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.82094210064 -0.90468984047 -0.974829264854
Force two-norm initial, final = 14460.6 700.095
Force max component initial, final = 2632.26 308.176
Final line search alpha, max atom move = 0.000565214 0.174185
Iterations, force evaluations = 10 10
Pair time (%) = 0.00361013 (35.8983)
Neigh time (%) = 0.000908792 (9.03681)
Comm time (%) = 0.000601768 (5.98384)
Outpt time (%) = 0.00405914 (40.3631)
Other time (%) = 0.000876725 (8.71794)
Nlocal: 625 ave 635 max 619 min
Histogram: 1 1 0 1 0 0 0 0 0 1
Nghost: 288 ave 294 max 281 min
Histogram: 1 0 1 0 0 0 0 0 0 2
Neighs: 7080.5 ave 7261 max 7005 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Total # of neighbors = 28322
Ave neighs/atom = 11.3288
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
70 0 4.3980829 0 4.3980829 61.741323
71 0 1.1235581 0 1.1235581 40.327809
72 0 0.21759714 0 0.21759714 34.129029
73 0 -0.081434306 0 -0.081434306 32.015966
74 0 -0.31232862 0 -0.31232862 30.394279
75 0 -0.52674206 0 -0.52674206 28.841997
76 0 -0.73501424 0 -0.73501424 27.343739
77 0 -0.84805615 0 -0.84805615 26.519545
78 0 -0.93529547 0 -0.93529547 25.886702
79 0 -0.99512174 0 -0.99512174 25.451552
80 0 -1.0566397 0 -1.0566397 25.00379
Loop time of 0.00964552 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.39808292997 -0.995121736021 -1.05663967552
Force two-norm initial, final = 12965.9 449.496
Force max component initial, final = 1092.61 161.966
Final line search alpha, max atom move = 0.000603976 0.0978234
Iterations, force evaluations = 10 10
Pair time (%) = 0.00359595 (37.281)
Neigh time (%) = 0.000608981 (6.31361)
Comm time (%) = 0.000535309 (5.54982)
Outpt time (%) = 0.00404197 (41.9051)
Other time (%) = 0.000863314 (8.95041)
Nlocal: 625 ave 632 max 621 min
Histogram: 1 1 1 0 0 0 0 0 0 1
Nghost: 288 ave 293 max 286 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Neighs: 7081.25 ave 7231 max 6981 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 28325
Ave neighs/atom = 11.33
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
80 0 4.0361093 0 4.0361093 59.222435
81 0 1.3881798 0 1.3881798 41.953718
82 0 0.27963712 0 0.27963712 34.458153
83 0 -0.027669726 0 -0.027669726 32.363955
84 0 -0.37030062 0 -0.37030062 29.921155
85 0 -0.60764833 0 -0.60764833 28.255858
86 0 -0.7571808 0 -0.7571808 27.195934
87 0 -0.89460643 0 -0.89460643 26.202321
88 0 -0.96080854 0 -0.96080854 25.741651
89 0 -1.0194781 0 -1.0194781 25.324323
90 0 -1.0637943 0 -1.0637943 24.9965
Loop time of 0.011281 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
4.03610925969 -1.01947808117 -1.06379432983
Force two-norm initial, final = 12187.6 545.261
Force max component initial, final = 1351.69 92.3548
Final line search alpha, max atom move = 0.000969229 0.089513
Iterations, force evaluations = 10 13
Pair time (%) = 0.00468087 (41.4934)
Neigh time (%) = 0.000908017 (8.04907)
Comm time (%) = 0.000690103 (6.11738)
Outpt time (%) = 0.00406533 (36.037)
Other time (%) = 0.000936687 (8.30322)
Nlocal: 625 ave 630 max 621 min
Histogram: 1 0 1 0 0 1 0 0 0 1
Nghost: 287 ave 291 max 283 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 7107.5 ave 7201 max 7017 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 28430
Ave neighs/atom = 11.372
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
90 0 3.125718 0 3.125718 52.867181
91 0 0.53212967 0 0.53212967 35.875122
92 0 -0.34301857 0 -0.34301857 29.831322
93 0 -0.58227566 0 -0.58227566 28.151184
94 0 -0.79489064 0 -0.79489064 26.623007
95 0 -0.97574132 0 -0.97574132 25.298929
96 0 -1.0874104 0 -1.0874104 24.482683
97 0 -1.1719839 0 -1.1719839 23.862768
98 0 -1.2392036 0 -1.2392036 23.370772
99 0 -1.2964836 0 -1.2964836 22.943244
100 0 -1.3478016 0 -1.3478016 22.567963
Loop time of 0.0100085 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
3.12571795497 -1.29648360341 -1.34780157905
Force two-norm initial, final = 11455.2 530.242
Force max component initial, final = 1096.07 260.447
Final line search alpha, max atom move = 0.00055639 0.14491
Iterations, force evaluations = 10 10
Pair time (%) = 0.00359488 (35.9184)
Neigh time (%) = 0.000907004 (9.06238)
Comm time (%) = 0.000564218 (5.63741)
Outpt time (%) = 0.00406653 (40.6309)
Other time (%) = 0.000875831 (8.75091)
Nlocal: 625 ave 631 max 620 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Nghost: 290.5 ave 295 max 286 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 7139.5 ave 7236 max 7066 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Total # of neighbors = 28558
Ave neighs/atom = 11.4232
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0 3.0454477 0 3.0454477 52.211737
101 0 0.37762092 0 0.37762092 34.686047
102 0 -0.14581755 0 -0.14581755 31.107565
103 0 -0.42841021 0 -0.42841021 29.141463
104 0 -0.68603979 0 -0.68603979 27.322046
105 0 -0.83601254 0 -0.83601254 26.242466
106 0 -0.99162456 0 -0.99162456 25.109576
107 0 -1.1100134 0 -1.1100134 24.238707
108 0 -1.2082157 0 -1.2082157 23.525014
109 0 -1.2572245 0 -1.2572245 23.158233
110 0 -1.2876903 0 -1.2876903 22.942477
Loop time of 0.0104213 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
3.04544768565 -1.25722445623 -1.28769033314
Force two-norm initial, final = 10849.1 693.937
Force max component initial, final = 904.387 236.839
Final line search alpha, max atom move = 0.000117815 0.027903
Iterations, force evaluations = 10 11
Pair time (%) = 0.00395721 (37.9724)
Neigh time (%) = 0.000905097 (8.68508)
Comm time (%) = 0.000644565 (6.18508)
Outpt time (%) = 0.00403285 (38.6982)
Other time (%) = 0.000881553 (8.45916)
Nlocal: 625 ave 635 max 619 min
Histogram: 1 1 0 1 0 0 0 0 0 1
Nghost: 293.75 ave 299 max 288 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Neighs: 7176 ave 7351 max 7047 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Total # of neighbors = 28704
Ave neighs/atom = 11.4816
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
110 0 2.7373476 0 2.7373476 49.916663
111 0 0.27920158 0 0.27920158 33.800547
112 0 -0.20019322 0 -0.20019322 30.51445
113 0 -0.53185019 0 -0.53185019 28.218677
114 0 -0.83093001 0 -0.83093001 26.099658
115 0 -1.0242231 0 -1.0242231 24.694823
116 0 -1.1583232 0 -1.1583232 23.729291
117 0 -1.2254882 0 -1.2254882 23.24164
118 0 -1.3210175 0 -1.3210175 22.517164
119 0 -1.3863132 0 -1.3863132 22.040913
120 0 -1.4387351 0 -1.4387351 21.669171
Loop time of 0.0104623 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.7373476491 -1.38631322727 -1.43873508821
Force two-norm initial, final = 10875.8 508.589
Force max component initial, final = 982.141 152.413
Final line search alpha, max atom move = 0.000364516 0.0555571
Iterations, force evaluations = 10 11
Pair time (%) = 0.00395113 (37.7655)
Neigh time (%) = 0.000912726 (8.72396)
Comm time (%) = 0.000642478 (6.1409)
Outpt time (%) = 0.0040555 (38.763)
Other time (%) = 0.000900447 (8.6066)
Nlocal: 625 ave 635 max 616 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Nghost: 294.25 ave 298 max 289 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Neighs: 7165.25 ave 7309 max 7016 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Total # of neighbors = 28661
Ave neighs/atom = 11.4644
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
120 0 2.231579 0 2.231579 46.568479
121 0 0.10623671 0 0.10623671 32.582536
122 0 -0.57013613 0 -0.57013613 27.925741
123 0 -0.82491886 0 -0.82491886 26.137265
124 0 -1.032301 0 -1.032301 24.65678
125 0 -1.1759783 0 -1.1759783 23.611572
126 0 -1.2850499 0 -1.2850499 22.811747
127 0 -1.3713102 0 -1.3713102 22.160066
128 0 -1.421629 0 -1.421629 21.795582
129 0 -1.4557009 0 -1.4557009 21.556374
130 0 -1.4891168 0 -1.4891168 21.308053
Loop time of 0.0116708 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.23157898725 -1.45570090921 -1.48911681685
Force two-norm initial, final = 9946.33 373.99
Force max component initial, final = 1003.46 83.0505
Final line search alpha, max atom move = 0.000997891 0.0828754
Iterations, force evaluations = 10 13
Pair time (%) = 0.00466818 (39.9989)
Neigh time (%) = 0.0012086 (10.3558)
Comm time (%) = 0.00076437 (6.54944)
Outpt time (%) = 0.00407952 (34.955)
Other time (%) = 0.000950098 (8.14084)
Nlocal: 625 ave 634 max 616 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 294 ave 306 max 285 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Neighs: 7190.25 ave 7319 max 7089 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Total # of neighbors = 28761
Ave neighs/atom = 11.5044
Neighbor list builds = 4
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
130 0 2.468581 0 2.468581 48.058148
131 0 0.23907579 0 0.23907579 33.424381
132 0 -0.57700066 0 -0.57700066 27.834841
133 0 -0.78159015 0 -0.78159015 26.389175
134 0 -0.96518905 0 -0.96518905 25.087162
135 0 -1.1206213 0 -1.1206213 23.938563
136 0 -1.2989415 0 -1.2989415 22.649792
137 0 -1.3646103 0 -1.3646103 22.169499
138 0 -1.404925 0 -1.404925 21.882639
139 0 -1.4453211 0 -1.4453211 21.590433
140 0 -1.476957 0 -1.476957 21.357542
Loop time of 0.0113478 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.46858101287 -1.44532109888 -1.47695704321
Force two-norm initial, final = 10339.5 404.157
Force max component initial, final = 1066.37 79.792
Final line search alpha, max atom move = 0.000516371 0.0412023
Iterations, force evaluations = 10 13
Pair time (%) = 0.00466353 (41.0964)
Neigh time (%) = 0.000915468 (8.06738)
Comm time (%) = 0.000732958 (6.45905)
Outpt time (%) = 0.00408739 (36.0193)
Other time (%) = 0.000948429 (8.35785)
Nlocal: 625 ave 630 max 619 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Nghost: 297 ave 308 max 291 min
Histogram: 1 1 1 0 0 0 0 0 0 1
Neighs: 7210.75 ave 7293 max 7157 min
Histogram: 2 0 0 0 0 1 0 0 0 1
Total # of neighbors = 28843
Ave neighs/atom = 11.5372
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
140 0 2.0546282 0 2.0546282 45.130766
141 0 0.063378271 0 0.063378271 32.071438
142 0 -0.75409628 0 -0.75409628 26.467076
143 0 -0.99188685 0 -0.99188685 24.768145
144 0 -1.1940141 0 -1.1940141 23.325743
145 0 -1.2863947 0 -1.2863947 22.657693
146 0 -1.4049088 0 -1.4049088 21.739543
147 0 -1.4732876 0 -1.4732876 21.244532
148 0 -1.5158202 0 -1.5158202 20.947078
149 0 -1.5492971 0 -1.5492971 20.696077
150 0 -1.5871773 0 -1.5871773 20.409409
Loop time of 0.0121427 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.05462816117 -1.54929713072 -1.58717731498
Force two-norm initial, final = 10054.9 460.349
Force max component initial, final = 1183.37 112.406
Final line search alpha, max atom move = 0.00067079 0.0754006
Iterations, force evaluations = 10 15
Pair time (%) = 0.00539333 (44.4164)
Neigh time (%) = 0.000901341 (7.42293)
Comm time (%) = 0.000786364 (6.47605)
Outpt time (%) = 0.00408238 (33.6202)
Other time (%) = 0.000979245 (8.0645)
Nlocal: 625 ave 627 max 622 min
Histogram: 1 0 0 0 1 0 0 0 0 2
Nghost: 296 ave 303 max 292 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Neighs: 7215.25 ave 7277 max 7165 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Total # of neighbors = 28861
Ave neighs/atom = 11.5444
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
150 0 2.1501435 0 2.1501435 45.854873
151 0 0.062697748 0 0.062697748 32.113988
152 0 -0.57549777 0 -0.57549777 27.750033
153 0 -0.81350196 0 -0.81350196 26.101995
154 0 -1.0553818 0 -1.0553818 24.383326
155 0 -1.205518 0 -1.205518 23.2918
156 0 -1.3021965 0 -1.3021965 22.595323
157 0 -1.3787145 0 -1.3787145 22.023536
158 0 -1.4667773 0 -1.4667773 21.377314
159 0 -1.4990249 0 -1.4990249 21.14571
160 0 -1.5341333 0 -1.5341333 20.884655
Loop time of 0.00999629 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
2.15014348714 -1.49902493946 -1.53413332365
Force two-norm initial, final = 9686.62 465.976
Force max component initial, final = 1025.75 184.146
Final line search alpha, max atom move = 0.000606717 0.111725
Iterations, force evaluations = 10 10
Pair time (%) = 0.00359142 (35.9275)
Neigh time (%) = 0.000905931 (9.06267)
Comm time (%) = 0.000550687 (5.50891)
Outpt time (%) = 0.00407612 (40.7763)
Other time (%) = 0.000872135 (8.72458)
Nlocal: 625 ave 626 max 624 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 299.5 ave 310 max 292 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Neighs: 7219.25 ave 7277 max 7155 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 28877
Ave neighs/atom = 11.5508
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
160 0 1.8339319 0 1.8339319 43.573087
161 0 -0.15883074 0 -0.15883074 30.485408
162 0 -0.88242605 0 -0.88242605 25.479286
163 0 -1.1028834 0 -1.1028834 23.930039
164 0 -1.3016251 0 -1.3016251 22.476064
165 0 -1.4075881 0 -1.4075881 21.708433
166 0 -1.4998541 0 -1.4998541 21.023248
167 0 -1.5636502 0 -1.5636502 20.550491
168 0 -1.6221148 0 -1.6221148 20.112975
169 0 -1.6633102 0 -1.6633102 19.808456
170 0 -1.6929671 0 -1.6929671 19.579216
Loop time of 0.00960529 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
1.83393187932 -1.66331017868 -1.69296708374
Force two-norm initial, final = 9975.74 475.873
Force max component initial, final = 1076.81 151.665
Final line search alpha, max atom move = 0.00168794 0.256002
Iterations, force evaluations = 10 10
Pair time (%) = 0.00359744 (37.4527)
Neigh time (%) = 0.000600696 (6.2538)
Comm time (%) = 0.000516534 (5.3776)
Outpt time (%) = 0.00404549 (42.1173)
Other time (%) = 0.000845134 (8.79863)
Nlocal: 625 ave 627 max 624 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Nghost: 299 ave 310 max 288 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 7226.25 ave 7297 max 7198 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Total # of neighbors = 28905
Ave neighs/atom = 11.562
Neighbor list builds = 2
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
170 0 1.5622987 0 1.5622987 41.627713
171 0 0.20405372 0 0.20405372 32.73863
172 0 -0.87631997 0 -0.87631997 25.414386
173 0 -1.1619584 0 -1.1619584 23.352987
174 0 -1.353506 0 -1.353506 21.988949
175 0 -1.4434669 0 -1.4434669 21.346794
176 0 -1.5075839 0 -1.5075839 20.864291
177 0 -1.5868121 0 -1.5868121 20.27149
178 0 -1.6381464 0 -1.6381464 19.889565
179 0 -1.6692179 0 -1.6692179 19.666555
180 0 -1.70156 0 -1.70156 19.42945
Loop time of 0.0104083 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
1.56229868243 -1.66921787874 -1.70155998988
Force two-norm initial, final = 9429.39 308.746
Force max component initial, final = 1781.79 76.8635
Final line search alpha, max atom move = 0.000837531 0.0643756
Iterations, force evaluations = 10 11
Pair time (%) = 0.00394958 (37.9463)
Neigh time (%) = 0.00091511 (8.79208)
Comm time (%) = 0.000590384 (5.67222)
Outpt time (%) = 0.00406975 (39.1008)
Other time (%) = 0.00088352 (8.48857)
Nlocal: 625 ave 627 max 622 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 298.75 ave 305 max 293 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Neighs: 7228.75 ave 7301 max 7151 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 28915
Ave neighs/atom = 11.566
Neighbor list builds = 3
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
180 0 1.4799149 0 1.4799149 41.115498
181 0 -0.45828563 0 -0.45828563 28.305878
182 0 -1.1127382 0 -1.1127382 23.745438
183 0 -1.326884 0 -1.326884 22.195474
184 0 -1.4902693 0 -1.4902693 21.002247
185 0 -1.5727487 0 -1.5727487 20.392828
186 0 -1.6421989 0 -1.6421989 19.854484
187 0 -1.668478 0 -1.668478 19.66228
188 0 -1.7142757 0 -1.7142757 19.337461
189 0 -1.7362458 0 -1.7362458 19.174238
190 0 -1.7533386 0 -1.7533386 19.045232
Loop time of 0.0124404 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
1.47991489058 -1.73624575112 -1.75333859414
Force two-norm initial, final = 9131.13 352.22
Force max component initial, final = 915.834 100.035
Final line search alpha, max atom move = 0.000889415 0.0889726
Iterations, force evaluations = 10 15
Pair time (%) = 0.00540572 (43.4528)
Neigh time (%) = 0.00120753 (9.70649)
Comm time (%) = 0.000805736 (6.47674)
Outpt time (%) = 0.00404233 (32.4934)
Other time (%) = 0.000979125 (7.8705)
Nlocal: 625 ave 631 max 619 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 299.5 ave 307 max 290 min
Histogram: 1 0 0 1 0 0 0 0 1 1
Neighs: 7246.5 ave 7330 max 7147 min
Histogram: 1 0 1 0 0 0 0 0 0 2
Total # of neighbors = 28986
Ave neighs/atom = 11.5944
Neighbor list builds = 4
Dangerous builds = 0
minimize 0.001 0.001 10 1000
WARNING: Resetting reneighboring criteria during minimization
Memory usage per processor = 2.70701 Mbytes
Step Temp E_pair E_mol TotEng Press
190 0 1.4561992 0 1.4561992 40.970034
191 0 -0.12263898 0 -0.12263898 30.588401
192 0 -0.96453383 0 -0.96453383 24.824541
193 0 -1.1578126 0 -1.1578126 23.461941
194 0 -1.3007191 0 -1.3007191 22.433693
195 0 -1.4026624 0 -1.4026624 21.70126
196 0 -1.4911196 0 -1.4911196 21.053133
197 0 -1.5634641 0 -1.5634641 20.530531
198 0 -1.5924275 0 -1.5924275 20.314473
199 0 -1.6303321 0 -1.6303321 20.023515
200 0 -1.6520498 0 -1.6520498 19.869534
Loop time of 0.0108462 on 4 procs for 10 steps with 2500 atoms
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
1.45619920225 -1.63033211038 -1.6520498229
Force two-norm initial, final = 8836.39 561.478
Force max component initial, final = 1227.95 211.829
Final line search alpha, max atom move = 0.000129991 0.0275358
Iterations, force evaluations = 10 12
Pair time (%) = 0.00431436 (39.7777)
Neigh time (%) = 0.000909686 (8.38714)
Comm time (%) = 0.000643253 (5.93068)
Outpt time (%) = 0.00406945 (37.5196)
Other time (%) = 0.000909448 (8.38494)
Nlocal: 625 ave 626 max 623 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 298 ave 306 max 290 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 7246.75 ave 7320 max 7153 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Total # of neighbors = 28987
Ave neighs/atom = 11.5948
Neighbor list builds = 3
Dangerous builds = 0

View File

@@ -1,271 +0,0 @@
SPPARKS (26 Aug 2010)
# SPPARKS input for coupling MD/MC
seed 56789
app_style potts/strain 100
dimension 2
lattice sq/8n 1.0
region box block 0 50 0 50 -0.5 0.5
create_box box
Created box = (0 0 -0.5) to (50 50 0.5)
1 by 1 by 1 processor grid
create_sites box
Creating sites ...
2500 sites
2500 sites have 8 neighbors
set site range 1 100
2500 settings made for site
set d1 value 0.0
2500 settings made for d1
sector yes
solve_style tree
diag_style energy
temperature 1.0
stats 10.0
dump 1 10.0 dump.mc
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
0 0 0 0 0 19766
10 17929 0 0 0.0776 5324
Loop time of 0.077626 on 1 procs
Solve time (%) = 0.0070138 (9.03537)
Update time (%) = 0 (0)
Comm time (%) = 0.00146031 (1.88122)
Outpt time (%) = 0.00415015 (5.34634)
App time (%) = 0.0614648 (79.1807)
Other time (%) = 0.00353694 (4.55639)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
10 17929 0 0 0 5324
20 24582 0 0 0.0279 4750
Loop time of 0.027904 on 1 procs
Solve time (%) = 0.00271416 (9.72676)
Update time (%) = 0 (0)
Comm time (%) = 0.000461817 (1.65502)
Outpt time (%) = 0.0042181 (15.1165)
App time (%) = 0.0191288 (68.5521)
Other time (%) = 0.00138116 (4.94967)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
20 24582 0 0 0 4750
30 30109 0 0 0.0234 4264
Loop time of 0.023375 on 1 procs
Solve time (%) = 0.00216675 (9.2695)
Update time (%) = 0 (0)
Comm time (%) = 0.000342846 (1.46672)
Outpt time (%) = 0.00416183 (17.8046)
App time (%) = 0.015563 (66.5796)
Other time (%) = 0.00114059 (4.87954)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
30 30109 0 0 0 4264
40 35032 0 0 0.0208 3880
Loop time of 0.020786 on 1 procs
Solve time (%) = 0.0019896 (9.57182)
Update time (%) = 0 (0)
Comm time (%) = 0.000316858 (1.52438)
Outpt time (%) = 0.00413609 (19.8984)
App time (%) = 0.0132952 (63.962)
Other time (%) = 0.00104833 (5.04341)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
40 35032 0 0 0 3880
50 39543 0 0 0.0193 3784
Loop time of 0.0193491 on 1 procs
Solve time (%) = 0.00185919 (9.60865)
Update time (%) = 0 (0)
Comm time (%) = 0.000333071 (1.72138)
Outpt time (%) = 0.00412297 (21.3083)
App time (%) = 0.0120728 (62.3946)
Other time (%) = 0.000961065 (4.96698)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
50 39543 0 0 0 3784
60 43899 0 0 0.0187 3706
Loop time of 0.018713 on 1 procs
Solve time (%) = 0.00176001 (9.40526)
Update time (%) = 0 (0)
Comm time (%) = 0.000314474 (1.68051)
Outpt time (%) = 0.00413895 (22.118)
App time (%) = 0.0116153 (62.0706)
Other time (%) = 0.000884295 (4.72556)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
60 43899 0 0 0 3706
70 48028 0 0 0.0179 3392
Loop time of 0.0178769 on 1 procs
Solve time (%) = 0.00174618 (9.76781)
Update time (%) = 0 (0)
Comm time (%) = 0.000296593 (1.65909)
Outpt time (%) = 0.00414801 (23.2032)
App time (%) = 0.0108221 (60.5367)
Other time (%) = 0.000864029 (4.83322)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
70 48028 0 0 0 3392
80 51900 0 0 0.017 3208
Loop time of 0.0169971 on 1 procs
Solve time (%) = 0.00157022 (9.23819)
Update time (%) = 0 (0)
Comm time (%) = 0.000290394 (1.70849)
Outpt time (%) = 0.00416803 (24.522)
App time (%) = 0.0101995 (60.0076)
Other time (%) = 0.0007689 (4.52371)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
80 51900 0 0 0 3208
90 55450 0 0 0.0159 3162
Loop time of 0.0158761 on 1 procs
Solve time (%) = 0.0014894 (9.38143)
Update time (%) = 0 (0)
Comm time (%) = 0.000301123 (1.89671)
Outpt time (%) = 0.00413799 (26.0644)
App time (%) = 0.00924802 (58.2514)
Other time (%) = 0.00069952 (4.40613)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
90 55450 0 0 0 3162
100 59009 0 0 0.0158 3038
Loop time of 0.0158172 on 1 procs
Solve time (%) = 0.00142503 (9.00938)
Update time (%) = 0 (0)
Comm time (%) = 0.000296831 (1.87664)
Outpt time (%) = 0.00412178 (26.0589)
App time (%) = 0.00923729 (58.4004)
Other time (%) = 0.000736237 (4.65467)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
100 59009 0 0 0 3038
110 62454 0 0 0.0155 3066
Loop time of 0.015537 on 1 procs
Solve time (%) = 0.00135612 (8.72834)
Update time (%) = 0 (0)
Comm time (%) = 0.000293732 (1.89053)
Outpt time (%) = 0.00415182 (26.7221)
App time (%) = 0.00904965 (58.2457)
Other time (%) = 0.000685692 (4.41328)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
110 62454 0 0 0 3066
120 65856 0 0 0.0152 2898
Loop time of 0.0152211 on 1 procs
Solve time (%) = 0.00139046 (9.13505)
Update time (%) = 0 (0)
Comm time (%) = 0.000291109 (1.91253)
Outpt time (%) = 0.0041492 (27.2595)
App time (%) = 0.00871038 (57.2257)
Other time (%) = 0.00067997 (4.46728)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
120 65856 0 0 0 2898
130 68856 0 0 0.0137 2798
Loop time of 0.0137191 on 1 procs
Solve time (%) = 0.00130701 (9.52695)
Update time (%) = 0 (0)
Comm time (%) = 0.000296831 (2.16364)
Outpt time (%) = 0.00412989 (30.1032)
App time (%) = 0.00742817 (54.1448)
Other time (%) = 0.000557184 (4.06138)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
130 68856 0 0 0 2798
140 71967 0 0 0.0141 2700
Loop time of 0.0141542 on 1 procs
Solve time (%) = 0.00132799 (9.38232)
Update time (%) = 0 (0)
Comm time (%) = 0.000284195 (2.00785)
Outpt time (%) = 0.00414681 (29.2974)
App time (%) = 0.00777125 (54.9042)
Other time (%) = 0.000623941 (4.40817)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
140 71967 0 0 0 2700
150 74773 0 0 0.0132 2644
Loop time of 0.0131881 on 1 procs
Solve time (%) = 0.00111961 (8.48956)
Update time (%) = 0 (0)
Comm time (%) = 0.000277758 (2.10612)
Outpt time (%) = 0.00413394 (31.3459)
App time (%) = 0.00711513 (53.951)
Other time (%) = 0.000541687 (4.10738)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
150 74773 0 0 0 2644
160 77662 0 0 0.0134 2616
Loop time of 0.0133891 on 1 procs
Solve time (%) = 0.00116301 (8.68621)
Update time (%) = 0 (0)
Comm time (%) = 0.00027895 (2.08341)
Outpt time (%) = 0.00415397 (31.025)
App time (%) = 0.00725079 (54.1544)
Other time (%) = 0.000542402 (4.05107)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
160 77662 0 0 0 2616
170 80475 0 0 0.0131 2538
Loop time of 0.0130939 on 1 procs
Solve time (%) = 0.00108457 (8.28296)
Update time (%) = 0 (0)
Comm time (%) = 0.000279665 (2.13583)
Outpt time (%) = 0.00412798 (31.5259)
App time (%) = 0.00700951 (53.5324)
Other time (%) = 0.000592232 (4.52294)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
170 80475 0 0 0 2538
180 83407 0 0 0.0136 2660
Loop time of 0.0136199 on 1 procs
Solve time (%) = 0.00117278 (8.61079)
Update time (%) = 0 (0)
Comm time (%) = 0.000262737 (1.92907)
Outpt time (%) = 0.00413108 (30.3312)
App time (%) = 0.0074532 (54.7229)
Other time (%) = 0.0006001 (4.40605)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
180 83407 0 0 0 2660
190 86407 0 0 0.0138 2656
Loop time of 0.013859 on 1 procs
Solve time (%) = 0.00119066 (8.59124)
Update time (%) = 0 (0)
Comm time (%) = 0.000247717 (1.7874)
Outpt time (%) = 0.00414801 (29.93)
App time (%) = 0.00767326 (55.3665)
Other time (%) = 0.000599384 (4.32486)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
190 86407 0 0 0 2656
200 88961 0 0 0.0124 2374
Loop time of 0.0124531 on 1 procs
Solve time (%) = 0.00107932 (8.6671)
Update time (%) = 0 (0)
Comm time (%) = 0.000233412 (1.87433)
Outpt time (%) = 0.00414777 (33.3072)
App time (%) = 0.00648117 (52.0447)
Other time (%) = 0.000511408 (4.10668)

View File

@@ -1,271 +0,0 @@
SPPARKS (26 Aug 2010)
# SPPARKS input for coupling MD/MC
seed 56789
app_style potts/strain 100
dimension 2
lattice sq/8n 1.0
region box block 0 50 0 50 -0.5 0.5
create_box box
Created box = (0 0 -0.5) to (50 50 0.5)
2 by 2 by 1 processor grid
create_sites box
Creating sites ...
2500 sites
2500 sites have 8 neighbors
set site range 1 100
2500 settings made for site
set d1 value 0.0
2500 settings made for d1
sector yes
solve_style tree
diag_style energy
temperature 1.0
stats 10.0
dump 1 10.0 dump.mc
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
0 0 0 0 0 19766
10 18695 0 0 0.024 5696
Loop time of 0.0240365 on 4 procs
Solve time (%) = 0.0017997 (7.48737)
Update time (%) = 0 (0)
Comm time (%) = 0.00263286 (10.9536)
Outpt time (%) = 0.00416493 (17.3275)
App time (%) = 0.0137114 (57.044)
Other time (%) = 0.00172764 (7.18756)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
10 18695 0 0 0 5696
20 25730 0 0 0.0114 4806
Loop time of 0.0113776 on 4 procs
Solve time (%) = 0.000647247 (5.68877)
Update time (%) = 0 (0)
Comm time (%) = 0.0013088 (11.5033)
Outpt time (%) = 0.00417346 (36.6812)
App time (%) = 0.0045594 (40.0733)
Other time (%) = 0.000688732 (6.05338)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
20 25730 0 0 0 4806
30 31377 0 0 0.00985 4308
Loop time of 0.00987101 on 4 procs
Solve time (%) = 0.000525832 (5.32704)
Update time (%) = 0 (0)
Comm time (%) = 0.0012235 (12.3949)
Outpt time (%) = 0.00415748 (42.1181)
App time (%) = 0.00343758 (34.825)
Other time (%) = 0.000526607 (5.33489)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
30 31377 0 0 0 4308
40 36311 0 0 0.0092 3840
Loop time of 0.00922179 on 4 procs
Solve time (%) = 0.000474155 (5.14168)
Update time (%) = 0 (0)
Comm time (%) = 0.00114667 (12.4344)
Outpt time (%) = 0.00414455 (44.943)
App time (%) = 0.00293165 (31.7905)
Other time (%) = 0.000524759 (5.69043)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
40 36311 0 0 0 3840
50 40886 0 0 0.00891 3534
Loop time of 0.0089252 on 4 procs
Solve time (%) = 0.000433862 (4.86109)
Update time (%) = 0 (0)
Comm time (%) = 0.00113171 (12.68)
Outpt time (%) = 0.00413698 (46.3517)
App time (%) = 0.00268257 (30.0561)
Other time (%) = 0.000540078 (6.05116)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
50 40886 0 0 0 3534
60 44888 0 0 0.00831 3306
Loop time of 0.00833625 on 4 procs
Solve time (%) = 0.000383437 (4.59963)
Update time (%) = 0 (0)
Comm time (%) = 0.00106108 (12.7285)
Outpt time (%) = 0.00413227 (49.5699)
App time (%) = 0.00232393 (27.8774)
Other time (%) = 0.000435531 (5.22455)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
60 44888 0 0 0 3306
70 48583 0 0 0.00806 3078
Loop time of 0.0080772 on 4 procs
Solve time (%) = 0.000353277 (4.37375)
Update time (%) = 0 (0)
Comm time (%) = 0.00102895 (12.739)
Outpt time (%) = 0.0041061 (50.8357)
App time (%) = 0.00213838 (26.4742)
Other time (%) = 0.000450492 (5.57732)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
70 48583 0 0 0 3078
80 52044 0 0 0.00786 2998
Loop time of 0.00788343 on 4 procs
Solve time (%) = 0.000333607 (4.23175)
Update time (%) = 0 (0)
Comm time (%) = 0.00101435 (12.8669)
Outpt time (%) = 0.00420028 (53.2799)
App time (%) = 0.00198931 (25.234)
Other time (%) = 0.000345886 (4.3875)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
80 52044 0 0 0 2998
90 55352 0 0 0.0083 3026
Loop time of 0.00831437 on 4 procs
Solve time (%) = 0.00031358 (3.77154)
Update time (%) = 0 (0)
Comm time (%) = 0.00111783 (13.4445)
Outpt time (%) = 0.00467652 (56.2462)
App time (%) = 0.00188529 (22.6751)
Other time (%) = 0.00032115 (3.86259)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
90 55352 0 0 0 3026
100 58494 0 0 0.00733 2742
Loop time of 0.00735307 on 4 procs
Solve time (%) = 0.000297189 (4.0417)
Update time (%) = 0 (0)
Comm time (%) = 0.000801027 (10.8938)
Outpt time (%) = 0.00418043 (56.8529)
App time (%) = 0.00179577 (24.422)
Other time (%) = 0.000278652 (3.7896)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
100 58494 0 0 0 2742
110 61498 0 0 0.00732 2636
Loop time of 0.00733691 on 4 procs
Solve time (%) = 0.000282168 (3.84587)
Update time (%) = 0 (0)
Comm time (%) = 0.000877678 (11.9625)
Outpt time (%) = 0.00414526 (56.4987)
App time (%) = 0.00174332 (23.7609)
Other time (%) = 0.000288486 (3.93199)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
110 61498 0 0 0 2636
120 64343 0 0 0.00719 2518
Loop time of 0.00721735 on 4 procs
Solve time (%) = 0.000270605 (3.74937)
Update time (%) = 0 (0)
Comm time (%) = 0.00078702 (10.9046)
Outpt time (%) = 0.00416625 (57.7254)
App time (%) = 0.00162369 (22.497)
Other time (%) = 0.000369787 (5.12359)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
120 64343 0 0 0 2518
130 66901 0 0 0.00709 2480
Loop time of 0.00710845 on 4 procs
Solve time (%) = 0.000247002 (3.47476)
Update time (%) = 0 (0)
Comm time (%) = 0.00100386 (14.1221)
Outpt time (%) = 0.00416774 (58.6307)
App time (%) = 0.00143349 (20.166)
Other time (%) = 0.00025636 (3.60641)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
130 66901 0 0 0 2480
140 69617 0 0 0.00723 2462
Loop time of 0.00725645 on 4 procs
Solve time (%) = 0.000262678 (3.61992)
Update time (%) = 0 (0)
Comm time (%) = 0.000848353 (11.691)
Outpt time (%) = 0.00418752 (57.7076)
App time (%) = 0.00152171 (20.9704)
Other time (%) = 0.000436187 (6.01102)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
140 69617 0 0 0 2462
150 72516 0 0 0.0077 2368
Loop time of 0.00771403 on 4 procs
Solve time (%) = 0.000279188 (3.61922)
Update time (%) = 0 (0)
Comm time (%) = 0.00115103 (14.9212)
Outpt time (%) = 0.00420797 (54.5495)
App time (%) = 0.00164241 (21.2911)
Other time (%) = 0.000433445 (5.61892)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
150 72516 0 0 0 2368
160 75201 0 0 0.00745 2384
Loop time of 0.00747746 on 4 procs
Solve time (%) = 0.00026089 (3.48901)
Update time (%) = 0 (0)
Comm time (%) = 0.00115579 (15.457)
Outpt time (%) = 0.00416714 (55.7293)
App time (%) = 0.0015164 (20.2796)
Other time (%) = 0.000377238 (5.045)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
160 75201 0 0 0 2384
170 77794 0 0 0.00709 2286
Loop time of 0.00710869 on 4 procs
Solve time (%) = 0.000261724 (3.68175)
Update time (%) = 0 (0)
Comm time (%) = 0.000879705 (12.3751)
Outpt time (%) = 0.00419676 (59.0371)
App time (%) = 0.00145274 (20.4362)
Other time (%) = 0.000317752 (4.46992)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
170 77794 0 0 0 2286
180 80320 0 0 0.00723 2186
Loop time of 0.00725418 on 4 procs
Solve time (%) = 0.000241041 (3.32279)
Update time (%) = 0 (0)
Comm time (%) = 0.00100464 (13.8491)
Outpt time (%) = 0.00413877 (57.0535)
App time (%) = 0.00144053 (19.8579)
Other time (%) = 0.000429213 (5.91677)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
180 80320 0 0 0 2186
190 82621 0 0 0.00686 2204
Loop time of 0.00687295 on 4 procs
Solve time (%) = 0.000222385 (3.23565)
Update time (%) = 0 (0)
Comm time (%) = 0.000860512 (12.5203)
Outpt time (%) = 0.00418121 (60.8357)
App time (%) = 0.00126499 (18.4053)
Other time (%) = 0.000343859 (5.00308)
run 10
Setting up run ...
Time Naccept Nreject Nsweeps CPU Energy
190 82621 0 0 0 2204
200 84907 0 0 0.00681 2276
Loop time of 0.0068413 on 4 procs
Solve time (%) = 0.000233412 (3.4118)
Update time (%) = 0 (0)
Comm time (%) = 0.000758171 (11.0823)
Outpt time (%) = 0.00421125 (61.5562)
App time (%) = 0.00125164 (18.2953)
Other time (%) = 0.000386834 (5.65439)

View File

@ -1 +0,0 @@
#define SPKPATH /home/sjplimp/spparks

View File

@ -1,42 +0,0 @@
# Makefile for coupling library
SHELL = /bin/sh
# System-specific settings
CC = g++
CCFLAGS = -g -O -DMPICH_IGNORE_CXX_SEEK
DEPFLAGS = -M
LINK = g++
LINKFLAGS = -g -O
ARCHIVE = ar
ARFLAGS = -rc
SIZE = size
# Files
LIB = libcouple.a
SRC = $(wildcard *.cpp)
INC = $(wildcard *.h)
OBJ = $(SRC:.cpp=.o)
# Targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(LIB) $(OBJ)
clean:
rm $(LIB) *.o *.d
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
include $(DEPENDS)

View File

@ -1,16 +0,0 @@
This directory has a small collection of routines, useful for
exchanging data between 2 codes being run together as a coupled
application. It is used by the LAMMPS <-> Quest and LAMMPS <->
SPPARKS applications in 2 sister directories.
The library dir has a Makefile (which you may need to edit for your
box). If you type
make -f Makefile.g++
you should create libcouple.a, which the other coupled applications
link to.
Note that the library uses MPI, so the Makefile you use needs to
include a path to the MPI include file, if it is not someplace
the compiler will find it.
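For reference, a minimal sketch of a coupled-code driver that links against
libcouple.a and uses the Error and Memory utility classes defined below.
The array size and string labels are invented for illustration; compile and
link it the same way the sister applications are built.

  #include "mpi.h"
  #include "memory.h"
  #include "error.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);

    /* every class in this library is handed the communicator to run on */
    Memory *memory = new Memory(MPI_COMM_WORLD);
    Error *error = new Error(MPI_COMM_WORLD);

    /* smalloc() prints an error and aborts if the allocation fails */
    double *work = (double *) memory->smalloc(100*sizeof(double),"driver:work");
    work[0] = 0.0;
    memory->sfree(work);

    if (me == 0) error->warning("demo driver finished");

    delete error;
    delete memory;
    MPI_Finalize();
    return 0;
  }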

View File

@ -1,42 +0,0 @@
#include "mpi.h"
#include "stdlib.h"
#include "stdio.h"
#include "error.h"
/* ---------------------------------------------------------------------- */
Error::Error(MPI_Comm caller)
{
comm = caller;
MPI_Comm_rank(comm,&me);
}
/* ----------------------------------------------------------------------
called by all procs
------------------------------------------------------------------------- */
void Error::all(const char *str)
{
if (me == 0) printf("ERROR: %s\n",str);
MPI_Finalize();
exit(1);
}
/* ----------------------------------------------------------------------
called by one proc
------------------------------------------------------------------------- */
void Error::one(const char *str)
{
printf("ERROR on proc %d: %s\n",me,str);
MPI_Abort(comm,1);
}
/* ----------------------------------------------------------------------
called by one proc
------------------------------------------------------------------------- */
void Error::warning(const char *str)
{
printf("WARNING: %s\n",str);
}

View File

@ -1,19 +0,0 @@
#ifndef ERROR_H
#define ERROR_H
#include "mpi.h"
class Error {
public:
Error(MPI_Comm);
void all(const char *);
void one(const char *);
void warning(const char *);
private:
MPI_Comm comm;
int me;
};
#endif

View File

@ -1,48 +0,0 @@
#include "stdio.h"
#include "string.h"
#include "files.h"
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
void replace(char *file, char *header, int n, char **lines)
{
FILE *fpr = fopen(file,"r");
FILE *fpw = fopen("tmp.file","w");
char line[MAXLINE];
while (fgets(line,MAXLINE,fpr)) {
if (strstr(line,header)) {
fprintf(fpw,"%s",line);
for (int i = 0; i < n; i++) {
fgets(line,MAXLINE,fpr);
fprintf(fpw,"%s",lines[i]);
}
} else fprintf(fpw,"%s",line);
}
fclose(fpr);
fclose(fpw);
rename("tmp.file",file);
}
/* ---------------------------------------------------------------------- */
char **extract(char *file, char *header, int n, char **lines)
{
FILE *fp = fopen(file,"r");
char line[MAXLINE];
while (fgets(line,MAXLINE,fp)) {
if (strstr(line,header)) {
for (int i = 0; i < n; i++) {
fgets(line,MAXLINE,fp);
sprintf(lines[i],"%s",line);
}
break;
}
}
fclose(fp);
return lines;
}

View File

@ -1,2 +0,0 @@
void replace(char *, char *, int, char **);
char **extract(char *, char *, int, char **);
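For reference, a hedged sketch of how replace() and extract() might be
called. The file name tmp.data, the "Atoms" header string, and the
replacement lines are invented; each lines[i] buffer must hold at least
MAXLINE (256) bytes because extract() copies whole lines into it.

  #include "stdio.h"
  #include "files.h"

  int main()
  {
    int n = 2;
    char **lines = new char*[n];
    for (int i = 0; i < n; i++) lines[i] = new char[256];

    /* copy the 2 lines that follow the "Atoms" header line into lines[] */
    extract((char *) "tmp.data",(char *) "Atoms",n,lines);
    for (int i = 0; i < n; i++) printf("extracted: %s",lines[i]);

    /* overwrite those same 2 lines in the file with new content */
    sprintf(lines[0],"1 1 0.0 0.0 0.0\n");
    sprintf(lines[1],"2 1 1.0 1.0 1.0\n");
    replace((char *) "tmp.data",(char *) "Atoms",n,lines);

    for (int i = 0; i < n; i++) delete [] lines[i];
    delete [] lines;
    return 0;
  }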

View File

@ -1,393 +0,0 @@
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "irregular.h"
#include "memory.h"
#include "error.h"
#define MAX(A,B) (((A) > (B)) ? (A) : (B))
enum{UNSET,SET};
enum{NONE,SAME,VARYING};
/* ---------------------------------------------------------------------- */
Irregular::Irregular(MPI_Comm caller)
{
comm = caller;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
memory = new Memory(comm);
error = new Error(comm);
init();
patternflag = UNSET;
sizestyle = NONE;
}
/* ---------------------------------------------------------------------- */
Irregular::~Irregular()
{
delete memory;
delete error;
deallocate();
}
/* ----------------------------------------------------------------------
n = # of datums contributed by this proc
proclist = which proc each datum is to be sent to
------------------------------------------------------------------------- */
void Irregular::pattern(int n, int *proclist)
{
// free any previous irregular info
deallocate();
init();
patternflag = SET;
sizestyle = NONE;
ndatumsend = n;
// list = 1 for procs I send to, including self
// nrecv = # of messages I receive, not including self
// self = 0 if no data for self, 1 if there is
int *list = new int[nprocs];
int *counts = new int[nprocs];
for (int i = 0; i < nprocs; i++) {
list[i] = 0;
counts[i] = 1;
}
for (int i = 0; i < n; i++) list[proclist[i]] = 1;
MPI_Reduce_scatter(list,&nrecv,counts,MPI_INT,MPI_SUM,comm);
self = 0;
if (list[me]) self = 1;
if (self) nrecv--;
// storage for recv info, not including self
recvproc = new int[nrecv];
recvcount = new int[nrecv];
recvsize = new int[nrecv];
request = new MPI_Request[nrecv];
status = new MPI_Status[nrecv];
// list = # of datums to send to each proc, including self
// nsend = # of messages I send, not including self
for (int i = 0; i < nprocs; i++) list[i] = 0;
for (int i = 0; i < n; i++) list[proclist[i]]++;
nsend = 0;
for (int i = 0; i < nprocs; i++) if (list[i] > 0) nsend++;
if (self) nsend--;
// storage for send info, including self
sendproc = new int[nsend+self];
sendcount = new int[nsend+self];
sendsize = new int[nsend+self];
sendindices = (int *) memory->smalloc(n*sizeof(int),"sendindices");
// setup sendprocs and sendcounts, including self
// each proc begins with iproc > me, and continues until iproc = me
// list ends up with pointer to which send that proc is associated with
int iproc = me;
int isend = 0;
for (int i = 0; i < nprocs; i++) {
iproc++;
if (iproc == nprocs) iproc = 0;
if (list[iproc] > 0) {
sendproc[isend] = iproc;
sendcount[isend] = list[iproc];
list[iproc] = isend;
isend++;
}
}
// post all receives for datum counts
for (int i = 0; i < nrecv; i++)
MPI_Irecv(&recvcount[i],1,MPI_INT,MPI_ANY_SOURCE,0,comm,&request[i]);
// barrier to insure receives are posted
MPI_Barrier(comm);
// send each datum count, packing buf with needed datums
for (int i = 0; i < nsend; i++)
MPI_Send(&sendcount[i],1,MPI_INT,sendproc[i],0,comm);
// insure all MPI_ANY_SOURCE messages are received
// set recvproc
if (nrecv) MPI_Waitall(nrecv,request,status);
for (int i = 0; i < nrecv; i++) recvproc[i] = status[i].MPI_SOURCE;
// ndatumrecv = total datums received, including self
ndatumrecv = 0;
for (int i = 0; i < nrecv; i++)
ndatumrecv += recvcount[i];
if (self) ndatumrecv += sendcount[nsend];
// setup sendindices, including self
// counts = offset into sendindices for each proc I send to
// let sc0 = sendcount[0], sc1 = sendcount[1], etc
// sendindices[0:sc0-1] = indices of datums in 1st message
// sendindices[sc0:sc0+sc1-1] = indices of datums in 2nd message, etc
counts[0] = 0;
for (int i = 1; i < nsend+self; i++)
counts[i] = counts[i-1] + sendcount[i-1];
for (int i = 0; i < n; i++) {
isend = list[proclist[i]];
sendindices[counts[isend]++] = i;
}
// clean up
delete [] counts;
delete [] list;
}
/* ----------------------------------------------------------------------
n = size of each received datum
return total size in bytes of received data on this proc
------------------------------------------------------------------------- */
int Irregular::size(int n)
{
if (patternflag == UNSET) error->all("Cannot size without pattern");
sizestyle = SAME;
nsize = n;
nsendmax = 0;
for (int i = 0; i < nsend+self; i++) {
sendsize[i] = nsize * sendcount[i];
if (i < nsend) nsendmax = MAX(nsendmax,sendsize[i]);
}
for (int i = 0; i < nrecv; i++) recvsize[i] = nsize * recvcount[i];
nbytesrecv = nsize * ndatumrecv;
return nbytesrecv;
}
/* ----------------------------------------------------------------------
slength,rlength = size of each datum to send and recv
soffset = offset into eventual buffer of send data for each datum
soffset can be NULL, in which case will build sendoffset from slength
return total size in bytes of received data on this proc
------------------------------------------------------------------------- */
int Irregular::size(int *slength, int *soffset, int *rlength)
{
if (patternflag == UNSET) error->all("Cannot size without pattern");
sizestyle = VARYING;
// store local copy of pointers to send lengths/offsets
// if soffset not provided, create local copy from slength
sendsizedatum = slength;
if (soffset == NULL) {
sendoffsetflag = 1;
sendoffset = (int *) memory->smalloc(ndatumsend*sizeof(int),"sendoffset");
if (ndatumsend) sendoffset[0] = 0;
for (int i = 1; i < ndatumsend; i++)
sendoffset[i] = sendoffset[i-1] + sendsizedatum[i-1];
} else {
if (sendoffsetflag) memory->sfree(sendoffset);
sendoffsetflag = 0;
sendoffset = soffset;
}
nsendmax = 0;
int m = 0;
for (int i = 0; i < nsend+self; i++) {
sendsize[i] = 0;
for (int j = 0; j < sendcount[i]; j++)
sendsize[i] += sendsizedatum[sendindices[m++]];
if (i < nsend) nsendmax = MAX(nsendmax,sendsize[i]);
}
nbytesrecv = 0;
m = 0;
for (int i = 0; i < nrecv; i++) {
recvsize[i] = 0;
for (int j = 0; j < recvcount[i]; j++) recvsize[i] += rlength[m++];
nbytesrecv += recvsize[i];
}
if (self) nbytesrecv += sendsize[nsend];
return nbytesrecv;
}
/* ----------------------------------------------------------------------
wrapper on 2 versions of exchange
------------------------------------------------------------------------- */
void Irregular::exchange(char *sendbuf, char *recvbuf)
{
if (sizestyle == SAME) exchange_same(sendbuf,recvbuf);
else if (sizestyle == VARYING) exchange_varying(sendbuf,recvbuf);
else error->all("Irregular size was not set");
}
/* ----------------------------------------------------------------------
sendbuf = data to send
recvbuf = buffer to recv all data into
requires nsize,nsendmax,recvsize,sendsize be setup by size(int)
------------------------------------------------------------------------- */
void Irregular::exchange_same(char *sendbuf, char *recvbuf)
{
// post all receives
int recvoffset = 0;
for (int irecv = 0; irecv < nrecv; irecv++) {
MPI_Irecv(&recvbuf[recvoffset],recvsize[irecv],MPI_BYTE,
recvproc[irecv],0,comm,&request[irecv]);
recvoffset += recvsize[irecv];
}
// malloc buf for largest send
char *buf = (char *) memory->smalloc(nsendmax,"buf");
// barrier to insure receives are posted
MPI_Barrier(comm);
// send each message, packing buf with needed datums
int m = 0;
for (int isend = 0; isend < nsend; isend++) {
int bufoffset = 0;
for (int i = 0; i < sendcount[isend]; i++) {
memcpy(&buf[bufoffset],&sendbuf[nsize*sendindices[m++]],nsize);
bufoffset += nsize;
}
MPI_Send(buf,sendsize[isend],MPI_BYTE,sendproc[isend],0,comm);
}
// copy self data directly from sendbuf to recvbuf
if (self)
for (int i = 0; i < sendcount[nsend]; i++) {
memcpy(&recvbuf[recvoffset],&sendbuf[nsize*sendindices[m++]],nsize);
recvoffset += nsize;
}
// free send buffer
memory->sfree(buf);
// wait on all incoming messages
if (nrecv) MPI_Waitall(nrecv,request,status);
}
/* ----------------------------------------------------------------------
sendbuf = data to send
recvbuf = buffer to recv all data into
requires nsendmax,recvsize,sendsize,sendoffset,sendsizedatum
be setup by size(int *, int *)
------------------------------------------------------------------------- */
void Irregular::exchange_varying(char *sendbuf, char *recvbuf)
{
// post all receives
int recvoffset = 0;
for (int irecv = 0; irecv < nrecv; irecv++) {
MPI_Irecv(&recvbuf[recvoffset],recvsize[irecv],MPI_BYTE,
recvproc[irecv],0,comm,&request[irecv]);
recvoffset += recvsize[irecv];
}
// malloc buf for largest send
char *buf = (char *) memory->smalloc(nsendmax,"buf");
// barrier to insure receives are posted
MPI_Barrier(comm);
// send each message, packing buf with needed datums
int index;
int m = 0;
for (int isend = 0; isend < nsend; isend++) {
int bufoffset = 0;
for (int i = 0; i < sendcount[isend]; i++) {
index = sendindices[m++];
memcpy(&buf[bufoffset],&sendbuf[sendoffset[index]],sendsizedatum[index]);
bufoffset += sendsizedatum[index];
}
MPI_Send(buf,sendsize[isend],MPI_BYTE,sendproc[isend],0,comm);
}
// copy self data directly from sendbuf to recvbuf
if (self)
for (int i = 0; i < sendcount[nsend]; i++) {
index = sendindices[m++];
memcpy(&recvbuf[recvoffset],&sendbuf[sendoffset[index]],
sendsizedatum[index]);
recvoffset += sendsizedatum[index];
}
// free send buffer
memory->sfree(buf);
// wait on all incoming messages
if (nrecv) MPI_Waitall(nrecv,request,status);
}
/* ---------------------------------------------------------------------- */
void Irregular::init()
{
sendoffsetflag = 0;
sendproc = sendcount = sendsize = sendindices = NULL;
sendoffset = NULL;
recvproc = recvcount = recvsize = NULL;
request = NULL;
status = NULL;
}
/* ---------------------------------------------------------------------- */
void Irregular::deallocate()
{
delete [] sendproc;
delete [] sendcount;
delete [] sendsize;
memory->sfree(sendindices);
if (sendoffsetflag) memory->sfree(sendoffset);
delete [] recvproc;
delete [] recvcount;
delete [] recvsize;
delete [] request;
delete [] status;
}

View File

@ -1,58 +0,0 @@
#ifndef IRREGULAR_H
#define IRREGULAR_H
#include "mpi.h"
class Irregular {
public:
Irregular(MPI_Comm);
~Irregular();
void pattern(int, int *);
int size(int);
int size(int *, int *, int *);
void exchange(char *, char *);
private:
int me,nprocs;
int patternflag; // UNSET,SET
int sizestyle; // NONE,SAME,VARYING
int self; // 0 = no data to copy to self, 1 = yes
int ndatumsend; // # of datums to send w/ self
int ndatumrecv; // # of datums to recv w/ self
int nbytesrecv; // total bytes in received data w/ self
int nsend; // # of messages to send w/out self
int nrecv; // # of messages to recv w/out self
int nsendmax; // # of bytes in largest send message, w/out self
int *sendproc; // list of procs to send to w/out self
int *sendcount; // # of datums to send to each proc w/ self
int *sendsize; // # of bytes to send to each proc w/ self
int *sendindices; // indices of datums to send to each proc w/ self
int nsize; // size of every datum in bytes (SAME)
int *sendsizedatum; // bytes in each datum to send w/ self (VARYING)
int *sendoffset; // byte offset to where each datum starts w/ self
int sendoffsetflag; // 1 if allocated sendoffset, 0 if passed in
int *recvproc; // list of procs to recv from w/out self
int *recvcount; // # of datums to recv from each proc w/out self
int *recvsize; // # of bytes to recv from each proc w/out self
MPI_Request *request; // MPI requests for posted recvs
MPI_Status *status; // MPI statuses for Waitall
MPI_Comm comm; // MPI communicator for all communication
class Memory *memory;
class Error *error;
void exchange_same(char *, char *);
void exchange_varying(char *, char *);
void init();
void deallocate();
};
#endif
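For reference, a minimal sketch (not from the original distribution) of
driving the Irregular class with fixed-size datums. The datum count and
the routing rule, datum i to proc (me+i) % nprocs, are arbitrary choices
for illustration.

  #include "mpi.h"
  #include "irregular.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me,nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

    // each proc contributes 4 doubles; datum i goes to proc (me+i) % nprocs
    int n = 4;
    double sendbuf[4];
    int proclist[4];
    for (int i = 0; i < n; i++) {
      sendbuf[i] = me + 0.25*i;
      proclist[i] = (me + i) % nprocs;
    }

    Irregular *irregular = new Irregular(MPI_COMM_WORLD);
    irregular->pattern(n,proclist);                 // who sends how much to whom
    int nbytes = irregular->size(sizeof(double));   // every datum is one double
    double *recvbuf = new double[nbytes/sizeof(double)];
    irregular->exchange((char *) sendbuf,(char *) recvbuf);

    delete [] recvbuf;
    delete irregular;
    MPI_Finalize();
    return 0;
  }

The same pattern can be reused for repeated exchanges; calling size() again
with a different datum size re-sizes the messages without recomputing the
communication pattern.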

View File

@ -1,249 +0,0 @@
#include "stdlib.h"
#include "string.h"
#include "lammps_data_write.h"
#include "memory.h"
#include "error.h"
#define DELTA 4
enum{INT,DOUBLE,DOUBLE2};
/* ---------------------------------------------------------------------- */
LAMMPSDataWrite::LAMMPSDataWrite(MPI_Comm caller_comm) : Send2One(caller_comm)
{
outfile = NULL;
nheader = maxheader = 0;
format = NULL;
headtype = NULL;
ihead = NULL;
dhead = NULL;
ddhead = NULL;
nper = maxper = 0;
atomtype = NULL;
ivec = NULL;
dvec = NULL;
stride = NULL;
}
/* ---------------------------------------------------------------------- */
LAMMPSDataWrite::~LAMMPSDataWrite()
{
delete [] outfile;
for (int i = 0; i < nheader; i++) delete [] format[i];
memory->sfree(format);
memory->sfree(headtype);
memory->sfree(ihead);
memory->sfree(dhead);
memory->destroy_2d_double_array(ddhead);
memory->sfree(atomtype);
memory->sfree(ivec);
memory->sfree(dvec);
memory->sfree(stride);
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::pre()
{
if (me == 0) {
fp = fopen(outfile,"w");
if (fp == NULL)
error->one("Could not open data_write output file");
}
if (me == 0) {
fprintf(fp,"%s","LAMMPS data file\n\n");
for (int i = 0; i < nheader; i++)
if (headtype[i] == INT) fprintf(fp,format[i],ihead[i]);
else if (headtype[i] == DOUBLE) fprintf(fp,format[i],dhead[i]);
else if (headtype[i] == DOUBLE2) fprintf(fp,format[i],
ddhead[i][0],ddhead[i][1]);
fprintf(fp,"\nAtoms\n\n");
}
}
/* ---------------------------------------------------------------------- */
int LAMMPSDataWrite::size()
{
return nper*nlocal*sizeof(double);
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::pack(char *cbuf)
{
int i,j;
double *dbuf = (double *) cbuf;
int m = 0;
for (i = 0; i < nlocal; i++)
for (j = 0; j < nper; j++) {
if (i == 0) {
if (atomtype[j] == 0)
printf("AT %d %d %p %d\n",
atomtype[j],stride[j],ivec[j],ivec[j][0]);
else
printf("AT %d %d %p %g\n",
atomtype[j],stride[j],dvec[j],dvec[j][0]);
}
if (atomtype[j] == INT) dbuf[m++] = ivec[j][i*stride[j]];
else if (atomtype[j] == DOUBLE) dbuf[m++] = dvec[j][i*stride[j]];
}
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::process(int nbuf, char *cbuf)
{
int i,j;
double *dbuf = (double *) cbuf;
int n = nbuf/nper/sizeof(double);
int m = 0;
for (i = 0; i < n; i++) {
for (j = 0; j < nper; j++) {
double dvalue = dbuf[m++];
if (atomtype[j] == INT) fprintf(fp,"%d ",static_cast<int> (dvalue));
else if (atomtype[j] == DOUBLE) fprintf(fp,"%g ",dvalue);
}
fprintf(fp,"\n");
}
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::post()
{
if (me == 0) fclose(fp);
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::file(char *fname)
{
int n = strlen(fname) + 1;
outfile = new char[n];
strcpy(outfile,fname);
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::header(char *str, int ivalue)
{
if (nheader == maxheader) grow_header();
int n = strlen(str) + 2;
format[nheader] = new char[n];
strcpy(format[nheader],str);
format[nheader][n-2] = '\n';
format[nheader][n-1] = '\0';
headtype[nheader] = INT;
ihead[nheader] = ivalue;
nheader++;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::header(char *str, double dvalue)
{
if (nheader == maxheader) grow_header();
int n = strlen(str) + 2;
format[nheader] = new char[n];
strcpy(format[nheader],str);
format[nheader][n-2] = '\n';
format[nheader][n-1] = '\0';
headtype[nheader] = DOUBLE;
dhead[nheader] = dvalue;
nheader++;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::header(char *str, double dvalue1, double dvalue2)
{
if (nheader == maxheader) grow_header();
int n = strlen(str) + 2;
format[nheader] = new char[n];
strcpy(format[nheader],str);
format[nheader][n-2] = '\n';
format[nheader][n-1] = '\0';
headtype[nheader] = DOUBLE2;
ddhead[nheader][0] = dvalue1;
ddhead[nheader][1] = dvalue2;
nheader++;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::atoms(int n)
{
nlocal = n;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::atoms(int *vec)
{
if (nper == maxper) grow_peratom();
atomtype[nper] = INT;
ivec[nper] = vec;
stride[nper] = 1;
nper++;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::atoms(double *vec)
{
if (nper == maxper) grow_peratom();
atomtype[nper] = DOUBLE;
dvec[nper] = vec;
stride[nper] = 1;
nper++;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::atoms(int n, double **vec)
{
if (nper+n >= maxper) grow_peratom();
for (int i = 0; i < n; i++) {
atomtype[nper] = DOUBLE;
dvec[nper] = &vec[0][i];
stride[nper] = n;
nper++;
}
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::grow_header()
{
int n = maxheader + DELTA;
format = (char **) memory->srealloc(format,n*sizeof(char *),"ldw:format");
headtype = (int *) memory->srealloc(headtype,n*sizeof(int),"ldw:headtype");
ihead = (int *) memory->srealloc(ihead,n*sizeof(int),"ldw:ihead");
dhead = (double *) memory->srealloc(dhead,n*sizeof(double),"ldw:dhead");
ddhead = memory->grow_2d_double_array(ddhead,n,2,"ldw:ddhead");
maxheader = n;
}
/* ---------------------------------------------------------------------- */
void LAMMPSDataWrite::grow_peratom()
{
int n = maxper + DELTA;
atomtype = (int *) memory->srealloc(atomtype,n*sizeof(int),"ldw:atomtype");
ivec = (int **) memory->srealloc(ivec,n*sizeof(int *),"ldw:ivec");
dvec = (double **) memory->srealloc(dvec,n*sizeof(double *),"ldw:dvec");
stride = (int *) memory->srealloc(stride,n*sizeof(int),"ldw:stride");
maxper = n;
}

View File

@ -1,48 +0,0 @@
#ifndef LAMMPS_DATA_WRITE_H
#define LAMMPS_DATA_WRITE_H
#include "send2one.h"
#include "stdio.h"
class LAMMPSDataWrite : public Send2One {
public:
LAMMPSDataWrite(MPI_Comm);
~LAMMPSDataWrite();
void pre();
int size();
void pack(char *);
void process(int, char *);
void post();
void file(char *);
void header(char *, int);
void header(char *, double);
void header(char *, double, double);
void atoms(int);
void atoms(int *);
void atoms(double *);
void atoms(int, double **);
private:
char *outfile;
int nlocal;
FILE *fp;
int nheader,maxheader;
char **format;
int *headtype,*ihead;
double *dhead;
double **ddhead;
int nper,maxper;
int *atomtype;
int **ivec;
double **dvec;
int *stride;
void grow_header();
void grow_peratom();
};
#endif
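For reference, a sketch of how this class might be driven to write a small
data file. The file name, box bounds, and per-atom values are invented;
the coordinates are stored contiguously because atoms(int,double **)
strides through vec[0].

  #include "mpi.h"
  #include "lammps_data_write.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me,nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

    // each proc owns 2 atoms: global ID, type, and x,y,z coords
    int nlocal = 2;
    int *id = new int[nlocal];
    int *type = new int[nlocal];
    double *xdata = new double[3*nlocal];
    double **x = new double*[nlocal];
    for (int i = 0; i < nlocal; i++) {
      id[i] = 2*me + i + 1;
      type[i] = 1;
      x[i] = &xdata[3*i];
      x[i][0] = x[i][1] = x[i][2] = 0.5*id[i];
    }

    LAMMPSDataWrite *ldw = new LAMMPSDataWrite(MPI_COMM_WORLD);
    ldw->file((char *) "tmp.data");
    ldw->header((char *) "%d atoms",2*nprocs);
    ldw->header((char *) "%d atom types",1);
    ldw->header((char *) "%g %g xlo xhi",0.0,10.0);
    ldw->atoms(nlocal);          // per-proc atom count
    ldw->atoms(id);              // one column per call: ID, type, x/y/z
    ldw->atoms(type);
    ldw->atoms(3,x);
    ldw->execute();              // proc 0 collects everything and writes the file

    delete ldw;
    delete [] id;
    delete [] type;
    delete [] x;
    delete [] xdata;
    MPI_Finalize();
    return 0;
  }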

View File

@ -1,316 +0,0 @@
#include "mpi.h"
#include "many2many.h"
#include "irregular.h"
#include "memory.h"
#include "error.h"
#include <map>
#define MAX(A,B) (((A) > (B)) ? (A) : (B))
/* ---------------------------------------------------------------------- */
Many2Many::Many2Many(MPI_Comm caller)
{
comm = caller;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
memory = new Memory(comm);
error = new Error(comm);
src_own = dest_own = NULL;
src_off = dest_off = NULL;
src_iwork = dest_iwork = NULL;
src_dwork = dest_dwork = NULL;
irregular = NULL;
}
/* ---------------------------------------------------------------------- */
Many2Many::~Many2Many()
{
delete memory;
delete error;
deallocate();
}
/* ----------------------------------------------------------------------
create a many2many pattern, deallocating any previous pattern
each proc will contribute nsrc items with IDs listed in id_src
each proc will receive ndest items with IDs listed in id_dest
only sets up communication via rendezvous algorithm and Irregular class
if id_src does not match id_dest on all procs
------------------------------------------------------------------------- */
void Many2Many::setup(int nsrc, int *id_src, int ndest, int *id_dest)
{
int i,j,isrc,idest,nsend,nrecv;
int *proclist,*work;
std::map<int,int> hash;
std::map<int,int>::iterator loc;
// free any previous many2many info
deallocate();
// allocate on-proc and off-proc index lists
src_own =
(int *) memory->smalloc(nsrc*sizeof(int),"many2many:src_own");
dest_own =
(int *) memory->smalloc(ndest*sizeof(int),"many2many:dest_own");
src_off =
(int *) memory->smalloc(nsrc*sizeof(int),"many2many:src_off");
dest_off =
(int *) memory->smalloc(ndest*sizeof(int),"many2many:dest_off");
// store destination IDs in hash
for (int i = 0; i < ndest; i++)
hash.insert(std::pair<int,int> (id_dest[i],i));
// src_own, dest_own = list of IDs in both src and dest
// nsrc_off = list of IDs in src owned by other procs
nown = nsrc_off = 0;
for (i = 0; i < nsrc; i++) {
loc = hash.find(id_src[i]);
if (loc != hash.end()) {
src_own[nown] = i;
dest_own[nown] = loc->second;
nown++;
} else src_off[nsrc_off++] = i;
}
// all done if all procs have one-to-one mapping of src and dest IDs
// else figure out irregular comm needed
int flag = 0;
if (nown == nsrc && nown == ndest) flag = 1;
int flagall;
MPI_Allreduce(&flag,&flagall,1,MPI_INT,MPI_MIN,comm);
if (flagall) return;
// ndest_off = list of IDs in dest owned by other procs
work = (int *) memory->smalloc(ndest*sizeof(int),"many2many:work");
for (i = 0; i < ndest; i++) work[i] = 0;
for (i = 0; i < nown; i++) work[dest_own[i]] = 1;
ndest_off = 0;
for (i = 0; i < ndest; i++)
if (work[i] == 0) dest_off[ndest_off++] = i;
memory->sfree(work);
// realloc off-proc arrays to smaller size
src_off = (int *)
memory->srealloc(src_off,nsrc_off*sizeof(int),"many2many:src_off");
dest_off = (int *)
memory->srealloc(dest_off,ndest_off*sizeof(int),"many2many:dest_off");
// send off-proc src and dest Datums to 3rd-party proc via irregular comm
// proc = ID % nprocs
nsend = nsrc_off + ndest_off;
proclist = new int[nsend];
Datum1 *send1 = new Datum1[nsend];
for (i = 0; i < nsrc_off; i++) {
proclist[i] = id_src[src_off[i]] % nprocs;
send1[i].id = id_src[src_off[i]];
send1[i].proc = me;
send1[i].index = src_off[i];
}
for (i = 0, j = nsrc_off; i < ndest_off; i++, j++) {
proclist[j] = id_dest[dest_off[i]] % nprocs;
send1[j].id = -id_dest[dest_off[i]];
send1[j].proc = me;
send1[j].index = dest_off[i];
}
irregular = new Irregular(comm);
irregular->pattern(nsend,proclist);
nrecv = irregular->size(sizeof(Datum1)) / sizeof(Datum1);
Datum1 *recv1 = new Datum1[nrecv];
irregular->exchange((char *) send1, (char *) recv1);
delete irregular;
delete [] proclist;
// as 3rd-party proc, now have matching pairs of off-proc IDs
// store src IDs (which are positive) in hash
// loop over dest IDs (which are negative) to find matches
// send match info back to src procs via a 2nd irregular comm
nsend = nrecv/2;
proclist = new int[nsend];
Datum2 *send2 = new Datum2[nsend];
nsend = 0;
hash.clear();
for (isrc = 0; isrc < nrecv; isrc++)
if (recv1[isrc].id > 0)
hash.insert(std::pair<int,int> (recv1[isrc].id,isrc));
for (idest = 0; idest < nrecv; idest++)
if (recv1[idest].id < 0) {
loc = hash.find(-recv1[idest].id);
if (loc != hash.end()) {
isrc = loc->second;
proclist[nsend] = recv1[isrc].proc;
send2[nsend].slocal = recv1[isrc].index;
send2[nsend].dlocal = recv1[idest].index;
send2[nsend].dproc = recv1[idest].proc;
nsend++;
} else error->one("Did not receive matching src/dest ID");
}
irregular = new Irregular(comm);
irregular->pattern(nsend,proclist);
nrecv = irregular->size(sizeof(Datum2)) / sizeof(Datum2);
Datum2 *recv2 = new Datum2[nrecv];
irregular->exchange((char *) send2, (char *) recv2);
delete irregular;
delete [] proclist;
// use list of received src->dest Datums to build final irregular comm
// irregular comm will communicate off-proc info from src to dest directly
// work = local indices of dest IDs to send initially
nsend = nrecv;
proclist = new int[nsend];
work = new int[nsend];
for (i = 0; i < nrecv; i++) {
src_off[i] = recv2[i].slocal;
work[i] = recv2[i].dlocal;
proclist[i] = recv2[i].dproc;
}
irregular = new Irregular(comm);
irregular->pattern(nsend,proclist);
// send receiver's local indices
// receiver stores them as indirection list in dest_off
nrecv = irregular->size(sizeof(int)) / sizeof(int);
irregular->exchange((char *) work, (char *) dest_off);
delete [] proclist;
delete [] work;
// create work arrays for data exchange of int/double data
src_iwork =
(int *) memory->smalloc(nsrc_off*sizeof(int),"many2many:src_iwork");
dest_iwork =
(int *) memory->smalloc(ndest_off*sizeof(int),"many2many:dest_iwork");
src_dwork =
(double *) memory->smalloc(nsrc_off*sizeof(double),"many2many:src_dwork");
dest_dwork =
(double *) memory->smalloc(ndest_off*sizeof(double),
"many2many:dest_dwork");
// clean up
delete [] send1;
delete [] recv1;
delete [] send2;
delete [] recv2;
// debug checks for full coverage of src/dest - can delete eventually
work = new int[MAX(nsrc,ndest)];
for (i = 0; i < nsrc; i++) work[i] = 0;
for (i = 0; i < nown; i++) work[src_own[i]]++;
for (i = 0; i < nsrc_off; i++) work[src_off[i]]++;
for (i = 0; i < nsrc; i++)
if (work[i] != 1) {
char str[128];
sprintf(str,"BAD SRC %d: %d %d\n",me,i,work[i]);
error->one(str);
}
for (i = 0; i < ndest; i++) work[i] = 0;
for (i = 0; i < nown; i++) work[dest_own[i]]++;
for (i = 0; i < ndest_off; i++) work[dest_off[i]]++;
for (i = 0; i < ndest; i++)
if (work[i] != 1) {
char str[128];
sprintf(str,"BAD DEST %d: %d %d\n",me,i,work[i]);
error->one(str);
}
delete [] work;
}
/* ----------------------------------------------------------------------
transfer one src entity to dest entity, matched by IDs in create()
operates on an int vector
------------------------------------------------------------------------- */
void Many2Many::exchange(int *src, int *dest)
{
int i;
// copy on-proc info
for (i = 0; i < nown; i++)
dest[dest_own[i]] = src[src_own[i]];
// communicate off-proc info
// use src_off and dest_off to pack/unpack data
if (irregular) {
int nrecv = irregular->size(sizeof(int)) / sizeof(int);
for (i = 0; i < nsrc_off; i++) src_iwork[i] = src[src_off[i]];
irregular->exchange((char *) src_iwork, (char *) dest_iwork);
for (i = 0; i < ndest_off; i++) dest[dest_off[i]] = dest_iwork[i];
}
}
/* ----------------------------------------------------------------------
transfer one src entity to dest entity, matched by IDs in create()
operates on a double vector
------------------------------------------------------------------------- */
void Many2Many::exchange(double *src, double *dest)
{
int i;
// copy on-proc info
for (int i = 0; i < nown; i++)
dest[dest_own[i]] = src[src_own[i]];
// communicate off-proc info
// use src_off and dest_off to pack/unpack data
if (irregular) {
int nrecv = irregular->size(sizeof(double)) / sizeof(double);
for (i = 0; i < nsrc_off; i++) src_dwork[i] = src[src_off[i]];
irregular->exchange((char *) src_dwork, (char *) dest_dwork);
for (i = 0; i < ndest_off; i++) dest[dest_off[i]] = dest_dwork[i];
}
}
/* ---------------------------------------------------------------------- */
void Many2Many::deallocate()
{
memory->sfree(src_own);
memory->sfree(dest_own);
memory->sfree(src_off);
memory->sfree(dest_off);
memory->sfree(src_iwork);
memory->sfree(dest_iwork);
memory->sfree(src_dwork);
memory->sfree(dest_dwork);
delete irregular;
}

View File

@ -1,47 +0,0 @@
#ifndef MANY2MANY_H
#define MANY2MANY_H
#include "mpi.h"
class Many2Many {
public:
Many2Many(MPI_Comm);
~Many2Many();
void setup(int, int *, int, int *);
void exchange(int *, int *);
void exchange(double *, double *);
protected:
int me,nprocs;
MPI_Comm comm;
class Memory *memory;
class Error *error;
int nown; // # of IDs common to src and dest
int nsrc_off,ndest_off; // # of off-processor IDs
int *src_own,*dest_own; // indices of the owned IDs
int *src_off,*dest_off; // indices of the off-proc IDs
int *src_iwork,*dest_iwork; // work arrays for comm of ints
double *src_dwork,*dest_dwork; // work arrays for comm of doubles
class Irregular *irregular; // irregular comm from src->dest
struct Datum1 {
int id; // src or dest global ID
int proc; // owning proc
int index; // local index on owning proc
};
struct Datum2 {
int slocal; // local index of src ID on sending proc
int dlocal; // local index of dest ID on receiving proc
int dproc; // receiving proc
};
void deallocate();
};
#endif
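For reference, a minimal sketch of using Many2Many to move per-entity
values between two different decompositions of the same global IDs. The
IDs 1..4*nprocs, block-wise on the src side and round-robin on the dest
side, are an arbitrary example; setup() requires every ID to appear
exactly once in each decomposition across all procs.

  #include "mpi.h"
  #include "many2many.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me,nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

    int n = 4;
    int *id_src = new int[n];
    int *id_dest = new int[n];
    double *src = new double[n];
    double *dest = new double[n];
    for (int i = 0; i < n; i++) {
      id_src[i] = 4*me + i + 1;           // block decomposition
      id_dest[i] = i*nprocs + me + 1;     // round-robin decomposition
      src[i] = 1.0*id_src[i];
    }

    Many2Many *m2m = new Many2Many(MPI_COMM_WORLD);
    m2m->setup(n,id_src,n,id_dest);       // match src IDs to dest IDs
    m2m->exchange(src,dest);              // dest[i] = value owned by id_dest[i]

    delete m2m;
    delete [] id_src;
    delete [] id_dest;
    delete [] src;
    delete [] dest;
    MPI_Finalize();
    return 0;
  }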

View File

@ -1,103 +0,0 @@
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "many2one.h"
#include "memory.h"
/* ---------------------------------------------------------------------- */
Many2One::Many2One(MPI_Comm caller_comm)
{
comm = caller_comm;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
memory = new Memory(comm);
if (me == 0) {
counts = new int[nprocs];
multicounts = new int[nprocs];
displs = new int[nprocs];
multidispls = new int[nprocs];
} else counts = multicounts = displs = multidispls = NULL;
idall = NULL;
}
/* ---------------------------------------------------------------------- */
Many2One::~Many2One()
{
delete memory;
delete [] counts;
delete [] multicounts;
delete [] displs;
delete [] multidispls;
memory->sfree(idall);
}
/* ---------------------------------------------------------------------- */
void Many2One::setup(int nsrc_in, int *id, int ndest)
{
nsrc = nsrc_in;
MPI_Allreduce(&nsrc,&nall,1,MPI_INT,MPI_SUM,comm);
MPI_Gather(&nsrc,1,MPI_INT,counts,1,MPI_INT,0,comm);
if (me == 0) {
displs[0] = 0;
for (int i = 1; i < nprocs; i++)
displs[i] = displs[i-1] + counts[i-1];
}
// gather IDs into idall
idall = NULL;
if (me == 0)
idall = (int *) memory->smalloc(nall*sizeof(int),"many2one:idall");
MPI_Gatherv(id,nsrc,MPI_INT,idall,counts,displs,MPI_INT,0,comm);
}
/* ---------------------------------------------------------------------- */
void Many2One::gather(double *src, int n, double *dest)
{
int i,j,ii,jj,m;
if (me == 0)
for (int i = 0; i < nprocs; i++) {
multicounts[i] = n*counts[i];
multidispls[i] = n*displs[i];
}
// allgather src into desttmp
double *desttmp = NULL;
if (me == 0)
desttmp = (double *) memory->smalloc(n*nall*sizeof(double),
"many2one:idsttmp");
MPI_Gatherv(src,n*nsrc,MPI_DOUBLE,desttmp,multicounts,multidispls,
MPI_DOUBLE,0,comm);
// use idall to move datums from desttmp to dest
if (me == 0) {
if (n == 1)
for (i = 0; i < nall; i++) {
j = idall[i] - 1;
dest[j] = desttmp[i];
}
else
for (i = 0; i < nall; i++) {
j = idall[i] - 1;
ii = n*i;
jj = n*j;
for (m = 0; m < n; m++)
dest[jj++] = desttmp[ii++];
}
}
memory->sfree(desttmp);
}

View File

@ -1,25 +0,0 @@
#ifndef MANY2ONE_H
#define MANY2ONE_H
#include "mpi.h"
class Many2One {
public:
Many2One(MPI_Comm);
~Many2One();
void setup(int, int *, int);
void gather(double *, int, double *);
protected:
int me,nprocs;
MPI_Comm comm;
class Memory *memory;
int nsrc,nall;
int *counts,*multicounts;
int *displs,*multidispls;
int *idall;
};
#endif
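For reference, a sketch of gathering distributed per-entity values to proc 0
with Many2One. The IDs are assumed to run from 1 to the global count, since
gather() stores each value at dest[ID-1]; the counts and values are invented.

  #include "mpi.h"
  #include "many2one.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me,nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

    // each proc owns 3 entities with global IDs 1..3*nprocs (block-wise)
    int nlocal = 3;
    int nglobal = 3*nprocs;
    int *id = new int[nlocal];
    double *src = new double[nlocal];
    for (int i = 0; i < nlocal; i++) {
      id[i] = 3*me + i + 1;
      src[i] = 10.0*id[i];
    }
    double *dest = NULL;
    if (me == 0) dest = new double[nglobal];    // only proc 0 needs the result

    Many2One *m2o = new Many2One(MPI_COMM_WORLD);
    m2o->setup(nlocal,id,nglobal);
    m2o->gather(src,1,dest);                    // on proc 0: dest[ID-1] = value

    delete m2o;
    delete [] id;
    delete [] src;
    delete [] dest;
    MPI_Finalize();
    return 0;
  }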

View File

@ -1,120 +0,0 @@
#include "mpi.h"
#include "stdlib.h"
#include "stdio.h"
#include "memory.h"
#include "error.h"
/* ---------------------------------------------------------------------- */
Memory::Memory(MPI_Comm comm)
{
error = new Error(comm);
}
/* ---------------------------------------------------------------------- */
Memory::~Memory()
{
delete error;
}
/* ----------------------------------------------------------------------
safe malloc
------------------------------------------------------------------------- */
void *Memory::smalloc(int n, const char *name)
{
if (n == 0) return NULL;
void *ptr = malloc(n);
if (ptr == NULL) {
char str[128];
sprintf(str,"Failed to allocate %d bytes for array %s",n,name);
error->one(str);
}
return ptr;
}
/* ----------------------------------------------------------------------
safe free
------------------------------------------------------------------------- */
void Memory::sfree(void *ptr)
{
if (ptr == NULL) return;
free(ptr);
}
/* ----------------------------------------------------------------------
safe realloc
------------------------------------------------------------------------- */
void *Memory::srealloc(void *ptr, int n, const char *name)
{
if (n == 0) {
sfree(ptr);
return NULL;
}
ptr = realloc(ptr,n);
if (ptr == NULL) {
char str[128];
sprintf(str,"Failed to reallocate %d bytes for array %s",n,name);
error->one(str);
}
return ptr;
}
/* ----------------------------------------------------------------------
create a 2d double array
------------------------------------------------------------------------- */
double **Memory::create_2d_double_array(int n1, int n2, const char *name)
{
double *data = (double *) smalloc(n1*n2*sizeof(double),name);
double **array = (double **) smalloc(n1*sizeof(double *),name);
int n = 0;
for (int i = 0; i < n1; i++) {
array[i] = &data[n];
n += n2;
}
return array;
}
/* ----------------------------------------------------------------------
grow or shrink 1st dim of a 2d double array
last dim must stay the same
if either dim is 0, return NULL
------------------------------------------------------------------------- */
double **Memory::grow_2d_double_array(double **array,
int n1, int n2, const char *name)
{
if (array == NULL) return create_2d_double_array(n1,n2,name);
double *data = (double *) srealloc(array[0],n1*n2*sizeof(double),name);
array = (double **) srealloc(array,n1*sizeof(double *),name);
int n = 0;
for (int i = 0; i < n1; i++) {
array[i] = &data[n];
n += n2;
}
return array;
}
/* ----------------------------------------------------------------------
free a 2d double array
------------------------------------------------------------------------- */
void Memory::destroy_2d_double_array(double **array)
{
if (array == NULL) return;
sfree(array[0]);
sfree(array);
}

View File

@ -1,23 +0,0 @@
#ifndef MEMORY_H
#define MEMORY_H
#include "mpi.h"
class Memory {
public:
Memory(MPI_Comm);
~Memory();
void *smalloc(int n, const char *);
void sfree(void *);
void *srealloc(void *, int n, const char *name);
double **create_2d_double_array(int, int, const char *);
double **grow_2d_double_array(double **, int, int, const char *);
void destroy_2d_double_array(double **);
private:
class Error *error;
};
#endif
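For reference, a short sketch of the 2d array helpers. The array is
allocated as one contiguous block plus a vector of row pointers, so
array[0] can be handed directly to MPI or to the I/O classes as n1*n2
contiguous doubles; the sizes and label strings are invented.

  #include "mpi.h"
  #include "memory.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    Memory *memory = new Memory(MPI_COMM_WORLD);

    int n1 = 10, n2 = 3;
    double **x = memory->create_2d_double_array(n1,n2,"demo:x");
    for (int i = 0; i < n1; i++)
      for (int j = 0; j < n2; j++) x[i][j] = 0.0;

    // grow the first dimension; existing values are preserved
    x = memory->grow_2d_double_array(x,2*n1,n2,"demo:x");
    memory->destroy_2d_double_array(x);

    delete memory;
    MPI_Finalize();
    return 0;
  }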

View File

@ -1,76 +0,0 @@
#include "mpi.h"
#include "one2many.h"
#include "memory.h"
#include <map>
/* ---------------------------------------------------------------------- */
One2Many::One2Many(MPI_Comm caller_comm)
{
comm = caller_comm;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
memory = new Memory(comm);
hash = new std::map<int,int>();
}
/* ---------------------------------------------------------------------- */
One2Many::~One2Many()
{
delete memory;
delete hash;
}
/* ---------------------------------------------------------------------- */
void One2Many::setup(int nsrc_in, int ndest, int *id)
{
nsrc = nsrc_in;
// store my local IDs in hash
hash->clear();
for (int i = 0; i < ndest; i++)
hash->insert(std::pair<int,int> (id[i],i));
}
/* ---------------------------------------------------------------------- */
void One2Many::scatter(double *src, int n, double *dest)
{
int i,j,k,m;
// allocate src on procs that don't have it
int flag = 0;
if (src == NULL) {
src = (double *) memory->smalloc(n*nsrc*sizeof(double),"one2many:src");
flag = 1;
}
// broadcast src from 0 to other procs
MPI_Bcast(src,n*nsrc,MPI_DOUBLE,0,comm);
// each proc loops over entire src
// if I own the global ID, copy src values into dest
std::map<int,int>::iterator loc;
for (m = 1; m <= nsrc; m++) {
loc = hash->find(m);
if (loc == hash->end()) continue;
i = n*loc->second;
j = n*(m-1);
if (n == 1) dest[i] = src[j];
else
for (k = 0; k < n; k++)
dest[i++] = src[j++];
}
// free locally allocated src
if (flag) memory->sfree(src);
}

View File

@ -1,24 +0,0 @@
#ifndef ONE2MANY_H
#define ONE2MANY_H
#include "mpi.h"
#include <map>
class One2Many {
public:
One2Many(MPI_Comm);
~One2Many();
void setup(int, int, int *);
void scatter(double *, int, double *);
protected:
int me,nprocs;
MPI_Comm comm;
class Memory *memory;
std::map<int,int> *hash;
int nsrc;
};
#endif
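For reference, a sketch of scattering per-ID values that live only on proc 0
out to the procs that own each ID. IDs are assumed to run 1..nsrc, 3 values
per ID (e.g. coords) are used here, and the round-robin decomposition is an
arbitrary example; procs other than 0 simply pass NULL for src.

  #include "mpi.h"
  #include "one2many.h"

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    int me,nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

    int nlocal = 4;
    int nglobal = 4*nprocs;
    int *id = new int[nlocal];
    for (int i = 0; i < nlocal; i++) id[i] = i*nprocs + me + 1;

    double *src = NULL;
    if (me == 0) {
      src = new double[3*nglobal];        // 3 doubles per global ID, on proc 0
      for (int i = 0; i < 3*nglobal; i++) src[i] = 0.1*i;
    }
    double *dest = new double[3*nlocal];

    One2Many *o2m = new One2Many(MPI_COMM_WORLD);
    o2m->setup(nglobal,nlocal,id);
    o2m->scatter(src,3,dest);             // dest[3*i..3*i+2] = values for id[i]

    delete o2m;
    delete [] id;
    delete [] dest;
    if (me == 0) delete [] src;
    MPI_Finalize();
    return 0;
  }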

View File

@ -1,83 +0,0 @@
#include "mpi.h"
#include "stdlib.h"
#include "stdio.h"
#include "send2one.h"
#include "memory.h"
#include "error.h"
/* ---------------------------------------------------------------------- */
Send2One::Send2One(MPI_Comm caller_comm)
{
comm = caller_comm;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&nprocs);
memory = new Memory(comm);
error = new Error(comm);
buf = NULL;
maxbuf = 0;
}
/* ---------------------------------------------------------------------- */
Send2One::~Send2One()
{
delete memory;
delete error;
memory->sfree(buf);
}
/* ---------------------------------------------------------------------- */
void Send2One::execute()
{
int nme,nmax,nsize,ping;
MPI_Status status;
MPI_Request request;
// pre-processing before ping loop
pre();
// nme = size of data I contribute, in bytes
// nmax = max size of data on any proc, in bytes
// reallocate buf if necessary
nme = size();
MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,comm);
if (nmax > maxbuf) {
maxbuf = nmax;
memory->sfree(buf);
buf = (char *) memory->smalloc(maxbuf,"foo:buf");
}
// pack my data into buf
pack(buf);
// proc 0 pings each proc, receives its data
// all other procs wait for ping, send their data to proc 0
// invoke process() to work with data
if (me == 0) {
for (int iproc = 0; iproc < nprocs; iproc++) {
if (iproc) {
MPI_Irecv(buf,maxbuf,MPI_CHAR,iproc,0,comm,&request);
MPI_Send(&ping,0,MPI_INT,iproc,0,comm);
MPI_Wait(&request,&status);
MPI_Get_count(&status,MPI_CHAR,&nsize);
} else nsize = nme;
process(nsize,buf);
}
} else {
MPI_Recv(&ping,0,MPI_INT,0,0,comm,&status);
MPI_Rsend(buf,nme,MPI_CHAR,0,0,comm);
}
post();
}

View File

@ -1,29 +0,0 @@
#ifndef SEND2ONE_H
#define SEND2ONE_H
#include "mpi.h"
class Send2One {
public:
Send2One(MPI_Comm);
virtual ~Send2One();
void execute();
protected:
int me,nprocs;
MPI_Comm comm;
class Memory *memory;
class Error *error;
int maxbuf;
char *buf;
virtual void pre() = 0;
virtual int size() = 0;
virtual void pack(char *) = 0;
virtual void process(int, char *) = 0;
virtual void post() = 0;
};
#endif
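Send2One is abstract; a user class derives from it, fills in the five
hooks, and calls execute(). For reference, a toy sketch (not part of the
library) in which every proc contributes one line of text and proc 0
prints the lines in proc order:

  #include "mpi.h"
  #include "stdio.h"
  #include "string.h"
  #include "send2one.h"

  class PrintRanks : public Send2One {
   public:
    PrintRanks(MPI_Comm caller_comm) : Send2One(caller_comm) {}
    void pre() {}
    int size() { return sprintf(line,"hello from proc %d\n",me) + 1; }
    void pack(char *buf) { strcpy(buf,line); }
    void process(int nbuf, char *buf) { printf("%s",buf); }
    void post() {}
   private:
    char line[64];
  };

  int main(int narg, char **arg)
  {
    MPI_Init(&narg,&arg);
    PrintRanks *p = new PrintRanks(MPI_COMM_WORLD);
    p->execute();     // proc 0 pings each proc in turn and prints its line
    delete p;
    MPI_Finalize();
    return 0;
  }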

View File

@ -1,78 +0,0 @@
This directory has a simple C, C++, and Fortran code that shows how
LAMMPS can be linked to a driver application as a library. The purpose
is to illustrate how another code could perform computations while
using LAMMPS to perform MD on all or a subset of the processors, or
how an umbrella code or script could call both LAMMPS and some other
code to perform a coupled calculation.
simple.cpp is the C++ driver
simple.c is the C driver
simple.f90 is the Fortran driver
libfwrapper.c is the Fortran-to-C wrapper
The 3 codes do the same thing, so you can compare them to see how to
drive LAMMPS in this manner. The C driver is similar in spirit to what
one could use to write a scripting language interface. The Fortran
driver additionally requires a wrapper library that exposes the C
interface of the LAMMPS library to Fortran and translates the MPI
communicator from Fortran to C.
You can then build any of the driver codes with compile lines like the
following, which include paths to the LAMMPS library interface, MPI,
and FFTW (assuming you built LAMMPS as a library with its PPPM
solver).
This builds the C++ driver with the LAMMPS library using a C++ compiler:
g++ -I/home/sjplimp/lammps/src -c simple.cpp
g++ -L/home/sjplimp/lammps/src simple.o \
-llmp_g++ -lfftw -lmpich -lmpl -lpthread -o simpleCC
This builds the C driver with the LAMMPS library using a C compiler:
gcc -I/home/sjplimp/lammps/src -c simple.c
gcc -L/home/sjplimp/lammps/src simple.o \
-llmp_g++ -lfftw -lmpich -lmpl -lpthread -lstdc++ -o simpleC
This builds the Fortran wrapper and driver with the LAMMPS library
using a Fortran and C compiler:
cp ../fortran/libfwrapper.c .
gcc -I/home/sjplimp/lammps/src -c libfwrapper.c
gfortran -I/home/sjplimp/lammps/src -c simple.f90
gfortran -L/home/sjplimp/lammps/src simple.o libfwrapper.o \
-llmp_g++ -lfftw -lfmpich -lmpich -lpthread -lstdc++ -o simpleF
You then run simpleCC, simpleC, or simpleF on a parallel machine
on some number of processors Q with 2 arguments:
mpirun -np Q simpleCC P in.lj
P is the number of procs you want LAMMPS to run on (must be <= Q) and
in.lj is a LAMMPS input script.
The driver will launch LAMMPS on P procs, read the input script a line
at a time, and pass each command line to LAMMPS. The final line of
the script is a "run" command, so LAMMPS will run the problem.
The driver then requests all the atom coordinates from LAMMPS, moves
one of the atoms a small amount "epsilon", passes the coordinates back
to LAMMPS, and runs LAMMPS again. If you look at the output, you
should see a small energy change between runs, due to the moved atom.
The C driver is calling C-style routines in the src/library.cpp file
of LAMMPS. You can add any functions you need to this file to
manipulate LAMMPS data from the driver.
The Fortran driver is using the same C-style routines, but requires an
additional wrapper to make them Fortran callable. Only a subset of the
library functions are currently wrapped, but it should be clear how to
extend the wrapper if desired.
The C++ driver does the same thing, except that it instantiates LAMMPS
as an object first. Some of the functions in src/library.cpp can be
invoked directly as methods within appropriate LAMMPS classes, which
is what the driver does. Any public LAMMPS class method could be
called from the driver this way. However the get/put functions are
only implemented in src/library.cpp, so the C++ driver calls them as
C-style functions.

View File

@ -1,24 +0,0 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
atom_modify map array
lattice fcc 0.8442
region box block 0 4 0 4 0 4
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
run 10

View File

@ -1,124 +0,0 @@
LAMMPS (20 Sep 2010)
# 3d Lennard-Jones melt
units lj
atom_style atomic
atom_modify map array
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 4 0 4 0 4
create_box 1 box
Created orthogonal box = (0 0 0) to (6.71838 6.71838 6.71838)
1 by 1 by 1 processor grid
create_atoms 1 box
Created 256 atoms
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
run 10
Memory usage per processor = 1.50139 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6218056 -5.0244179
10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175
Loop time of 0.00370193 on 1 procs for 10 steps with 256 atoms
Pair time (%) = 0.00340414 (91.9559)
Neigh time (%) = 0 (0)
Comm time (%) = 0.000165701 (4.47607)
Outpt time (%) = 2.31266e-05 (0.624718)
Other time (%) = 0.000108957 (2.94326)
Nlocal: 256 ave 256 max 256 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1431 ave 1431 max 1431 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 9984 ave 9984 max 9984 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9984
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds = 0
run 10
Memory usage per processor = 1.50139 Mbytes
Step Temp E_pair E_mol TotEng Press
10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175
20 0.6239063 -5.557644 0 -4.6254403 0.97451173
Loop time of 0.00365806 on 1 procs for 10 steps with 256 atoms
Pair time (%) = 0.0033741 (92.2375)
Neigh time (%) = 0 (0)
Comm time (%) = 0.000161886 (4.42547)
Outpt time (%) = 1.09673e-05 (0.299811)
Other time (%) = 0.000111103 (3.03722)
Nlocal: 256 ave 256 max 256 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1431 ave 1431 max 1431 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 9952 ave 9952 max 9952 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9952
Ave neighs/atom = 38.875
Neighbor list builds = 0
Dangerous builds = 0
run 1
Memory usage per processor = 1.50139 Mbytes
Step Temp E_pair E_mol TotEng Press
20 0.6239063 -5.5404291 0 -4.6082254 1.0394285
21 0.63845863 -5.5628733 0 -4.6089263 0.99398278
Loop time of 0.000490904 on 1 procs for 1 steps with 256 atoms
Pair time (%) = 0.000452042 (92.0835)
Neigh time (%) = 0 (0)
Comm time (%) = 1.69277e-05 (3.44828)
Outpt time (%) = 1.00136e-05 (2.03983)
Other time (%) = 1.19209e-05 (2.42836)
Nlocal: 256 ave 256 max 256 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1431 ave 1431 max 1431 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 9705 ave 9705 max 9705 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9705
Ave neighs/atom = 37.9102
Neighbor list builds = 0
Dangerous builds = 0

View File

@ -1,124 +0,0 @@
LAMMPS (20 Sep 2010)
# 3d Lennard-Jones melt
units lj
atom_style atomic
atom_modify map array
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 4 0 4 0 4
create_box 1 box
Created orthogonal box = (0 0 0) to (6.71838 6.71838 6.71838)
1 by 2 by 2 processor grid
create_atoms 1 box
Created 256 atoms
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
run 10
Memory usage per processor = 1.48354 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6218056 -5.0244179
10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175
Loop time of 0.00202775 on 4 procs for 10 steps with 256 atoms
Pair time (%) = 0.00085938 (42.381)
Neigh time (%) = 0 (0)
Comm time (%) = 0.00108671 (53.592)
Outpt time (%) = 2.79546e-05 (1.3786)
Other time (%) = 5.37038e-05 (2.64844)
Nlocal: 64 ave 64 max 64 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 843 ave 843 max 843 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 2496 ave 2496 max 2496 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9984
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds = 0
run 10
Memory usage per processor = 1.48354 Mbytes
Step Temp E_pair E_mol TotEng Press
10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175
20 0.6239063 -5.557644 0 -4.6254403 0.97451173
Loop time of 0.00224167 on 4 procs for 10 steps with 256 atoms
Pair time (%) = 0.000862718 (38.4855)
Neigh time (%) = 0 (0)
Comm time (%) = 0.00127524 (56.888)
Outpt time (%) = 5.19753e-05 (2.31859)
Other time (%) = 5.17368e-05 (2.30796)
Nlocal: 64 ave 69 max 59 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Nghost: 843 ave 848 max 838 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 2488 ave 2745 max 2319 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 9952
Ave neighs/atom = 38.875
Neighbor list builds = 0
Dangerous builds = 0
run 1
Memory usage per processor = 1.48354 Mbytes
Step Temp E_pair E_mol TotEng Press
20 0.6239063 -5.5404291 0 -4.6082254 1.0394285
21 0.63845863 -5.5628733 0 -4.6089263 0.99398278
Loop time of 0.000325441 on 4 procs for 1 steps with 256 atoms
Pair time (%) = 0.000120759 (37.1062)
Neigh time (%) = 0 (0)
Comm time (%) = 0.000165045 (50.7143)
Outpt time (%) = 2.86698e-05 (8.80952)
Other time (%) = 1.09673e-05 (3.36996)
Nlocal: 64 ave 70 max 58 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 843 ave 849 max 837 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Neighs: 2426.25 ave 2704 max 2229 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 9705
Ave neighs/atom = 37.9102
Neighbor list builds = 0
Dangerous builds = 0

View File

@ -1,116 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
www.cs.sandia.gov/~sjplimp/lammps.html
Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* c_driver = simple example of how an umbrella program
can invoke LAMMPS as a library on some subset of procs
Syntax: c_driver P in.lammps
P = # of procs to run LAMMPS on
must be <= # of procs the driver code itself runs on
in.lammps = LAMMPS input script
See README for compilation instructions */
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "mpi.h"
#include "library.h" /* this is a LAMMPS include file */
int main(int narg, char **arg)
{
/* setup MPI and various communicators
driver runs on all procs in MPI_COMM_WORLD
comm_lammps only has 1st P procs (could be all or any subset) */
MPI_Init(&narg,&arg);
if (narg != 3) {
printf("Syntax: c_driver P in.lammps\n");
exit(1);
}
int me,nprocs;
MPI_Comm_rank(MPI_COMM_WORLD,&me);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
int nprocs_lammps = atoi(arg[1]);
if (nprocs_lammps > nprocs) {
if (me == 0)
printf("ERROR: LAMMPS cannot use more procs than available\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
int lammps;
if (me < nprocs_lammps) lammps = 1;
else lammps = MPI_UNDEFINED;
MPI_Comm comm_lammps;
MPI_Comm_split(MPI_COMM_WORLD,lammps,0,&comm_lammps);
/* open LAMMPS input script */
FILE *fp;
if (me == 0) {
fp = fopen(arg[2],"r");
if (fp == NULL) {
printf("ERROR: Could not open LAMMPS input script\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
}
/* run the input script thru LAMMPS one line at a time until end-of-file
driver proc 0 reads a line, Bcasts it to all procs
(could just send it to proc 0 of comm_lammps and let it Bcast)
all LAMMPS procs call lammps_command() on the line */
void *ptr;
if (lammps == 1) lammps_open(0,NULL,comm_lammps,&ptr);
int n;
char line[1024];
while (1) {
if (me == 0) {
if (fgets(line,1024,fp) == NULL) n = 0;
else n = strlen(line) + 1;
if (n == 0) fclose(fp);
}
MPI_Bcast(&n,1,MPI_INT,0,MPI_COMM_WORLD);
if (n == 0) break;
MPI_Bcast(line,n,MPI_CHAR,0,MPI_COMM_WORLD);
if (lammps == 1) lammps_command(ptr,line);
}
/* run 10 more steps
get coords from LAMMPS
change coords of 1st atom
put coords back into LAMMPS
run a single step with changed coords */
if (lammps == 1) {
lammps_command(ptr,"run 10");
int natoms = lammps_get_natoms(ptr);
double *x = (double *) malloc(3*natoms*sizeof(double));
lammps_get_coords(ptr,x);
double epsilon = 0.1;
x[0] += epsilon;
lammps_put_coords(ptr,x);
free(x);
lammps_command(ptr,"run 1");
}
if (lammps == 1) lammps_close(ptr);
/* close down MPI */
MPI_Finalize();
}

View File

@ -1,121 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
www.cs.sandia.gov/~sjplimp/lammps.html
Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// c++_driver = simple example of how an umbrella program
// can invoke LAMMPS as a library on some subset of procs
// Syntax: c++_driver P in.lammps
// P = # of procs to run LAMMPS on
// must be <= # of procs the driver code itself runs on
// in.lammps = LAMMPS input script
// See README for compilation instructions
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "mpi.h"
#include "lammps.h" // these are LAMMPS include files
#include "input.h"
#include "atom.h"
#include "library.h"
using namespace LAMMPS_NS;
int main(int narg, char **arg)
{
// setup MPI and various communicators
// driver runs on all procs in MPI_COMM_WORLD
// comm_lammps only has 1st P procs (could be all or any subset)
MPI_Init(&narg,&arg);
if (narg != 3) {
printf("Syntax: c++_driver P in.lammps\n");
exit(1);
}
int me,nprocs;
MPI_Comm_rank(MPI_COMM_WORLD,&me);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
int nprocs_lammps = atoi(arg[1]);
if (nprocs_lammps > nprocs) {
if (me == 0)
printf("ERROR: LAMMPS cannot use more procs than available\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
int lammps;
if (me < nprocs_lammps) lammps = 1;
else lammps = MPI_UNDEFINED;
MPI_Comm comm_lammps;
MPI_Comm_split(MPI_COMM_WORLD,lammps,0,&comm_lammps);
// open LAMMPS input script
FILE *fp;
if (me == 0) {
fp = fopen(arg[2],"r");
if (fp == NULL) {
printf("ERROR: Could not open LAMMPS input script\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
}
// run the input script thru LAMMPS one line at a time until end-of-file
// driver proc 0 reads a line, Bcasts it to all procs
// (could just send it to proc 0 of comm_lammps and let it Bcast)
// all LAMMPS procs call input->one() on the line
LAMMPS *lmp;
if (lammps == 1) lmp = new LAMMPS(0,NULL,comm_lammps);
int n;
char line[1024];
while (1) {
if (me == 0) {
if (fgets(line,1024,fp) == NULL) n = 0;
else n = strlen(line) + 1;
if (n == 0) fclose(fp);
}
MPI_Bcast(&n,1,MPI_INT,0,MPI_COMM_WORLD);
if (n == 0) break;
MPI_Bcast(line,n,MPI_CHAR,0,MPI_COMM_WORLD);
if (lammps == 1) lmp->input->one(line);
}
// run 10 more steps
// get coords from LAMMPS
// change coords of 1st atom
// put coords back into LAMMPS
// run a single step with changed coords
if (lammps == 1) {
lmp->input->one("run 10");
int natoms = static_cast<int> (lmp->atom->natoms);
double *x = new double[3*natoms];
lammps_get_coords(lmp,x); // no LAMMPS class function for this
double epsilon = 0.1;
x[0] += epsilon;
lammps_put_coords(lmp,x); // no LAMMPS class function for this
delete [] x;
lmp->input->one("run 1");
}
if (lammps == 1) delete lmp;
// close down MPI
MPI_Finalize();
}


@ -1,135 +0,0 @@
! LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
! www.cs.sandia.gov/~sjplimp/lammps.html
! Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
!
! Copyright (2003) Sandia Corporation. Under the terms of Contract
! DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
! certain rights in this software. This software is distributed under
! the GNU General Public License.
!
! See the README file in the top-level LAMMPS directory.
! f_driver = simple example of how an umbrella program
! can invoke LAMMPS as a library on some subset of procs
! Syntax: f_driver P in.lammps
! P = # of procs to run LAMMPS on
! must be <= # of procs the driver code itself runs on
! in.lammps = LAMMPS input script
! See README for compilation instructions
PROGRAM f_driver
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER, PARAMETER :: fp=20
INTEGER :: n, narg, ierr, me, nprocs, natoms
INTEGER :: lammps, nprocs_lammps, comm_lammps
INTEGER (kind=8) :: ptr
REAL (kind=8), ALLOCATABLE :: x(:)
REAL (kind=8), PARAMETER :: epsilon=0.1
CHARACTER (len=64) :: arg
CHARACTER (len=1024) :: line
! setup MPI and various communicators
! driver runs on all procs in MPI_COMM_WORLD
! comm_lammps only has 1st P procs (could be all or any subset)
CALL mpi_init(ierr)
narg = command_argument_count()
IF (narg /= 2) THEN
PRINT *, 'Syntax: f_driver P in.lammps'
CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
END IF
CALL mpi_comm_rank(MPI_COMM_WORLD,me,ierr);
CALL mpi_comm_size(MPI_COMM_WORLD,nprocs,ierr);
CALL get_command_argument(1,arg)
READ (arg,'(I10)') nprocs_lammps
IF (nprocs_lammps > nprocs) THEN
IF (me == 0) THEN
PRINT *, 'ERROR: LAMMPS cannot use more procs than available'
CALL mpi_abort(MPI_COMM_WORLD,2,ierr)
END IF
END IF
lammps = 0
IF (me < nprocs_lammps) THEN
lammps = 1
ELSE
lammps = MPI_UNDEFINED
END IF
CALL mpi_comm_split(MPI_COMM_WORLD,lammps,0,comm_lammps,ierr)
! open LAMMPS input script (only proc 0 reads from it)
CALL get_command_argument(2,arg)
OPEN(UNIT=fp, FILE=arg, ACTION='READ', STATUS='OLD', IOSTAT=ierr)
IF (ierr /= 0) THEN
PRINT *, 'ERROR: Could not open LAMMPS input script'
CALL mpi_abort(MPI_COMM_WORLD,3,ierr);
END IF
! run the input script thru LAMMPS one line at a time until end-of-file
! driver proc 0 reads a line, Bcasts it to all procs
! (could just send it to proc 0 of comm_lammps and let it Bcast)
! all LAMMPS procs call lammps_command() on the line
IF (lammps == 1) CALL lammps_open(comm_lammps,ptr)
n = 0
DO
IF (me == 0) THEN
READ (UNIT=fp, FMT='(A)', IOSTAT=ierr) line
n = 0
IF (ierr == 0) THEN
n = LEN(TRIM(line))
IF (n == 0 ) THEN
line = ' '
n = 1
END IF
END IF
END IF
CALL mpi_bcast(n,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
IF (n == 0) EXIT
CALL mpi_bcast(line,n,MPI_CHARACTER,0,MPI_COMM_WORLD,ierr)
IF (lammps == 1) CALL lammps_command(ptr,line,n)
END DO
CLOSE(UNIT=fp)
! run 10 more steps
! get coords from LAMMPS
! change coords of 1st atom
! put coords back into LAMMPS
! run a single step with changed coords
IF (lammps == 1) THEN
CALL lammps_command(ptr,'run 10',6)
CALL lammps_get_natoms(ptr,natoms)
ALLOCATE(x(3*natoms))
CALL lammps_get_coords(ptr,x)
x(1) = x(1) + epsilon
CALL lammps_put_coords(ptr,x)
DEALLOCATE(x)
CALL lammps_command(ptr,'run 1',5);
END IF
! free LAMMPS object
IF (lammps == 1) CALL lammps_close(ptr);
! close down MPI
CALL mpi_finalize(ierr)
END PROGRAM f_driver


@ -11,19 +11,22 @@
This section describes how to build and use LAMMPS via a Python
interface.
11.1 "Extending Python with a serial version of LAMMPS"_#py_1
11.2 "Creating a shared MPI library"_#py_2
11.3 "Extending Python with a parallel version of LAMMPS"_#py_3
11.4 "Extending Python with MPI"_#py_4
11.5 "Testing the Python-LAMMPS interface"_#py_5
11.6 "Using LAMMPS from Python"_#py_6
11.7 "Example Python scripts that use LAMMPS"_#py_7 :ul
11.1 "Setting necessary environment variables"_#py_1
11.2 "Building LAMMPS as a shared library"_#py_2
11.3 "Extending Python with MPI"_#py_3
11.4 "Testing the Python-LAMMPS interface"_#py_4
11.5 "Using LAMMPS from Python"_#py_5
11.6 "Example Python scripts that use LAMMPS"_#py_6 :ul
The LAMMPS distribution includes some Python code in its python
directory which wraps the library interface to LAMMPS. This makes it
possible to run LAMMPS, invoke LAMMPS commands or give it an input
script, extract LAMMPS results, and modify internal LAMMPS variables,
either from a Python script or interactively from a Python prompt.
The LAMMPS distribution includes the file python/lammps.py which wraps
the library interface to LAMMPS. This file makes it possible to
run LAMMPS, invoke LAMMPS commands or give it an input script, extract
LAMMPS results, and modify internal LAMMPS variables, either from a
Python script or interactively from a Python prompt. You can run a
Python script in serial or parallel. Running Python interactively in
parallel does not generally work, unless you have a package installed
that extends your Python to enable multiple instances of Python to
read what you type.
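For orientation, here is a minimal sketch of the kind of script this
enables, assuming LAMMPS has already been built as a shared library and
the environment variables described below are set; the in.lj input
script name is only illustrative, and the full set of wrapper methods
is documented in the "Using LAMMPS from Python" section:
from lammps import lammps   # the wrapper in python/lammps.py
lmp = lammps()              # create a LAMMPS instance
lmp.file("in.lj")           # run an entire input script
lmp.command("run 100")      # issue one more LAMMPS command
natoms = lmp.get_natoms()   # pull a result back into Python
lmp.close()                 # destroy the LAMMPS instance :pre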
"Python"_http://www.python.org is a powerful scripting and programming
language which can be used to wrap software like LAMMPS and other
@ -37,75 +40,116 @@ section"_Section_howto.html#howto_19 for a description of the library
interface provided in src/library.cpp and src/library.h and how to
extend it for your needs. As described below, that interface is what
is exposed to Python. It is designed to be easy to add functions to.
This has the effect of extending the Python interface as well. See
details below.
This can easily extend the Python interface as well. See details
below.
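As a hypothetical illustration of that point: the wrapper object keeps
a ctypes handle to the shared library, so any C-callable routine in
src/library.h, including one you add yourself, can be reached from
Python without further wrapper code. The .lib and .lmp attribute names
below are internals of python/lammps.py and lammps_my_diagnostic() is
invented for this sketch; lammps_get_natoms() is one of the stock
library routines:
from lammps import lammps
lmp = lammps()
lmp.file("in.lj")                        # illustrative input script
n = lmp.lib.lammps_get_natoms(lmp.lmp)   # direct call to a library.h routine
# a routine added to src/library.cpp, e.g. lammps_my_diagnostic(void *),
# would be callable the same way once the shared library is rebuilt:
# val = lmp.lib.lammps_my_diagnostic(lmp.lmp) :pre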
By using the Python interface LAMMPS can also be coupled with a GUI or
visualization tools that display graphs or animations in real time as
LAMMPS runs. Examples of such scripts are included in the python
directory.
By using the Python interface, LAMMPS can also be coupled with a GUI
or other visualization tools that display graphs or animations in real
time as LAMMPS runs. Examples of such scripts are included in the
python directory.
Two advantages of using Python are how concise the language is and
Two advantages of using Python are how concise the language is, and
that it can be run interactively, enabling rapid development and
debugging of programs. If you use it to mostly invoke costly
operations within LAMMPS, such as running a simulation for a
reasonable number of timesteps, then the overhead cost of invoking
LAMMPS thru Python will be negligible.
Before using LAMMPS from a Python script, the Python on your machine
must be "extended" to include an interface to the LAMMPS library. If
your Python script will invoke MPI operations, you will also need to
extend your Python with an interface to MPI itself.
Before using LAMMPS from a Python script, you have to do two things.
You need to set two environment variables. And you need to build
LAMMPS as a dynamic shared library, so it can be loaded by Python.
Both these steps are discussed below.
Thus you should first decide how you intend to use LAMMPS from Python.
There are 3 options:
The Python wrapper for LAMMPS uses the amazing and magical (to me)
"ctypes" package in Python, which auto-generates the interface code
needed between Python and a set of C interface routines for a library.
Ctypes is part of standard Python for versions 2.5 and later. You can
check which version of Python you have installed by simply typing
"python" at a shell prompt.
(1) Use LAMMPS on a single processor running Python.
:line
:line
(2) Use LAMMPS in parallel, where each processor runs Python, but your
Python program does not use MPI.
11.1 Setting necessary environment variables :link(py_1),h4
(3) Use LAMMPS in parallel, where each processor runs Python, and your
Python script also makes MPI calls through a Python/MPI interface.
For Python to use the LAMMPS interface, it needs to find two files.
The paths to these files need to be added to two environment variables:
one that Python checks and one that the operating system's dynamic
loader checks.
Note that for (2) and (3) you will not be able to use Python
interactively by typing commands and getting a response. This is
because you will have multiple instances of Python running (e.g. on a
parallel machine) and they cannot all read what you type.
The first is the environment variable PYTHONPATH. It needs
to include the directory where the python/lammps.py file is.
For the csh or tcsh shells, you could add something like this to your
~/.cshrc file:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/python :pre
The second is the environment variable LD_LIBRARY_PATH, which is used
by the operating system to find dynamic shared libraries when it loads
them. It needs to include the directory where the shared LAMMPS
library will be. Normally this is the LAMMPS src dir, as explained in
the following section.
For the csh or tcsh shells, you could add something like this to your
~/.cshrc file:
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/src :pre
Note that a LAMMPS build may depend on several auxiliary libraries,
which are specified in your low-level src/Makefile.foo file. For
example, an MPI library, the FFTW library, a JPEG library, etc.
Depending on what LAMMPS packages you have installed, you may
pre-build additional libraries in the lib directories, which are linked
to in your LAMMPS build.
As discussed below, if you are including those options in LAMMPS, all
of the auxiliary libraries have to be available as shared libraries
for Python to successfully load LAMMPS. If they are not in default
places where the operating system can find them, then you also have to
add their paths to the LD_LIBRARY_PATH environment variable.
For example, if you are using the dummy MPI library provided in
src/STUBS, you need to add something like this to your ~/.cshrc file:
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/src/STUBS :pre
If you are using the LAMMPS USER-ATC package, you need to add
something like this to your ~/.cshrc file:
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/atc :pre
:line
11.2 Building LAMMPS as a shared library :link(py_2),h4
A shared library is one that is dynamically loadable, which is what
Python requires. On Linux this is a library file that ends in ".so",
not ".a". Such a shared library is normally not built if you
installed MPI yourself, but it is easy to do. Here is how to do it
for "MPICH"_mpich, a popular open-source version of MPI, distributed
by Argonne National Labs. From within the mpich directory, type
:link(mpich,http://www-unix.mcs.anl.gov/mpi)
./configure --enable-shared
make
make install :pre
You may need to use "sudo make install" in place of the last line.
The end result should be the file libmpich.so in /usr/local/lib.
Working in mode (1) does not require your machine to have MPI
installed. You should extend your Python with a serial version of
LAMMPS and the dummy MPI library provided with LAMMPS. See
instructions below on how to do this.
Working in mode (2) requires your machine to have an MPI library
installed, but your Python does not need to be extended with MPI
itself. The MPI library must be a shared library (e.g. a *.so file on
Linux) which is not typically created when MPI is built/installed.
See instructions below on how to do this. You should extend your
Python with a parallel version of LAMMPS which will use the
shared MPI system library. See instructions below on how to do this.
Working in mode (3) requires your machine to have MPI installed (as a
shared library as in (2)). You must also extend your Python with a
parallel version of LAMMPS (same as in (2)) and with MPI itself, via
one of several available Python/MPI packages. See instructions below
on how to do the latter task.
Several of the following sub-sections cover the rest of the Python
setup discussion. The next to last sub-section describes the Python
syntax used to invoke LAMMPS. The last sub-section describes example
Python scripts included in the python directory.
Before proceeding, there are 2 items to note.
(1) The provided Python wrapper for LAMMPS uses the amazing and
magical (to me) "ctypes" package in Python, which auto-generates the
interface code needed between Python and a set of C interface routines
for a library. Ctypes is part of standard Python for versions 2.5 and
later. You can check which version of Python you have installed by
simply typing "python" at a shell prompt.
(2) Any library wrapped by Python, including LAMMPS, must be built as
a shared library (e.g. a *.so file on Linux and not a *.a file). The
python/setup_serial.py and setup.py scripts do this build for LAMMPS
@ -131,63 +175,6 @@ will also require you to edit the python/setup_serial.py or setup.py
scripts to enable Python to access those libraries when it builds the
LAMMPS wrapper.
:line
:line
11.1 Extending Python with a serial version of LAMMPS :link(py_1),h4
From the python directory in the LAMMPS distribution, type
python setup_serial.py build :pre
and then one of these commands:
sudo python setup_serial.py install
python setup_serial.py install --home=~/foo :pre
The "build" command should compile all the needed LAMMPS files,
including its dummy MPI library. The first "install" command will put
the needed files in your Python's site-packages sub-directory, so that
Python can load them. For example, if you installed Python yourself
on a Linux machine, it would typically be somewhere like
/usr/local/lib/python2.5/site-packages. Installing Python packages
this way often requires you to be able to write to the Python
directories, which may require root privileges, hence the "sudo"
prefix. If this is not the case, you can drop the "sudo". If you use
the "sudo" prefix and you have installed Python yourself, you should
make sure that root uses the same Python as the one you did the
"install" in. E.g. these 2 commands may do the install in different
Python versions:
python setup_serial.py install --home=~/foo
python /usr/local/bin/python/setup_serial.py install --home=~/foo :pre
Alternatively, you can install the LAMMPS files (or any other Python
packages) in your own user space. The second "install" command does
this, where you should replace "foo" with your directory of choice.
If these commands are successful, a {lammps.py} and
{_lammps_serial.so} file will be put in the appropriate directory.
:line
11.2 Creating a shared MPI library :link(py_2),h4
A shared library is one that is dynamically loadable, which is what
Python requires. On Linux this is a library file that ends in ".so",
not ".a". Such a shared library is normally not built if you
installed MPI yourself, but it is easy to do. Here is how to do it
for "MPICH"_mpich, a popular open-source version of MPI, distributed
by Argonne National Labs. From within the mpich directory, type
:link(mpich,http://www-unix.mcs.anl.gov/mpi)
./configure --enable-shared
make
make install :pre
You may need to use "sudo make install" in place of the last line.
The end result should be the file libmpich.so in /usr/local/lib.
IMPORTANT NOTE: If the file libmpich.a already exists in your
installation directory (e.g. /usr/local/lib), you will now have both a
@ -200,47 +187,16 @@ this happens, it means you will need to remove the file
/usr/local/lib/libmpich.so before building LAMMPS again as a
stand-alone code.
:line
11.3 Extending Python with MPI :link(py_3),h4
11.3 Extending Python with a parallel version of LAMMPS :link(py_3),h4
From the python directory, type
python setup.py build :pre
If
your Python script will run in parallel and you want to be able to
invoke MPI calls directly from Python, you will also need to extend
your Python with an interface to MPI.
and then one of these commands:
sudo python setup.py install
python setup.py install --home=~/foo :pre
The "build" command should compile all the needed LAMMPS C++ files,
which will require MPI to be installed on your system. This means it
must find both the header file mpi.h and a shared library file,
e.g. libmpich.so if the MPICH version of MPI is installed. See the
preceding section for how to create a shared library version of MPI if
it does not exist. You may need to adjust the "include_dirs" and
"library_dirs" and "libraries" fields in python/setup.py to
ensure the Python build finds all the files it needs.
The first "install" command will put the needed files in your Python's
site-packages sub-directory, so that Python can load them. For
example, if you installed Python yourself on a Linux machine, it would
typically be somewhere like /usr/local/lib/python2.5/site-packages.
Installing Python packages this way often requires you to be able to
write to the Python directories, which may require root privileges,
hence the "sudo" prefix. If this is not the case, you can drop the
"sudo".
Alternatively, you can install the LAMMPS files (or any other Python
packages) in your own user space. The second "install" command does
this, where you should replace "foo" with your directory of choice.
If these commands are successful, a {lammps.py} and {_lammps.so} file
will be put in the appropriate directory.
:line
11.4 Extending Python with MPI :link(py_4),h4
There are several Python packages available that purport to wrap MPI
as a library and allow MPI functions to be called from Python.
@ -315,37 +271,16 @@ and see one line of output for each processor you ran on.
:line
11.5 Testing the Python-LAMMPS interface :link(py_5),h4
11.4 Testing the Python-LAMMPS interface :link(py_4),h4
Before using LAMMPS in a Python program, one more step is needed. The
interface to LAMMPS is via the Python ctypes package, which loads the
shared LAMMPS library via a CDLL() call, which in turn is a wrapper on
the C-library dlopen(). This command is different from a normal
Python "import" and needs to be able to find the LAMMPS shared
library, which is either in the Python site-packages directory or in a
local directory you specified in the "python setup.py install"
command, as described above.
The simplest way to do this is to add a line like this to your
.cshrc or other shell start-up file.
setenv LD_LIBRARY_PATH
$\{LD_LIBRARY_PATH\}:/usr/local/lib/python2.5/site-packages :pre
and then execute the shell file to ensure the path has been updated.
This will extend the path that dlopen() uses to look for shared
libraries.
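If the load still fails, the dlopen() step can be checked by itself;
the _lammps.so file name used here is the one produced by the setup
scripts described above (adjust it if your build named the shared
library differently):
>>> from ctypes import CDLL
>>> CDLL("_lammps.so") :pre
An OSError from this call means dlopen() could not find or load the
shared library, i.e. LD_LIBRARY_PATH (or the file name) still needs
attention.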
To test if the serial LAMMPS library has been successfully installed
(mode 1 above), launch Python and type
To test if LAMMPS is now callable from Python, launch Python and type:
>>> from lammps import lammps
>>> lmp = lammps() :pre
If you get no errors, you're ready to use serial LAMMPS from Python.
If you get no errors, you're ready to use LAMMPS from Python.
If you built LAMMPS for parallel use (mode 2 or 3 above), launch
Python in parallel:
% mpirun -np 4 python test.script :pre
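A minimal test.script for this purpose might look like the following
sketch; the in.lj input script name is illustrative:
from lammps import lammps
lmp = lammps()      # with 4 procs this should be one 4-processor LAMMPS run
lmp.file("in.lj")
lmp.close() :pre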
@ -368,15 +303,6 @@ see if the LAMMPS run says it ran on P processors or if you get output
from P duplicated 1-processor runs written to the screen. In the
latter case, Pypar is not working correctly.
Note that this line:
from lammps import lammps :pre
will import either the serial or parallel version of the LAMMPS
library, as wrapped by lammps.py. But if you installed both via
setup_serial.py and setup.py, it will always import the parallel
version, since it attempts that first.
Note that if your Python script imports the Pypar package (as above),
so that it can use MPI calls directly, then Pypar initializes MPI for
you. Thus the last line of your Python script should be
@ -408,7 +334,7 @@ Python on a single processor, not in parallel.
:line
:line
11.6 Using LAMMPS from Python :link(py_6),h4
11.5 Using LAMMPS from Python :link(py_5),h4
The Python interface to LAMMPS consists of a Python "lammps" module,
the source code for which is in python/lammps.py, which creates a
@ -599,7 +525,7 @@ Python script. Isn't ctypes amazing? :l,ule
:line
:line
11.7 Example Python scripts that use LAMMPS :link(py_7),h4
11.6 Example Python scripts that use LAMMPS :link(py_6),h4
These are the Python scripts included as demos in the python/examples
directory of the LAMMPS distribution, to illustrate the kinds of