From 4180b4a7d6eeab710d132ff7ed2e19fd95ebd3cf Mon Sep 17 00:00:00 2001
From: Axel Kohlmeyer
Date: Tue, 25 Feb 2020 19:07:49 +0100
Subject: [PATCH 1/2] add example to COUPLE folder demonstrating loading LAMMPS as a plugin.

---
 examples/COUPLE/plugin/README              |  70 +++++
 examples/COUPLE/plugin/in.lj               |  25 ++
 examples/COUPLE/plugin/liblammpsplugin.c   |  99 +++++++
 examples/COUPLE/plugin/liblammpsplugin.h   | 126 +++++++++
 examples/COUPLE/plugin/log.simple.plugin.1 | 294 ++++++++++++++++++++
 examples/COUPLE/plugin/log.simple.plugin.4 | 296 +++++++++++++++++++++
 examples/COUPLE/plugin/simple.c            | 175 ++++++++++++
 src/library.h                              |   4 +
 8 files changed, 1089 insertions(+)
 create mode 100644 examples/COUPLE/plugin/README
 create mode 100644 examples/COUPLE/plugin/in.lj
 create mode 100644 examples/COUPLE/plugin/liblammpsplugin.c
 create mode 100644 examples/COUPLE/plugin/liblammpsplugin.h
 create mode 100644 examples/COUPLE/plugin/log.simple.plugin.1
 create mode 100644 examples/COUPLE/plugin/log.simple.plugin.4
 create mode 100644 examples/COUPLE/plugin/simple.c

diff --git a/examples/COUPLE/plugin/README b/examples/COUPLE/plugin/README
new file mode 100644
index 0000000000..7075689d6f
--- /dev/null
+++ b/examples/COUPLE/plugin/README
@@ -0,0 +1,70 @@
+This directory has a simple C code that shows how
+LAMMPS can be linked to a driver application as a library.  The purpose
+is to illustrate how another code could perform computations while
+using LAMMPS to perform MD on all or a subset of the processors, or
+how an umbrella code or script could call both LAMMPS and some other
+code to perform a coupled calculation.
+
+simple.c is the C driver
+liblammpsplugin.c is the LAMMPS library plugin loader
+
+The 3 codes do the same thing, so you can compare them to see how to
+drive LAMMPS from each language.  See lammps/python/example/simple.py
+to do something similar from Python.  The Fortran driver requires an
+additional wrapper library that interfaces the C interface of the
+LAMMPS library to Fortran and also translates the MPI communicator
+from Fortran to C.
+
+First build LAMMPS as a library (see examples/COUPLE/README), e.g.
+
+cd $HOME/lammps/src
+make mode=shlib mpi
+
+or
+
+cd $HOME/lammps
+mkdir build-shared
+cd build-shared
+cmake -D BUILD_LIB=on -D BUILD_SHARED_LIBS=on ../cmake
+make
+
+You can then build any of the driver codes with compile lines like
+these, which include paths to the LAMMPS library interface, and
+linking with FFTW (only needed if you built LAMMPS as a library with
+its PPPM solver).
+
+mpicc -c simple.c
+mpicc simple.o -llammps -lfftw -o simpleC
+
+
+You then run simpleC on a parallel machine
+on some number of processors Q with 3 arguments:
+
+% mpirun -np Q simpleC P in.lj $HOME/lammps/src/liblammps.so
+
+or
+
+% mpirun -np Q simpleC P in.lj $HOME/lammps/build-shared/liblammps.so
+
+P is the number of procs you want LAMMPS to run on (must be <= Q),
+in.lj is a LAMMPS input script, and the last argument is the path to
+the LAMMPS shared library.  This either has to be an absolute path, or
+liblammps.so has to be in a folder that is included in the environment
+variable LD_LIBRARY_PATH so it will be found by the dynamic object loader.
+
+The driver will launch LAMMPS on P procs, read the input script a line
+at a time, and pass each command line to LAMMPS.  The final line of
+the script is a "run" command, so LAMMPS will run the problem.
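+
+For illustration, the core of that dispatch loop in simple.c looks
+roughly like this (the real code also broadcasts each input line with
+MPI_Bcast so all driver procs stay in sync, and checks for errors):
+
+  while (fgets(line,1024,fp) != NULL)   /* read one line of the script */
+    plugin->command(lmp,line);          /* pass the command to LAMMPS */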
+
+The driver then requests all the atom coordinates from LAMMPS, moves
+one of the atoms a small amount "epsilon", passes the coordinates back
+to LAMMPS, and runs LAMMPS again.  If you look at the output, you
+should see a small energy change between runs, due to the moved atom.
+
+The C driver calls C-style routines in the src/library.cpp file
+of LAMMPS through the function pointers in the liblammpsplugin_t struct.
+This has the benefit that your binary is not linked to liblammps.so directly
+and thus you can change the name of the shared library (e.g. to have
+different variants compiled, or to load different LAMMPS versions without
+having to update your executable).
+
diff --git a/examples/COUPLE/plugin/in.lj b/examples/COUPLE/plugin/in.lj
new file mode 100644
index 0000000000..3dc80bdfea
--- /dev/null
+++ b/examples/COUPLE/plugin/in.lj
@@ -0,0 +1,25 @@
+# 3d Lennard-Jones melt
+
+units lj
+atom_style atomic
+atom_modify map array
+
+lattice fcc 0.8442
+region box block 0 4 0 4 0 4
+create_box 1 box
+create_atoms 1 box
+mass 1 1.0
+
+velocity all create 1.44 87287 loop geom
+
+pair_style lj/cut 2.5
+pair_coeff 1 1 1.0 1.0 2.5
+
+neighbor 0.3 bin
+neigh_modify delay 0 every 20 check no
+
+fix 1 all nve
+
+variable fx atom fx
+
+run 10
diff --git a/examples/COUPLE/plugin/liblammpsplugin.c b/examples/COUPLE/plugin/liblammpsplugin.c
new file mode 100644
index 0000000000..a66d87dfcc
--- /dev/null
+++ b/examples/COUPLE/plugin/liblammpsplugin.c
@@ -0,0 +1,99 @@
+/* -*- c++ -*- ----------------------------------------------------------
+   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
+   http://lammps.sandia.gov, Sandia National Laboratories
+   Steve Plimpton, sjplimp@sandia.gov
+
+   Copyright (2003) Sandia Corporation.  Under the terms of Contract
+   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
+   certain rights in this software.  This software is distributed under
+   the GNU General Public License.
+
+   See the README file in the top-level LAMMPS directory.
+------------------------------------------------------------------------- */
+
+/*
+   Variant of the C style library interface to LAMMPS
+   that uses a shared library and dynamically opens it,
+   so this can be used as a prototype code to integrate
+   a LAMMPS plugin into some other software.
+*/ + +#include "library.h" +#include "liblammpsplugin.h" +#include +#include + +liblammpsplugin_t *liblammpsplugin_load(const char *lib) +{ + liblammpsplugin_t *lmp; + void *handle; + + if (lib == NULL) return NULL; + handle = dlopen(lib,RTLD_NOW|RTLD_GLOBAL); + if (handle == NULL) return NULL; + + lmp = (liblammpsplugin_t *) malloc(sizeof(liblammpsplugin_t)); + lmp->handle = handle; + +#define ADDSYM(symbol) lmp->symbol = dlsym(handle,"lammps_" #symbol) + ADDSYM(open); + ADDSYM(open_no_mpi); + ADDSYM(close); + ADDSYM(version); + ADDSYM(file); + ADDSYM(command); + ADDSYM(commands_list); + ADDSYM(commands_string); + ADDSYM(free); + ADDSYM(extract_setting); + ADDSYM(extract_global); + ADDSYM(extract_box); + ADDSYM(extract_atom); + ADDSYM(extract_compute); + ADDSYM(extract_fix); + ADDSYM(extract_variable); + + ADDSYM(get_thermo); + ADDSYM(get_natoms); + + ADDSYM(set_variable); + ADDSYM(reset_box); + + ADDSYM(gather_atoms); + ADDSYM(gather_atoms_concat); + ADDSYM(gather_atoms_subset); + ADDSYM(scatter_atoms); + ADDSYM(scatter_atoms_subset); + + ADDSYM(set_fix_external_callback); + + ADDSYM(config_has_package); + ADDSYM(config_package_count); + ADDSYM(config_package_name); + ADDSYM(config_has_gzip_support); + ADDSYM(config_has_png_support); + ADDSYM(config_has_jpeg_support); + ADDSYM(config_has_ffmpeg_support); + ADDSYM(config_has_exceptions); + ADDSYM(create_atoms); +#ifdef LAMMPS_EXCEPTIONS + lmp->has_exceptions = 1; + ADDSYM(has_error); + ADDSYM(get_last_error_message); +#else + lmp->has_exceptions = 0; + lmp->has_error = NULL; + lmp->get_last_error_message = NULL; +#endif + return lmp; +} + +int liblammpsplugin_release(liblammpsplugin_t *lmp) +{ + if (lmp == NULL) return 1; + if (lmp->handle == NULL) return 2; + + dlclose(lmp->handle); + free((void *)lmp); + return 0; +} diff --git a/examples/COUPLE/plugin/liblammpsplugin.h b/examples/COUPLE/plugin/liblammpsplugin.h new file mode 100644 index 0000000000..83d0dfa557 --- /dev/null +++ b/examples/COUPLE/plugin/liblammpsplugin.h @@ -0,0 +1,126 @@ +/* -*- c++ -*- ---------------------------------------------------------- + LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator + http://lammps.sandia.gov, Sandia National Laboratories + Steve Plimpton, sjplimp@sandia.gov + + Copyright (2003) Sandia Corporation. Under the terms of Contract + DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains + certain rights in this software. This software is distributed under + the GNU General Public License. + + See the README file in the top-level LAMMPS directory. +------------------------------------------------------------------------- */ + +#ifndef LIBLAMMPSPLUGIN_H +#define LIBLAMMPSPLUGIN_H +/* + Variant of the C style library interface to LAMMPS + that uses a shared library and dynamically opens it, + so this can be used as a prototype code to integrate + a LAMMPS plugin to some other software. +*/ + +/* + * Follow the behavior of regular LAMMPS compilation and assume + * -DLAMMPS_SMALLBIG when no define is set. 
+ */
+#if !defined(LAMMPS_BIGBIG) && !defined(LAMMPS_SMALLBIG) && !defined(LAMMPS_SMALLSMALL)
+#define LAMMPS_SMALLBIG
+#endif
+
+#include <mpi.h>
+#if defined(LAMMPS_BIGBIG) || defined(LAMMPS_SMALLBIG)
+#include <stdint.h>  /* for int64_t */
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(LAMMPS_BIGBIG)
+typedef void (*FixExternalFnPtr)(void *, int64_t, int, int64_t *, double **, double **);
+#elif defined(LAMMPS_SMALLBIG)
+typedef void (*FixExternalFnPtr)(void *, int64_t, int, int *, double **, double **);
+#else
+typedef void (*FixExternalFnPtr)(void *, int, int, int *, double **, double **);
+#endif
+
+struct _liblammpsplugin {
+  int abiversion;
+  int has_exceptions;
+  void *handle;
+  void (*open)(int, char **, MPI_Comm, void **);
+  void (*open_no_mpi)(int, char **, void **);
+  void (*close)(void *);
+  int (*version)(void *);
+  void (*file)(void *, char *);
+  char *(*command)(void *, char *);
+  void (*commands_list)(void *, int, char **);
+  void (*commands_string)(void *, char *);
+  void (*free)(void *);
+  int (*extract_setting)(void *, char *);
+  void *(*extract_global)(void *, char *);
+  void (*extract_box)(void *, double *, double *,
+                      double *, double *, double *, int *, int *);
+  void *(*extract_atom)(void *, char *);
+  void *(*extract_compute)(void *, char *, int, int);
+  void *(*extract_fix)(void *, char *, int, int, int, int);
+  void *(*extract_variable)(void *, char *, char *);
+
+  double (*get_thermo)(void *, char *);
+  int (*get_natoms)(void *);
+
+  int (*set_variable)(void *, char *, char *);
+  void (*reset_box)(void *, double *, double *, double, double, double);
+
+  void (*gather_atoms)(void *, char *, int, int, void *);
+  void (*gather_atoms_concat)(void *, char *, int, int, void *);
+  void (*gather_atoms_subset)(void *, char *, int, int, int, int *, void *);
+  void (*scatter_atoms)(void *, char *, int, int, void *);
+  void (*scatter_atoms_subset)(void *, char *, int, int, int, int *, void *);
+
+  void (*set_fix_external_callback)(void *, char *, FixExternalFnPtr, void*);
+
+  int (*config_has_package)(char * package_name);
+  int (*config_package_count)();
+  int (*config_package_name)(int index, char * buffer, int max_size);
+  int (*config_has_gzip_support)();
+  int (*config_has_png_support)();
+  int (*config_has_jpeg_support)();
+  int (*config_has_ffmpeg_support)();
+  int (*config_has_exceptions)();
+
+  int (*find_pair_neighlist)(void* ptr, char * style, int exact, int nsub, int request);
+  int (*find_fix_neighlist)(void* ptr, char * id, int request);
+  int (*find_compute_neighlist)(void* ptr, char * id, int request);
+  int (*neighlist_num_elements)(void* ptr, int idx);
+  void (*neighlist_element_neighbors)(void * ptr, int idx, int element, int * iatom, int * numneigh, int ** neighbors);
+
+// lammps_create_atoms() takes tagint and imageint as args
+// ifdef ensures they are compatible with rest of LAMMPS
+// caller must match to how LAMMPS library is built
+
+#ifdef LAMMPS_BIGBIG
+  void (*create_atoms)(void *, int, int64_t *, int *,
+                       double *, double *, int64_t *, int);
+#else
+  void (*create_atoms)(void *, int, int *, int *,
+                       double *, double *, int *, int);
+#endif
+
+  int (*has_error)(void *);
+  int (*get_last_error_message)(void *, char *, int);
+};
+
+typedef struct _liblammpsplugin liblammpsplugin_t;
+
+liblammpsplugin_t *liblammpsplugin_load(const char *);
+int liblammpsplugin_release(liblammpsplugin_t *);
+
+#undef LAMMPS
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/examples/COUPLE/plugin/log.simple.plugin.1
b/examples/COUPLE/plugin/log.simple.plugin.1 new file mode 100644 index 0000000000..ba4be378f4 --- /dev/null +++ b/examples/COUPLE/plugin/log.simple.plugin.1 @@ -0,0 +1,294 @@ +LAMMPS (18 Feb 2020) +Lattice spacing in x,y,z = 1.6796 1.6796 1.6796 +Created orthogonal box = (0 0 0) to (6.71838 6.71838 6.71838) + 1 by 1 by 1 MPI processor grid +Created 256 atoms + create_atoms CPU = 0.000297844 secs +Neighbor list info ... + update every 20 steps, delay 0 steps, check no + max neighbors/atom: 2000, page size: 100000 + master list distance cutoff = 2.8 + ghost atom cutoff = 2.8 + binsize = 1.4, bins = 5 5 5 + 1 neighbor lists, perpetual/occasional/extra = 1 0 0 + (1) pair lj/cut, perpetual + attributes: half, newton on + pair build: half/bin/atomonly/newton + stencil: half/bin/3d/newton + bin: standard +Setting up Verlet run ... + Unit style : lj + Current step : 0 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 0 1.44 -6.7733681 0 -4.6218056 -5.0244179 + 10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175 +Loop time of 0.00164276 on 1 procs for 10 steps with 256 atoms + +Performance: 2629719.113 tau/day, 6087.313 timesteps/s +93.7% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0014956 | 0.0014956 | 0.0014956 | 0.0 | 91.04 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 8.045e-05 | 8.045e-05 | 8.045e-05 | 0.0 | 4.90 +Output | 1.1399e-05 | 1.1399e-05 | 1.1399e-05 | 0.0 | 0.69 +Modify | 3.7431e-05 | 3.7431e-05 | 3.7431e-05 | 0.0 | 2.28 +Other | | 1.789e-05 | | | 1.09 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1431 ave 1431 max 1431 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9984 ave 9984 max 9984 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9984 +Ave neighs/atom = 39 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... + Unit style : lj + Current step : 10 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175 + 20 0.6239063 -5.557644 0 -4.6254403 0.97451173 +Loop time of 0.00199768 on 1 procs for 10 steps with 256 atoms + +Performance: 2162504.180 tau/day, 5005.797 timesteps/s +99.8% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0018518 | 0.0018518 | 0.0018518 | 0.0 | 92.70 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 7.9768e-05 | 7.9768e-05 | 7.9768e-05 | 0.0 | 3.99 +Output | 1.1433e-05 | 1.1433e-05 | 1.1433e-05 | 0.0 | 0.57 +Modify | 3.6904e-05 | 3.6904e-05 | 3.6904e-05 | 0.0 | 1.85 +Other | | 1.773e-05 | | | 0.89 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1431 ave 1431 max 1431 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9952 ave 9952 max 9952 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9952 +Ave neighs/atom = 38.875 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 20 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 20 0.6239063 -5.5404291 0 -4.6082254 1.0394285 + 21 0.63845863 -5.5628733 0 -4.6089263 0.99398278 +Loop time of 0.000304321 on 1 procs for 1 steps with 256 atoms + +Performance: 1419553.695 tau/day, 3286.004 timesteps/s +98.9% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00027815 | 0.00027815 | 0.00027815 | 0.0 | 91.40 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 8.321e-06 | 8.321e-06 | 8.321e-06 | 0.0 | 2.73 +Output | 1.0513e-05 | 1.0513e-05 | 1.0513e-05 | 0.0 | 3.45 +Modify | 3.968e-06 | 3.968e-06 | 3.968e-06 | 0.0 | 1.30 +Other | | 3.365e-06 | | | 1.11 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1431 ave 1431 max 1431 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9705 ave 9705 max 9705 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9705 +Ave neighs/atom = 37.9102 +Neighbor list builds = 0 +Dangerous builds not checked +Force on 1 atom via extract_atom: 26.9581 +Force on 1 atom via extract_variable: 26.9581 +Setting up Verlet run ... + Unit style : lj + Current step : 21 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 21 0.63845863 -5.5628733 0 -4.6089263 0.99398278 + 31 0.7494946 -5.7306417 0 -4.6107913 0.41043597 +Loop time of 0.00196027 on 1 procs for 10 steps with 256 atoms + +Performance: 2203779.175 tau/day, 5101.341 timesteps/s +99.7% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0018146 | 0.0018146 | 0.0018146 | 0.0 | 92.57 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 8.0268e-05 | 8.0268e-05 | 8.0268e-05 | 0.0 | 4.09 +Output | 1.0973e-05 | 1.0973e-05 | 1.0973e-05 | 0.0 | 0.56 +Modify | 3.6913e-05 | 3.6913e-05 | 3.6913e-05 | 0.0 | 1.88 +Other | | 1.756e-05 | | | 0.90 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1431 ave 1431 max 1431 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9688 ave 9688 max 9688 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9688 +Ave neighs/atom = 37.8438 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 31 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 31 0.7494946 -5.7306417 0 -4.6107913 0.41043597 + 51 0.71349216 -5.6772387 0 -4.6111811 0.52117681 +Loop time of 0.00433063 on 1 procs for 20 steps with 256 atoms + +Performance: 1995088.941 tau/day, 4618.261 timesteps/s +99.3% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0035121 | 0.0035121 | 0.0035121 | 0.0 | 81.10 +Neigh | 0.00050258 | 0.00050258 | 0.00050258 | 0.0 | 11.61 +Comm | 0.00019444 | 0.00019444 | 0.00019444 | 0.0 | 4.49 +Output | 1.2092e-05 | 1.2092e-05 | 1.2092e-05 | 0.0 | 0.28 +Modify | 7.2917e-05 | 7.2917e-05 | 7.2917e-05 | 0.0 | 1.68 +Other | | 3.647e-05 | | | 0.84 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1421 ave 1421 max 1421 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9700 ave 9700 max 9700 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9700 +Ave neighs/atom = 37.8906 +Neighbor list builds = 1 +Dangerous builds not checked +Setting up Verlet run ... + Unit style : lj + Current step : 51 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 51 0.71349216 -5.6772387 0 -4.6111811 0.52117681 + 61 0.78045421 -5.7781094 0 -4.6120011 0.093808941 +Loop time of 0.00196567 on 1 procs for 10 steps with 256 atoms + +Performance: 2197727.285 tau/day, 5087.332 timesteps/s +99.7% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0018222 | 0.0018222 | 0.0018222 | 0.0 | 92.70 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 7.8285e-05 | 7.8285e-05 | 7.8285e-05 | 0.0 | 3.98 +Output | 1.0862e-05 | 1.0862e-05 | 1.0862e-05 | 0.0 | 0.55 +Modify | 3.6719e-05 | 3.6719e-05 | 3.6719e-05 | 0.0 | 1.87 +Other | | 1.764e-05 | | | 0.90 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1421 ave 1421 max 1421 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9700 ave 9700 max 9700 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9700 +Ave neighs/atom = 37.8906 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 61 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 61 0.78045421 -5.7781094 0 -4.6120011 0.093808941 + 81 0.77743907 -5.7735004 0 -4.6118971 0.090822641 +Loop time of 0.00430528 on 1 procs for 20 steps with 256 atoms + +Performance: 2006838.581 tau/day, 4645.460 timesteps/s +99.8% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0034931 | 0.0034931 | 0.0034931 | 0.0 | 81.13 +Neigh | 0.00050437 | 0.00050437 | 0.00050437 | 0.0 | 11.72 +Comm | 0.0001868 | 0.0001868 | 0.0001868 | 0.0 | 4.34 +Output | 1.1699e-05 | 1.1699e-05 | 1.1699e-05 | 0.0 | 0.27 +Modify | 7.3308e-05 | 7.3308e-05 | 7.3308e-05 | 0.0 | 1.70 +Other | | 3.604e-05 | | | 0.84 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1405 ave 1405 max 1405 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9701 ave 9701 max 9701 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9701 +Ave neighs/atom = 37.8945 +Neighbor list builds = 1 +Dangerous builds not checked +Deleted 256 atoms, new total = 0 +Setting up Verlet run ... + Unit style : lj + Current step : 81 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.63 | 2.63 | 2.63 Mbytes +Step Temp E_pair E_mol TotEng Press + 81 0.6239063 -5.5404291 0 -4.6082254 1.0394285 + 91 0.75393007 -5.7375259 0 -4.6110484 0.39357367 +Loop time of 0.00195843 on 1 procs for 10 steps with 256 atoms + +Performance: 2205851.941 tau/day, 5106.139 timesteps/s +99.7% CPU use with 1 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0018143 | 0.0018143 | 0.0018143 | 0.0 | 92.64 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 7.8608e-05 | 7.8608e-05 | 7.8608e-05 | 0.0 | 4.01 +Output | 1.0786e-05 | 1.0786e-05 | 1.0786e-05 | 0.0 | 0.55 +Modify | 3.7106e-05 | 3.7106e-05 | 3.7106e-05 | 0.0 | 1.89 +Other | | 1.762e-05 | | | 0.90 + +Nlocal: 256 ave 256 max 256 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Nghost: 1431 ave 1431 max 1431 min +Histogram: 1 0 0 0 0 0 0 0 0 0 +Neighs: 9705 ave 9705 max 9705 min +Histogram: 1 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9705 +Ave neighs/atom = 37.9102 +Neighbor list builds = 0 +Dangerous builds not checked +Total wall time: 0:00:00 diff --git a/examples/COUPLE/plugin/log.simple.plugin.4 b/examples/COUPLE/plugin/log.simple.plugin.4 new file mode 100644 index 0000000000..d76719dd7c --- /dev/null +++ b/examples/COUPLE/plugin/log.simple.plugin.4 @@ -0,0 +1,296 @@ +LAMMPS (18 Feb 2020) +Lattice spacing in x,y,z = 1.6796 1.6796 1.6796 +Created orthogonal box = (0 0 0) to (6.71838 6.71838 6.71838) + 1 by 1 by 2 MPI processor grid +Created 256 atoms + create_atoms CPU = 0.000265157 secs +Neighbor list info ... + update every 20 steps, delay 0 steps, check no + max neighbors/atom: 2000, page size: 100000 + master list distance cutoff = 2.8 + ghost atom cutoff = 2.8 + binsize = 1.4, bins = 5 5 5 + 1 neighbor lists, perpetual/occasional/extra = 1 0 0 + (1) pair lj/cut, perpetual + attributes: half, newton on + pair build: half/bin/atomonly/newton + stencil: half/bin/3d/newton + bin: standard +Setting up Verlet run ... 
+ Unit style : lj + Current step : 0 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 0 1.44 -6.7733681 0 -4.6218056 -5.0244179 + 10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175 +Loop time of 0.00115264 on 2 procs for 10 steps with 256 atoms + +Performance: 3747912.946 tau/day, 8675.724 timesteps/s +94.5% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00074885 | 0.00075021 | 0.00075156 | 0.0 | 65.09 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 0.00031829 | 0.00031943 | 0.00032056 | 0.0 | 27.71 +Output | 9.306e-06 | 2.6673e-05 | 4.4041e-05 | 0.0 | 2.31 +Modify | 2.0684e-05 | 2.0891e-05 | 2.1098e-05 | 0.0 | 1.81 +Other | | 3.544e-05 | | | 3.07 + +Nlocal: 128 ave 128 max 128 min +Histogram: 2 0 0 0 0 0 0 0 0 0 +Nghost: 1109 ave 1109 max 1109 min +Histogram: 2 0 0 0 0 0 0 0 0 0 +Neighs: 4992 ave 4992 max 4992 min +Histogram: 2 0 0 0 0 0 0 0 0 0 + +Total # of neighbors = 9984 +Ave neighs/atom = 39 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... + Unit style : lj + Current step : 10 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 10 1.1298532 -6.3095502 0 -4.6213906 -2.6058175 + 20 0.6239063 -5.557644 0 -4.6254403 0.97451173 +Loop time of 0.00120443 on 2 procs for 10 steps with 256 atoms + +Performance: 3586761.860 tau/day, 8302.689 timesteps/s +95.5% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00087798 | 0.00091359 | 0.0009492 | 0.0 | 75.85 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 0.00016739 | 0.00020368 | 0.00023997 | 0.0 | 16.91 +Output | 1.0124e-05 | 3.0513e-05 | 5.0901e-05 | 0.0 | 2.53 +Modify | 1.89e-05 | 1.9812e-05 | 2.0725e-05 | 0.0 | 1.64 +Other | | 3.683e-05 | | | 3.06 + +Nlocal: 128 ave 134 max 122 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1109 ave 1115 max 1103 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4976 ave 5205 max 4747 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9952 +Ave neighs/atom = 38.875 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 20 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 20 0.6239063 -5.5404291 0 -4.6082254 1.0394285 + 21 0.63845863 -5.5628733 0 -4.6089263 0.99398278 +Loop time of 0.000206062 on 2 procs for 1 steps with 256 atoms + +Performance: 2096456.406 tau/day, 4852.908 timesteps/s +94.1% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00012947 | 0.00013524 | 0.00014101 | 0.0 | 65.63 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 1.858e-05 | 2.4113e-05 | 2.9647e-05 | 0.0 | 11.70 +Output | 8.699e-06 | 2.4204e-05 | 3.9708e-05 | 0.0 | 11.75 +Modify | 2.34e-06 | 2.3705e-06 | 2.401e-06 | 0.0 | 1.15 +Other | | 2.013e-05 | | | 9.77 + +Nlocal: 128 ave 135 max 121 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1109 ave 1116 max 1102 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4852.5 ave 5106 max 4599 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9705 +Ave neighs/atom = 37.9102 +Force on 1 atom via extract_atom: -18.109 +Force on 1 atom via extract_variable: -18.109 +Neighbor list builds = 0 +Dangerous builds not checked +Force on 1 atom via extract_atom: 26.9581 +Force on 1 atom via extract_variable: 26.9581 +Setting up Verlet run ... + Unit style : lj + Current step : 21 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 21 0.63845863 -5.5628733 0 -4.6089263 0.99398278 + 31 0.7494946 -5.7306417 0 -4.6107913 0.41043597 +Loop time of 0.00119048 on 2 procs for 10 steps with 256 atoms + +Performance: 3628802.105 tau/day, 8400.005 timesteps/s +98.0% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00085276 | 0.00089699 | 0.00094123 | 0.0 | 75.35 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 0.00016896 | 0.00021444 | 0.00025992 | 0.0 | 18.01 +Output | 9.413e-06 | 2.5939e-05 | 4.2465e-05 | 0.0 | 2.18 +Modify | 1.8977e-05 | 2.0009e-05 | 2.1042e-05 | 0.0 | 1.68 +Other | | 3.31e-05 | | | 2.78 + +Nlocal: 128 ave 135 max 121 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1109 ave 1116 max 1102 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4844 ave 5096 max 4592 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9688 +Ave neighs/atom = 37.8438 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 31 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 31 0.7494946 -5.7306417 0 -4.6107913 0.41043597 + 51 0.71349216 -5.6772387 0 -4.6111811 0.52117681 +Loop time of 0.00252603 on 2 procs for 20 steps with 256 atoms + +Performance: 3420382.192 tau/day, 7917.551 timesteps/s +99.2% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0016245 | 0.0017014 | 0.0017784 | 0.2 | 67.36 +Neigh | 0.00025359 | 0.0002563 | 0.00025901 | 0.0 | 10.15 +Comm | 0.00036863 | 0.00045124 | 0.00053385 | 0.0 | 17.86 +Output | 9.839e-06 | 2.8031e-05 | 4.6223e-05 | 0.0 | 1.11 +Modify | 3.7027e-05 | 3.9545e-05 | 4.2063e-05 | 0.0 | 1.57 +Other | | 4.948e-05 | | | 1.96 + +Nlocal: 128 ave 132 max 124 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1100 ave 1101 max 1099 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4850 ave 4953 max 4747 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9700 +Ave neighs/atom = 37.8906 +Neighbor list builds = 1 +Dangerous builds not checked +Setting up Verlet run ... + Unit style : lj + Current step : 51 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 51 0.71349216 -5.6772387 0 -4.6111811 0.52117681 + 61 0.78045421 -5.7781094 0 -4.6120011 0.093808941 +Loop time of 0.00115444 on 2 procs for 10 steps with 256 atoms + +Performance: 3742065.976 tau/day, 8662.190 timesteps/s +96.5% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.00087346 | 0.00089311 | 0.00091275 | 0.0 | 77.36 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 0.00016192 | 0.0001823 | 0.00020269 | 0.0 | 15.79 +Output | 9.49e-06 | 2.6234e-05 | 4.2978e-05 | 0.0 | 2.27 +Modify | 1.9095e-05 | 1.9843e-05 | 2.0591e-05 | 0.0 | 1.72 +Other | | 3.296e-05 | | | 2.85 + +Nlocal: 128 ave 132 max 124 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1100 ave 1101 max 1099 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4850 ave 4953 max 4747 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9700 +Ave neighs/atom = 37.8906 +Neighbor list builds = 0 +Dangerous builds not checked +Setting up Verlet run ... 
+ Unit style : lj + Current step : 61 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 61 0.78045421 -5.7781094 0 -4.6120011 0.093808941 + 81 0.77743907 -5.7735004 0 -4.6118971 0.090822641 +Loop time of 0.00244325 on 2 procs for 20 steps with 256 atoms + +Performance: 3536279.919 tau/day, 8185.833 timesteps/s +99.0% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0016916 | 0.0017038 | 0.001716 | 0.0 | 69.73 +Neigh | 0.00025229 | 0.00025512 | 0.00025795 | 0.0 | 10.44 +Comm | 0.00035772 | 0.00036918 | 0.00038064 | 0.0 | 15.11 +Output | 1.0858e-05 | 2.7875e-05 | 4.4891e-05 | 0.0 | 1.14 +Modify | 3.817e-05 | 3.9325e-05 | 4.048e-05 | 0.0 | 1.61 +Other | | 4.796e-05 | | | 1.96 + +Nlocal: 128 ave 128 max 128 min +Histogram: 2 0 0 0 0 0 0 0 0 0 +Nghost: 1088.5 ave 1092 max 1085 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4850.5 ave 4851 max 4850 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9701 +Ave neighs/atom = 37.8945 +Neighbor list builds = 1 +Dangerous builds not checked +Deleted 256 atoms, new total = 0 +Setting up Verlet run ... + Unit style : lj + Current step : 81 + Time step : 0.005 +Per MPI rank memory allocation (min/avg/max) = 2.624 | 2.624 | 2.624 Mbytes +Step Temp E_pair E_mol TotEng Press + 81 0.6239063 -5.5404291 0 -4.6082254 1.0394285 + 91 0.75393007 -5.7375259 0 -4.6110484 0.39357367 +Loop time of 0.00118092 on 2 procs for 10 steps with 256 atoms + +Performance: 3658158.625 tau/day, 8467.960 timesteps/s +98.6% CPU use with 2 MPI tasks x no OpenMP threads + +MPI task timing breakdown: +Section | min time | avg time | max time |%varavg| %total +--------------------------------------------------------------- +Pair | 0.0008476 | 0.00089265 | 0.00093771 | 0.0 | 75.59 +Neigh | 0 | 0 | 0 | 0.0 | 0.00 +Comm | 0.00016335 | 0.00020946 | 0.00025557 | 0.0 | 17.74 +Output | 8.87e-06 | 2.5733e-05 | 4.2595e-05 | 0.0 | 2.18 +Modify | 1.8755e-05 | 1.9814e-05 | 2.0872e-05 | 0.0 | 1.68 +Other | | 3.326e-05 | | | 2.82 + +Nlocal: 128 ave 135 max 121 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Nghost: 1109 ave 1116 max 1102 min +Histogram: 1 0 0 0 0 0 0 0 0 1 +Neighs: 4852.5 ave 5106 max 4599 min +Histogram: 1 0 0 0 0 0 0 0 0 1 + +Total # of neighbors = 9705 +Ave neighs/atom = 37.9102 +Neighbor list builds = 0 +Dangerous builds not checked +Total wall time: 0:00:00 diff --git a/examples/COUPLE/plugin/simple.c b/examples/COUPLE/plugin/simple.c new file mode 100644 index 0000000000..541e4fd432 --- /dev/null +++ b/examples/COUPLE/plugin/simple.c @@ -0,0 +1,175 @@ +/* ---------------------------------------------------------------------- + LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator + www.cs.sandia.gov/~sjplimp/lammps.html + Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories + + Copyright (2003) Sandia Corporation. Under the terms of Contract + DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains + certain rights in this software. This software is distributed under + the GNU General Public License. + + See the README file in the top-level LAMMPS directory. 
+------------------------------------------------------------------------- */ + +/* c_driver = simple example of how an umbrella program + can invoke LAMMPS as a library on some subset of procs + Syntax: simpleC P in.lammps + P = # of procs to run LAMMPS on + must be <= # of procs the driver code itself runs on + in.lammps = LAMMPS input script + See README for compilation instructions */ + +#include +#include +#include +#include +#include "liblammpsplugin.h" /* this is the include for the plugin loader */ + +int main(int narg, char **arg) +{ + int n,me,nprocs; + int nprocs_lammps, lammps; + char line[1024]; + MPI_Comm comm_lammps; + FILE *fp = NULL; + liblammpsplugin_t *plugin = NULL; + void *lmp = NULL; + double *x = NULL; + double *v = NULL; + char *strtwo; + char *cmds[2]; + int *type = NULL; + + /* setup MPI and various communicators + driver runs on all procs in MPI_COMM_WORLD + comm_lammps only has 1st P procs (could be all or any subset) */ + + MPI_Init(&narg,&arg); + + if (narg != 4) { + printf("Syntax: simpleC P in.lammps /path/to/liblammps.so\n"); + exit(1); + } + + MPI_Comm_rank(MPI_COMM_WORLD,&me); + MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + + nprocs_lammps = atoi(arg[1]); + if (nprocs_lammps > nprocs) { + if (me == 0) + printf("ERROR: LAMMPS cannot use more procs than available\n"); + MPI_Abort(MPI_COMM_WORLD,1); + } + + if (me < nprocs_lammps) lammps = 1; + else lammps = MPI_UNDEFINED; + MPI_Comm_split(MPI_COMM_WORLD,lammps,0,&comm_lammps); + + /* open LAMMPS input script */ + + if (me == 0) { + fp = fopen(arg[2],"r"); + if (fp == NULL) { + printf("ERROR: Could not open LAMMPS input script\n"); + MPI_Abort(MPI_COMM_WORLD,1); + } + } + + /* run the input script thru LAMMPS one line at a time until end-of-file + driver proc 0 reads a line, Bcasts it to all procs + (could just send it to proc 0 of comm_lammps and let it Bcast) + all LAMMPS procs call lammps_command() on the line */ + + if (lammps == 1) { + plugin = liblammpsplugin_load(arg[3]); + if (plugin == NULL) { + if (me == 0) printf("ERROR: Could not load shared LAMMPS library\n"); + MPI_Abort(MPI_COMM_WORLD,1); + } + } + if (lammps == 1) plugin->open(0,NULL,comm_lammps,&lmp); + + while (1) { + if (me == 0) { + if (fgets(line,1024,fp) == NULL) n = 0; + else n = strlen(line) + 1; + if (n == 0) fclose(fp); + } + MPI_Bcast(&n,1,MPI_INT,0,MPI_COMM_WORLD); + if (n == 0) break; + MPI_Bcast(line,n,MPI_CHAR,0,MPI_COMM_WORLD); + if (lammps == 1) plugin->command(lmp,line); + } + + /* run 10 more steps + get coords from LAMMPS + change coords of 1st atom + put coords back into LAMMPS + run a single step with changed coords */ + + if (lammps == 1) { + plugin->command(lmp,"run 10"); + + int natoms = plugin->get_natoms(lmp); + x = (double *) malloc(3*natoms*sizeof(double)); + plugin->gather_atoms(lmp,"x",1,3,x); + v = (double *) malloc(3*natoms*sizeof(double)); + plugin->gather_atoms(lmp,"v",1,3,v); + double epsilon = 0.1; + x[0] += epsilon; + plugin->scatter_atoms(lmp,"x",1,3,x); + + plugin->command(lmp,"run 1"); + } + + // extract force on single atom two different ways + + if (lammps == 1) { + double **f = (double **) plugin->extract_atom(lmp,"f"); + printf("Force on 1 atom via extract_atom: %g\n",f[0][0]); + + double *fx = (double *) plugin->extract_variable(lmp,"fx","all"); + printf("Force on 1 atom via extract_variable: %g\n",fx[0]); + } + + /* use commands_string() and commands_list() to invoke more commands */ + + strtwo = (char *)"run 10\nrun 20"; + if (lammps == 1) plugin->commands_string(lmp,strtwo); + + cmds[0] = (char 
+  cmds[1] = (char *)"run 20";
+  if (lammps == 1) plugin->commands_list(lmp,2,cmds);
+
+  /* delete all atoms
+     create_atoms() to create new ones with old coords, vels
+     initial thermo should be same as step 20 */
+
+  if (lammps == 1) {
+    int natoms = plugin->get_natoms(lmp);
+    type = (int *) malloc(natoms*sizeof(int));
+    int i;
+    for (i = 0; i < natoms; i++) type[i] = 1;
+
+    plugin->command(lmp,"delete_atoms group all");
+    plugin->create_atoms(lmp,natoms,NULL,type,x,v,NULL,0);
+    plugin->command(lmp,"run 10");
+  }
+
+  if (x) free(x);
+  if (v) free(v);
+  if (type) free(type);
+
+  // close down LAMMPS
+
+  if (lammps == 1) {
+    plugin->close(lmp);
+    liblammpsplugin_release(plugin);
+  }
+
+  /* close down MPI */
+
+  if (lammps == 1) MPI_Comm_free(&comm_lammps);
+  MPI_Barrier(MPI_COMM_WORLD);
+  MPI_Finalize();
+}
diff --git a/src/library.h b/src/library.h
index 9e6c4f45f8..2076371316 100644
--- a/src/library.h
+++ b/src/library.h
@@ -11,6 +11,9 @@
    See the README file in the top-level LAMMPS directory.
 ------------------------------------------------------------------------- */
 
+#ifndef LAMMPS_LIBRARY_H
+#define LAMMPS_LIBRARY_H
+
 /*
    C or Fortran style library interface to LAMMPS
    new LAMMPS-specific functions can be added
@@ -114,6 +117,7 @@ int lammps_get_last_error_message(void *, char *, int);
 }
 #endif
 
+#endif /* LAMMPS_LIBRARY_H */
 
 /* ERROR/WARNING messages:
 
 E: Library error: issuing LAMMPS command during run

From e87b3a21c2a4704ac69e3628bf028f8deb34c0c7 Mon Sep 17 00:00:00 2001
From: Axel Kohlmeyer
Date: Tue, 25 Feb 2020 14:10:55 -0500
Subject: [PATCH 2/2] complete documentation for LAMMPS plugin coupling example

---
 doc/src/Howto_couple.rst      |  2 ++
 examples/COUPLE/README        |  1 +
 examples/COUPLE/plugin/README | 39 ++++++++++++++---------------------
 3 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/doc/src/Howto_couple.rst b/doc/src/Howto_couple.rst
index 849415a0e3..a46bb3f2f2 100644
--- a/doc/src/Howto_couple.rst
+++ b/doc/src/Howto_couple.rst
@@ -70,6 +70,8 @@ examples/COUPLE/README for more details:
 
 * simple: simple driver programs in C++ and C which invoke LAMMPS as a
   library
+* plugin: simple driver program in C which invokes LAMMPS as a plugin
+  from a shared library.
 * lammps\_quest: coupling of LAMMPS and `Quest `_, to run classical
   MD with quantum forces calculated by a density functional code
 * lammps\_spparks: coupling of LAMMPS and `SPPARKS `_, to couple

diff --git a/examples/COUPLE/README b/examples/COUPLE/README
index 05b38335fb..5a9f297b0f 100644
--- a/examples/COUPLE/README
+++ b/examples/COUPLE/README
@@ -27,6 +27,7 @@ These are the sub-directories included in this directory:
 simple        simple example of driver code calling LAMMPS as a lib
 multiple      example of driver code calling multiple instances of LAMMPS
+plugin        example for loading LAMMPS at runtime from a shared library
 lammps_mc     client/server coupling of Monte Carlo client
               with LAMMPS server for energy evaluation
 lammps_nwchem client/server coupling of LAMMPS client with

diff --git a/examples/COUPLE/plugin/README b/examples/COUPLE/plugin/README
index 7075689d6f..b035ed3c0e 100644
--- a/examples/COUPLE/plugin/README
+++ b/examples/COUPLE/plugin/README
@@ -1,21 +1,22 @@
-This directory has a simple C code that shows how
-LAMMPS can be linked to a driver application as a library.  The purpose
-is to illustrate how another code could perform computations while
-using LAMMPS to perform MD on all or a subset of the processors, or
-how an umbrella code or script could call both LAMMPS and some other
-code to perform a coupled calculation.
+This directory has a simple C code that shows how LAMMPS can be included
+in an application as a shared library loaded at runtime.  The code is
+essentially the same as in the "simple" example that links to LAMMPS
+either with a static or shared library.  The purpose is to illustrate
+how another code could be built without having a LAMMPS library present
+and then load the (separately compiled) shared library.
 
 simple.c is the C driver
 liblammpsplugin.c is the LAMMPS library plugin loader
 
-The 3 codes do the same thing, so you can compare them to see how to
-drive LAMMPS from each language.  See lammps/python/example/simple.py
-to do something similar from Python.  The Fortran driver requires an
-additional wrapper library that interfaces the C interface of the
-LAMMPS library to Fortran and also translates the MPI communicator
-from Fortran to C.
+You can build the driver executable with compile lines
+like the following.
 
-First build LAMMPS as a library (see examples/COUPLE/README), e.g.
+mpicc -c -O -Wall -g -I$HOME/lammps/src liblammpsplugin.c
+mpicc -c -O -Wall -g simple.c
+mpicc simple.o liblammpsplugin.o -ldl -o simpleC
+
+You also need to build LAMMPS as a shared library
+(see examples/COUPLE/README), e.g.
 
 cd $HOME/lammps/src
 make mode=shlib mpi
@@ -28,15 +29,6 @@ cd build-shared
 cmake -D BUILD_LIB=on -D BUILD_SHARED_LIBS=on ../cmake
 make
 
-You can then build any of the driver codes with compile lines like
-these, which include paths to the LAMMPS library interface, and
-linking with FFTW (only needed if you built LAMMPS as a library with
-its PPPM solver).
-
-mpicc -c simple.c
-mpicc simple.o -llammps -lfftw -o simpleC
-
-
 You then run simpleC on a parallel machine
 on some number of processors Q with 3 arguments:
@@ -66,5 +58,6 @@ of LAMMPS through the function pointers in the liblammpsplugin_t struct.
 This has the benefit that your binary is not linked to liblammps.so directly
 and thus you can change the name of the shared library (e.g. to have
 different variants compiled, or to load different LAMMPS versions without
-having to update your executable).
+having to update your executable).  The shared library still has to be
+compatible with the compilation settings of the plugin code.
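+
+As a minimal sketch of the plugin API (not one of the files in this
+directory), a serial driver that only loads the shared library, runs
+an input script, and shuts down could look like this; the path
+"./liblammps.so" and the script name "in.lj" are placeholders:
+
+  #include <stdio.h>
+  #include "liblammpsplugin.h"
+
+  int main(int argc, char **argv)
+  {
+    void *lmp = NULL;
+    liblammpsplugin_t *plugin = liblammpsplugin_load("./liblammps.so");
+    if (plugin == NULL) return 1;        /* dlopen() failed */
+
+    plugin->open_no_mpi(0, NULL, &lmp);  /* create a LAMMPS instance */
+    plugin->file(lmp, "in.lj");          /* process an input script */
+    printf("LAMMPS version %d\n", plugin->version(lmp));
+    plugin->close(lmp);                  /* destroy the instance */
+    liblammpsplugin_release(plugin);     /* dlclose() the library */
+    return 0;
+  }
+
+Like simple.c, this must be compiled with mpicc (liblammpsplugin.h
+includes mpi.h) and linked with -ldl.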