Merge pull request #1820 from ornladios/adios_read_dump

Support read_dump with dump style 'adios'
Axel Kohlmeyer 2020-01-20 16:27:09 -05:00 committed by GitHub
commit f03cfba029
25 changed files with 1342 additions and 57 deletions

View File

@ -46,7 +46,8 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`dimension <dimension>`
* :doc:`displace_atoms <displace_atoms>`
* :doc:`dump <dump>`
* :doc:`dump adios <dump_adios>`
* :doc:`dump atom/adios <dump_adios>`
* :doc:`dump custom/adios <dump_adios>`
* :doc:`dump image <dump_image>`
* :doc:`dump movie <dump_image>`
* :doc:`dump netcdf <dump_netcdf>`
@ -131,11 +132,10 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`units <units>`
* :doc:`variable <variable>`
* :doc:`velocity <velocity>`
* :doc:`write_coeff <write_coeff>`
* :doc:`write_data <write_data>`
* :doc:`write_dump <write_dump>`
* :doc:`write_restart <write_restart>`
*
* :doc:`write\_coeff <write_coeff>`
* :doc:`write\_data <write_data>`
* :doc:`write\_dump <write_dump>`
* :doc:`write\_restart <write_restart>`
*
*
*

View File

@ -1096,8 +1096,9 @@ USER-ADIOS package
**Contents:**
ADIOS is a high-performance I/O library. This package implements the
dump "atom/adios" and dump "custom/adios" commands to write data using
the ADIOS library.
:doc:`dump atom/adios <dump_adios>`, :doc:`dump custom/adios <dump_adios>` and
:doc:`read_dump ... format adios <read_dump>`
commands to write and read data using the ADIOS library.
**Authors:** Norbert Podhorszki (ORNL) from the ADIOS developer team.
@ -1111,6 +1112,10 @@ This package has :ref:`specific installation instructions <user-adios>` on the :
* src/USER-ADIOS/README
* examples/USER/adios
* https://github.com/ornladios/ADIOS2
* :doc:`dump atom/adios <dump_adios>`
* :doc:`dump custom/adios <dump_adios>`
* :doc:`read_dump <read_dump>`
----------

View File

@ -21,8 +21,11 @@ dump command
:doc:`dump movie <dump_image>` command
======================================
:doc:`dump adios <dump_adios>` command
======================================
:doc:`dump atom/adios <dump_adios>` command
===========================================
:doc:`dump custom/adios <dump_adios>` command
=============================================
Syntax
""""""
@ -45,12 +48,12 @@ Syntax
*atom* args = none
*atom/gz* args = none
*atom/mpiio* args = none
*atom/adios* args = none, discussed on :doc:`dump adios <dump_adios>` doc page
*atom/adios* args = none, discussed on :doc:`dump atom/adios <dump_adios>` doc page
*cfg* args = same as *custom* args, see below
*cfg/gz* args = same as *custom* args, see below
*cfg/mpiio* args = same as *custom* args, see below
*custom*\ , *custom/gz*\ , *custom/mpiio* args = see below
*custom/adios* args = same as *custom* args, discussed on :doc:`dump adios <dump_adios>` doc page
*custom/adios* args = same as *custom* args, discussed on :doc:`dump custom/adios <dump_adios>` doc page
*dcd* args = none
*h5md* args = discussed on :doc:`dump h5md <dump_h5md>` doc page
*image* args = discussed on :doc:`dump image <dump_image>` doc page
@ -713,7 +716,8 @@ LAMMPS was built with that package. See the :doc:`Build package <Build_package>
Related commands
""""""""""""""""
:doc:`dump adios <dump_adios>` :doc:`dump h5md <dump_h5md>`, :doc:`dump image <dump_image>`,
:doc:`dump atom/adios <dump_adios>`, :doc:`dump custom/adios <dump_adios>`,
:doc:`dump h5md <dump_h5md>`, :doc:`dump image <dump_image>`,
:doc:`dump molfile <dump_molfile>`, :doc:`dump\_modify <dump_modify>`,
:doc:`undump <undump>`

View File

@ -1,6 +1,7 @@
.. index:: dump atoms/adios
.. index:: dump atom/adios
.. index:: dump custom/adios
dump atoms/adios command
dump atom/adios command
=========================
dump custom/adios command
@ -12,7 +13,7 @@ Syntax
.. parsed-literal::
dump ID group-ID atoms/adios N file.bp
dump ID group-ID atom/adios N file.bp
dump ID group-ID custom/adios N file.bp args

View File

@ -43,6 +43,9 @@ Syntax
*format* values = format of dump file, must be last keyword if used
*native* = native LAMMPS dump file
*xyz* = XYZ file
*adios* [*timeout* value] = dump file written by the :doc:`dump adios <dump_adios>` command
*timeout* = how long to wait for the requested timestep to arrive when running concurrently.
The value is a floating-point number interpreted in seconds.
*molfile* style path = VMD molfile plugin interface
style = *dcd* or *xyz* or others supported by molfile plugins
path = optional path for location of molfile plugins
@ -65,6 +68,8 @@ Examples
read_dump dump.xyz 10 x y z box no format molfile xyz ../plugins
read_dump dump.dcd 0 x y z format molfile dcd
read_dump dump.file 1000 x y z vx vy vz format molfile lammpstrj /usr/local/lib/vmd/plugins/LINUXAMD64/plugins/molfile
read_dump dump.bp 5000 x y z vx vy vz format adios
read_dump dump.bp 5000 x y z vx vy vz format adios timeout 60.0
Description
"""""""""""
@ -136,6 +141,17 @@ contain multiple directories separated by a colon (or semi-colon on
windows). The *path* keyword is optional and defaults to ".",
i.e. the current directory.
The *adios* format supports reading data that was written by the
:doc:`dump adios <dump_adios>` command. The
entire dump is read in parallel across all processes, dividing
the atoms evenly among the processes. The number of writers that
wrote the dump file does not matter. Using the adios style for
dump and read_dump is a convenient way to dump all atoms from *N*
writers and read them back with *M* readers. If one is running two
LAMMPS instances concurrently, where one dumps data and the other is
reading it with the rerun command, the *timeout* option can be specified
so that the reader waits for the arrival of the requested step.
Support for other dump format readers may be added in the future.
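
A minimal sketch of the concurrent use case (illustration only; the file
name *shared_dump.bp* and the chosen dump fields are arbitrary, not part of
this page): the writer instance dumps with an adios style while a second,
concurrently started instance replays the same file with a timeout.

.. parsed-literal::

   # writer instance
   dump 1 all custom/adios 100 shared_dump.bp id type x y z vx vy vz
   run 10000

   # reader instance, started concurrently in a separate job
   rerun shared_dump.bp dump x y z vx vy vz format adios timeout 60.0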
@ -147,7 +163,19 @@ and box information.
The dump file is scanned for a snapshot with a timestamp that matches
the specified *Nstep*\ . This means the LAMMPS timestep the dump file
snapshot was written on for the *native* format. Note that the *xyz*
snapshot was written on for the *native* or *adios* formats.
The list of timestamps available in an adios .bp file is stored in the
variable *ntimestep*:
.. parsed-literal::
$ bpls dump.bp -d ntimestep
uint64_t ntimestep 5*scalar
(0) 0 50 100 150 200
Note that the *xyz*
and *molfile* formats do not store the timestep. For these formats,
timesteps are numbered logically, in a sequential manner, starting
from 0. Thus to access the 10th snapshot in an *xyz* or *molfile*
@ -160,7 +188,8 @@ and the current simulation box is orthogonal or vice versa. A warning
will be generated if the snapshot box boundary conditions (periodic,
shrink-wrapped, etc) do not match the current simulation boundary
conditions, but the boundary condition information in the snapshot is
otherwise ignored. See the "boundary" command for more details.
otherwise ignored. See the "boundary" command for more details. The
*adios* reader does the same as the *native* format reader.
For the *xyz* format, no information about the box is available, so
you must set the *box* flag to *no*\ . See details below.
@ -231,6 +260,18 @@ consistent from snapshot to snapshot in the molfile dump file.
See the :doc:`dump\_modify sort <dump_modify>` command if the dump file
was written by LAMMPS.
The *adios* format supports all fields that the *native* format supports
except for the *q* charge field.
The list of fields stored in an adios .bp file is recorded in the attributes
*columns* (array of short strings) and *columnstr* (space-separated single string).
.. parsed-literal::
$ bpls -la dump.bp column*
string columns attr = {"id", "type", "x", "y", "z", "vx", "vy", "vz"}
string columnstr attr = "id type x y z vx vy vz "
----------
@ -357,10 +398,14 @@ The *molfile* dump file formats are part of the USER-MOLFILE package.
They are only enabled if LAMMPS was built with that package. See the
:doc:`Build package <Build_package>` doc page for more info.
To write and read adios .bp files, you must compile LAMMPS with the
:ref:`USER-ADIOS <PKG-USER-ADIOS>` package.
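
For example, a CMake configuration along these lines enables the package
(a sketch only; the *ADIOS2_DIR* hint and the build-directory layout are
assumptions for the common case where ADIOS 2.x is installed in a
non-standard location):

.. parsed-literal::

   cmake -D PKG_USER-ADIOS=yes -D ADIOS2_DIR=/path/to/adios2-install ../cmake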
Related commands
""""""""""""""""
:doc:`dump <dump>`, :doc:`dump molfile <dump_molfile>`,
:doc:`dump adios <dump_adios>`,
:doc:`read\_data <read_data>`, :doc:`read\_restart <read_restart>`,
:doc:`rerun <rerun>`

View File

@ -44,6 +44,8 @@ Examples
rerun dump.vels dump x y z vx vy vz box yes format molfile lammpstrj
rerun dump.dcd dump x y z box no format molfile dcd
rerun ../run7/dump.file.gz skip 2 dump x y z box yes
rerun dump.bp dump x y z box no format adios
rerun dump.bp dump x y z vx vy vz format adios timeout 10.0
Description
"""""""""""

View File

@ -3,14 +3,14 @@
<!-- example engines
<engine type="BPFile">
<engine type="BP4">
<parameter key="substreams" value="10"/>
</engine>
<engine type="HDF5">
The 'substreams' parameter in BPFile controls how many
The 'substreams' parameter in BP4 controls how many
files on disk are created. This number should be proportional
to the number of servers in the parallel file system,
NOT to the number of processes.
@ -22,7 +22,7 @@
====================================================-->
<io name="atom">
<engine type="BPFile">
<engine type="BP4">
<parameter key="substreams" value="1"/>
</engine>
</io>
@ -32,7 +32,7 @@
====================================================-->
<io name="custom">
<engine type="BPFile">
<engine type="BP4">
<parameter key="substreams" value="1"/>
</engine>
</io>

View File

@ -39,8 +39,9 @@ fix 10 all balance 50 0.9 rcb
compute 1 all property/atom proc
variable p atom c_1%10
dump 2 all custom 50 balance.dump id v_p x y z
dump 3 all custom/adios 50 balance_custom.bp id v_p x y z
dump 5 all atom 50 balance_atom.dump
dump 2 all custom 50 balance_custom.dump id type x y z vx vy vz ix iy iz fx fy fz
dump 3 all custom/adios 50 balance_custom.bp id type x y z vx vy vz ix iy iz fx fy fz
dump 4 all atom/adios 50 balance_atom.bp
#dump 3 all image 50 image.*.jpg v_p type &

View File

@ -0,0 +1,64 @@
# 2d circle of particles inside a box with LJ walls
variable b index 0
variable x index 50
variable y index 20
variable d index 20
variable v index 5
variable w index 2
units lj
dimension 2
atom_style atomic
boundary f f p
lattice hex 0.85
region box block 0 $x 0 $y -0.5 0.5
create_box 1 box
region circle sphere $(v_d/2+1) $(v_d/2/sqrt(3.0)+1) 0.0 $(v_d/2)
create_atoms 1 region circle
mass 1 1.0
velocity all create 0.5 87287 loop geom
velocity all set $v $w 0 sum yes
pair_style lj/cut 2.5
pair_coeff 1 1 10.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
fix 2 all wall/lj93 xlo 0.0 1 1 2.5 xhi $x 1 1 2.5
fix 3 all wall/lj93 ylo 0.0 1 1 2.5 yhi $y 1 1 2.5
comm_style tiled
fix 10 all balance 50 0.9 rcb
## read_dump examples using native or adios dump readers
#read_dump balance_custom.dump 200 x y
#read_dump balance_custom.dump 200 x y vx vy ix iy fx fy
#read_dump balance_atom.bp 200 x y format adios
read_dump balance_custom.bp 200 x y vx vy fx fy ix iy format adios
compute 1 all property/atom proc
variable p atom c_1%10
dump 2 all custom 50 balance2.dump id v_p x y z
dump 3 all custom/adios 50 balance_custom2.bp id v_p x y z
dump 4 all atom/adios 50 balance_atom2.bp
#dump 3 all image 50 image.*.jpg v_p type &
# adiam 1.0 view 0 0 zoom 1.8 subbox yes 0.02
#variable colors string &
# "red green blue yellow white &
# purple pink orange lime gray"
#dump_modify 3 pad 5 amap 0 10 sa 1 10 ${colors}
thermo_style custom step temp epair press f_10[3] f_10
thermo 100
run 200
write_dump all atom/adios balance_atom_final2.bp
write_dump all atom balance_atom_final2.dump

View File

@ -0,0 +1,51 @@
Examples of how to use the rerun and read_dump commands with ADIOS
in.first - run on any number of procs for any size problem
in.rerun - run on same or different proc count for same size problem
in.read_dump - ditto to in.rerun
The thermo output on the same timesteps should be exactly identical,
or identical to within the compression accuracy if compression is used.
$ grep "^ *[2468]00 " log.20Jan20.first.g++.7.4
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
$ grep "^ *[2468]00 " log.20Jan20.rerun.g++.7.4
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
$ grep "^ *[2468]00 " log.20Jan20.read_dump.g++.7.4 | sort -n
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
To dump the atom table from lj_dump.bp, use the ADIOS bpls utility:
$ bpls -l lj_dump.bp -d atoms -n 8 --format "%g" | less -S
double atoms 11*{32000, 8} = -3.97451 / 32000
( 0, 0,0) 1 1 0 0 0 -0.145828 -0.669946 -2.01722
( 0, 1,0) 2 1 0.839798 0.839798 0 -1.2587 1.4644 1.58791
( 0, 2,0) 3 1 0.839798 0 0.839798 -1.26427 -0.661341 1.11072
...
(10,31997,0) 32000 1 31.7633 32.7835 33.2632 -0.32891 0.00446725 0.478541
(10,31998,0) 31994 1 31.1663 32.7753 32.3628 -1.45085 0.668693 0.339836
(10,31999,0) 31924 1 32.8007 32.8736 32.5882 -0.980419 -0.237448 -1.21369
Concurrent rerun use case
=========================
The ADIOS BP4 engine allows for reading from the dump file (completed steps)
while the writer is still running (and dumping new steps). In two terminals,
one can run the in.first and in.rerun examples concurrently. The second
simulation will block waiting for the steps from the first, up to the timeout
specified in the rerun command.
$ mpirun -n 2 lmp -in in.first
$ mpirun -n 4 lmp -in in.rerun

View File

@ -0,0 +1,62 @@
<?xml version="1.0"?>
<adios-config>
<!-- example engines
<engine type="BP4">
<parameter key="substreams" value="10"/>
</engine>
<engine type="HDF5">
The 'substreams' parameter in BP4 controls how many
files on disk are created. This number should be proportional
to the number of servers in the parallel file system,
NOT to the number of processes.
substreams=1 is generally a very inefficient setting for large parallel runs.
To compress the atoms table, add a <variable...> element to <io>
Note, outside the <engine> element!
Supported compression methods are:
lossless: bzip2, blosc
lossy: sz, zfp, mgard
ADIOS must be built with the corresponding compression library.
<variable name="atoms">
<operation type="sz">
<parameter key="accuracy" value="0.001"/>
</operation>
</variable>
-->
<!--====================================================
Configuration for the dump atom/adios command
====================================================-->
<io name="atom">
<engine type="BP4">
<parameter key="substreams" value="1"/>
</engine>
</io>
<!--====================================================
Configuration for the dump custom/adios command
====================================================-->
<io name="custom">
<engine type="BP4">
<parameter key="substreams" value="1"/>
</engine>
</io>
<!--====================================================
Configuration for the read_dump adios command
====================================================-->
<io name="read_dump">
<engine type="BP4">
</engine>
</io>
</adios-config>

View File

@ -0,0 +1,34 @@
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable yy equal 20*$y
variable zz equal 20*$z
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 ${xx} 0 ${yy} 0 ${zz}
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
#dump 1 all custom 100 lj.dump id type x y z vx vy vz
dump 1 all custom/adios 100 lj_dump.bp id type x y z vx vy vz
thermo 100
run 1000

View File

@ -0,0 +1,37 @@
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable yy equal 20*$y
variable zz equal 20*$z
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 ${xx} 0 ${yy} 0 ${zz}
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
thermo 100
read_dump lj_dump.bp 200 x y z vx vy vz format adios
run 0 post no
read_dump lj_dump.bp 800 x y z vx vy vz format adios
run 0 post no
read_dump lj_dump.bp 600 x y z vx vy vz format adios
run 0 post no
read_dump lj_dump.bp 400 x y z vx vy vz format adios
run 0 post no

View File

@ -0,0 +1,29 @@
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable yy equal 20*$y
variable zz equal 20*$z
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 ${xx} 0 ${yy} 0 ${zz}
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
thermo 100
rerun lj_dump.bp first 200 last 800 every 200 &
dump x y z vx vy vz format adios timeout 10.0

View File

@ -0,0 +1,100 @@
LAMMPS (09 Jan 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable xx equal 20*1
variable yy equal 20*$y
variable yy equal 20*1
variable zz equal 20*$z
variable zz equal 20*1
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 ${xx} 0 ${yy} 0 ${zz}
region box block 0 20 0 ${yy} 0 ${zz}
region box block 0 20 0 20 0 ${zz}
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
create_atoms CPU = 0.00244845 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
#dump 1 all custom 100 lj.dump id type x y z vx vy vz
dump 1 all custom/adios 100 lj_dump.bp id type x y z vx vy vz
thermo 100
run 1000
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 24 24 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.869 | 6.869 | 6.869 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6134356 -5.0197073
100 0.7574531 -5.7585055 0 -4.6223613 0.20726105
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
300 0.74624286 -5.741962 0 -4.6226327 0.32016436
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
500 0.73249345 -5.7206946 0 -4.6219887 0.44253023
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
700 0.71489947 -5.693532 0 -4.6212164 0.61322381
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
900 0.70799522 -5.6828388 0 -4.6208791 0.66961272
1000 0.70325878 -5.6750833 0 -4.6202281 0.7112575
Loop time of 15.1503 on 4 procs for 1000 steps with 32000 atoms
Performance: 28514.269 tau/day, 66.005 timesteps/s
99.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 12.936 | 13.166 | 13.291 | 3.7 | 86.90
Neigh | 1.1655 | 1.1802 | 1.1882 | 0.8 | 7.79
Comm | 0.37464 | 0.50485 | 0.75152 | 20.5 | 3.33
Output | 0.022504 | 0.022534 | 0.022602 | 0.0 | 0.15
Modify | 0.24633 | 0.24883 | 0.2527 | 0.5 | 1.64
Other | | 0.02831 | | | 0.19
Nlocal: 8000 ave 8049 max 7942 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Nghost: 8632.5 ave 8685 max 8591 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 299934 ave 303105 max 295137 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Total # of neighbors = 1199738
Ave neighs/atom = 37.4918
Neighbor list builds = 50
Dangerous builds not checked
Total wall time: 0:00:15

View File

@ -0,0 +1,120 @@
LAMMPS (09 Jan 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable xx equal 20*1
variable yy equal 20*$y
variable yy equal 20*1
variable zz equal 20*$z
variable zz equal 20*1
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 ${xx} 0 ${yy} 0 ${zz}
region box block 0 20 0 ${yy} 0 ${zz}
region box block 0 20 0 20 0 ${zz}
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
create_atoms CPU = 0.00378291 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
thermo 100
read_dump lj_dump.bp 200 x y z vx vy vz format adios
orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
32000 atoms before read
32000 atoms in snapshot
0 atoms purged
32000 atoms replaced
0 atoms trimmed
0 atoms added
32000 atoms after read
run 0 post no
WARNING: No fixes defined, atoms won't move (src/verlet.cpp:52)
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 24 24 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.841 | 6.843 | 6.846 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
Loop time of 2.35651e-06 on 4 procs for 0 steps with 32000 atoms
read_dump lj_dump.bp 800 x y z vx vy vz format adios
orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
32000 atoms before read
32000 atoms in snapshot
0 atoms purged
32000 atoms replaced
0 atoms trimmed
0 atoms added
32000 atoms after read
run 0 post no
WARNING: No fixes defined, atoms won't move (src/verlet.cpp:52)
Per MPI rank memory allocation (min/avg/max) = 6.841 | 6.843 | 6.846 Mbytes
Step Temp E_pair E_mol TotEng Press
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
Loop time of 2.1345e-06 on 4 procs for 0 steps with 32000 atoms
read_dump lj_dump.bp 600 x y z vx vy vz format adios
orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
32000 atoms before read
32000 atoms in snapshot
0 atoms purged
32000 atoms replaced
0 atoms trimmed
0 atoms added
32000 atoms after read
run 0 post no
WARNING: No fixes defined, atoms won't move (src/verlet.cpp:52)
Per MPI rank memory allocation (min/avg/max) = 6.841 | 6.843 | 6.846 Mbytes
Step Temp E_pair E_mol TotEng Press
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
Loop time of 2.58526e-06 on 4 procs for 0 steps with 32000 atoms
read_dump lj_dump.bp 400 x y z vx vy vz format adios
orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
32000 atoms before read
32000 atoms in snapshot
0 atoms purged
32000 atoms replaced
0 atoms trimmed
0 atoms added
32000 atoms after read
run 0 post no
WARNING: No fixes defined, atoms won't move (src/verlet.cpp:52)
Per MPI rank memory allocation (min/avg/max) = 6.841 | 6.843 | 6.846 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
Loop time of 2.05977e-06 on 4 procs for 0 steps with 32000 atoms
Total wall time: 0:00:00

View File

@ -0,0 +1,88 @@
LAMMPS (09 Jan 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt
variable x index 1
variable y index 1
variable z index 1
variable xx equal 20*$x
variable xx equal 20*1
variable yy equal 20*$y
variable yy equal 20*1
variable zz equal 20*$z
variable zz equal 20*1
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 ${xx} 0 ${yy} 0 ${zz}
region box block 0 20 0 ${yy} 0 ${zz}
region box block 0 20 0 20 0 ${zz}
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
create_atoms CPU = 0.00374608 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
thermo 100
rerun lj_dump.bp first 200 last 800 every 200 dump x y z vx vy vz format adios
WARNING: No fixes defined, atoms won't move (src/verlet.cpp:52)
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 24 24 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.716 | 6.718 | 6.721 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.75953175 -5.7618892 0 -4.6226272 0.20910575
400 0.74155675 -5.7343359 0 -4.6220356 0.3777989
600 0.72087255 -5.7029314 0 -4.6216563 0.55730354
800 0.70876958 -5.6840594 0 -4.6209382 0.66822293
Loop time of 0.236006 on 4 procs for 4 steps with 32000 atoms
Performance: 7321.841 tau/day, 16.949 timesteps/s
94.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.236 | | |100.00
Nlocal: 8000 ave 8073 max 7933 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Nghost: 8693.25 ave 8731 max 8658 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Neighs: 299786 ave 302951 max 293883 min
Histogram: 1 0 0 0 0 0 0 1 1 1
Total # of neighbors = 1199144
Ave neighs/atom = 37.4733
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,6 +1,6 @@
This package provides the adios dump and restart styles.
This package provides the adios dump and read_dump styles.
See the doc page for the "dump adios" and "restart adios" commands.
See the doc page for the "dump adios" and "read_dump ... format adios" commands.
These styles require having ADIOS 2.x itself installed on your system.
Configure LAMMPS with CMake

View File

@ -16,7 +16,6 @@
------------------------------------------------------------------------- */
#include "dump_atom_adios.h"
#include <cstring>
#include "atom.h"
#include "domain.h"
#include "error.h"
@ -24,6 +23,7 @@
#include "memory.h"
#include "universe.h"
#include "update.h"
#include <cstring>
#include "adios2.h"
@ -50,7 +50,7 @@ public:
// one ADIOS output variable we need to change every step
adios2::Variable<double> varAtoms;
};
}
} // namespace LAMMPS_NS
/* ---------------------------------------------------------------------- */
@ -58,8 +58,15 @@ DumpAtomADIOS::DumpAtomADIOS(LAMMPS *lmp, int narg, char **arg)
: DumpAtom(lmp, narg, arg)
{
internal = new DumpAtomADIOSInternal();
internal->ad =
new adios2::ADIOS("adios2_config.xml", world, adios2::DebugON);
try {
internal->ad =
new adios2::ADIOS("adios2_config.xml", world, adios2::DebugON);
} catch (std::ios_base::failure &e) {
char str[256];
snprintf(str, sizeof(str), "ADIOS initialization failed with error: %s",
e.what());
error->one(FLERR, str);
}
}
/* ---------------------------------------------------------------------- */
@ -248,14 +255,21 @@ void DumpAtomADIOS::init_style()
// setup column string
if (scale_flag == 0 && image_flag == 0)
std::vector<std::string> columnNames;
if (scale_flag == 0 && image_flag == 0) {
columns = (char *)"id type x y z";
else if (scale_flag == 0 && image_flag == 1)
columnNames = {"id", "type", "x", "y", "z"};
} else if (scale_flag == 0 && image_flag == 1) {
columns = (char *)"id type x y z ix iy iz";
else if (scale_flag == 1 && image_flag == 0)
columnNames = {"id", "type", "x", "y", "z", "ix", "iy", "iz"};
} else if (scale_flag == 1 && image_flag == 0) {
columns = (char *)"id type xs ys zs";
else if (scale_flag == 1 && image_flag == 1)
columnNames = {"id", "type", "xs", "ys", "zs"};
} else if (scale_flag == 1 && image_flag == 1) {
columns = (char *)"id type xs ys zs ix iy iz";
columnNames = {"id", "type", "xs", "ys", "zs", "ix", "iy", "iz"};
}
// setup function ptrs
@ -316,7 +330,10 @@ void DumpAtomADIOS::init_style()
int *boundaryptr = reinterpret_cast<int *>(domain->boundary);
internal->io.DefineAttribute<int>("boundary", boundaryptr, 6);
internal->io.DefineAttribute<std::string>("columns", columns);
size_t nColumns = static_cast<size_t>(size_one);
internal->io.DefineAttribute<std::string>("columns", columnNames.data(),
nColumns);
internal->io.DefineAttribute<std::string>("columnstr", columns);
internal->io.DefineAttribute<std::string>("boundarystr", boundstr);
internal->io.DefineAttribute<std::string>("LAMMPS/dump_style", "atom");
internal->io.DefineAttribute<std::string>("LAMMPS/version",
@ -332,7 +349,6 @@ void DumpAtomADIOS::init_style()
// atom table size is not known at the moment
// it will be correctly defined at the moment of write
size_t UnknownSizeYet = 1;
size_t nColumns = static_cast<size_t>(size_one);
internal->varAtoms = internal->io.DefineVariable<double>(
"atoms", {UnknownSizeYet, nColumns}, {UnknownSizeYet, 0},
{UnknownSizeYet, nColumns});

View File

@ -16,8 +16,6 @@
------------------------------------------------------------------------- */
#include "dump_custom_adios.h"
#include <cmath>
#include <cstring>
#include "atom.h"
#include "compute.h"
#include "domain.h"
@ -32,6 +30,8 @@
#include "universe.h"
#include "update.h"
#include "variable.h"
#include <cmath>
#include <cstring>
#include "adios2.h"
@ -124,7 +124,7 @@ public:
// (individual list of 'columns' string)
std::vector<std::string> columnNames;
};
}
} // namespace LAMMPS_NS
/* ---------------------------------------------------------------------- */
@ -132,8 +132,15 @@ DumpCustomADIOS::DumpCustomADIOS(LAMMPS *lmp, int narg, char **arg)
: DumpCustom(lmp, narg, arg)
{
internal = new DumpCustomADIOSInternal();
internal->ad =
new adios2::ADIOS("adios2_config.xml", world, adios2::DebugON);
try {
internal->ad =
new adios2::ADIOS("adios2_config.xml", world, adios2::DebugON);
} catch (std::ios_base::failure &e) {
char str[256];
snprintf(str, sizeof(str), "ADIOS initialization failed with error: %s",
e.what());
error->one(FLERR, str);
}
// if (screen) fprintf(screen, "DumpCustomADIOS constructor: nvariable=%d
// id_variable=%p, variables=%p, nfield=%d, earg=%p\n", nvariable,
@ -415,7 +422,7 @@ void DumpCustomADIOS::init_style()
"columns", internal->columnNames.data(), nColumns);
internal->io.DefineAttribute<std::string>("columnstr", columns);
internal->io.DefineAttribute<std::string>("boundarystr", boundstr);
internal->io.DefineAttribute<std::string>("LAMMPS/dump_style", "atom");
internal->io.DefineAttribute<std::string>("LAMMPS/dump_style", "custom");
internal->io.DefineAttribute<std::string>("LAMMPS/version",
universe->version);
internal->io.DefineAttribute<std::string>("LAMMPS/num_ver",

View File

@ -0,0 +1,527 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Norbert Podhorszki (Oak Ridge National Laboratory)
------------------------------------------------------------------------- */
#include "reader_adios.h"
#include "comm.h"
#include "error.h"
#include "memory.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "adios2.h"
#include "math_const.h"
using namespace LAMMPS_NS;
using namespace MathConst;
// also in read_dump.cpp
enum { ID, TYPE, X, Y, Z, VX, VY, VZ, Q, IX, IY, IZ, FX, FY, FZ };
enum { UNSET, NOSCALE_NOWRAP, NOSCALE_WRAP, SCALE_NOWRAP, SCALE_WRAP };
#define SMALL 1.0e-6
// true if the difference between two floats is "small".
// cannot use fabsf() since it is not fully portable.
static bool is_smalldiff(const float &val1, const float &val2)
{
return (fabs(static_cast<double>(val1 - val2)) < SMALL);
}
namespace LAMMPS_NS
{
class ReadADIOSInternal
{
public:
ReadADIOSInternal(){};
~ReadADIOSInternal() = default;
// name of adios group, referrable in adios2_config.xml
const std::string ioName = "read_dump";
adios2::ADIOS *ad = nullptr; // adios object
adios2::IO io; // adios group of variables and attributes in this dump
adios2::Engine fh; // adios file/stream handle object
// ADIOS input variables we need to change every step
adios2::Variable<uint64_t> varNtimestep;
adios2::Variable<uint64_t> varNatoms;
adios2::Variable<double> varAtoms;
// list of column names for the atom table
// (individual list of 'columns' string)
std::vector<std::string> columnNames;
float timeout = 0.0;
};
} // namespace LAMMPS_NS
/* ---------------------------------------------------------------------- */
ReaderADIOS::ReaderADIOS(LAMMPS *lmp) : Reader(lmp)
{
fieldindex = NULL;
nAtoms = 0;
nAtomsTotal = 0;
atomOffset = 0;
nstep = 0;
nid = 0;
me = comm->me;
internal = new ReadADIOSInternal();
try {
internal->ad =
new adios2::ADIOS("adios2_config.xml", world, adios2::DebugON);
} catch (std::ios_base::failure &e) {
char str[256];
snprintf(str, sizeof(str), "ADIOS initialization failed with error: %s",
e.what());
error->one(FLERR, str);
}
/* Define the group holding all variables and attributes */
internal->io = internal->ad->DeclareIO(internal->ioName);
}
/* ---------------------------------------------------------------------- */
ReaderADIOS::~ReaderADIOS()
{
if (me == 0) {
memory->destroy(fieldindex);
}
internal->columnNames.clear();
if (internal->fh) {
internal->fh.Close();
}
delete internal->ad;
delete internal;
}
/* ----------------------------------------------------------------------
pass on settings to find and load the proper plugin
Called by all processors.
------------------------------------------------------------------------- */
void ReaderADIOS::settings(int narg, char **arg)
{
int idx = 0;
while (idx < narg) {
if (!strcmp(arg[idx], "timeout")) {
if (idx + 1 < narg) {
internal->timeout = std::stof(arg[idx + 1]);
internal->io.SetParameter("OpenTimeoutSecs", arg[idx + 1]);
++idx;
} else {
char str[128];
snprintf(str, sizeof(str),
"Missing value for 'timeout' option for ADIOS "
"read_dump command");
error->one(FLERR, str);
}
}
++idx;
}
}
/* ----------------------------------------------------------------------
try to open given file
Every process must call this Collective operation
------------------------------------------------------------------------- */
void ReaderADIOS::open_file(const char *file)
{
int rv;
char str[1024];
// close open file, if needed.
if (internal->fh)
internal->fh.Close();
try {
internal->fh = internal->io.Open(file, adios2::Mode::Read, world);
} catch (std::ios_base::failure &e) {
char str[256];
snprintf(str, sizeof(str), "%s", e.what());
error->one(FLERR, str);
}
if (!internal->fh) {
snprintf(str, sizeof(str), "Cannot open file %s using ADIOS", file);
error->one(FLERR, str);
}
}
/* ----------------------------------------------------------------------
close current file
Every process must call this Collective operation
------------------------------------------------------------------------- */
void ReaderADIOS::close_file()
{
// close open file, if needed.
if (internal->fh) {
internal->fh.Close();
}
}
/* ----------------------------------------------------------------------
read and return time stamp from dump file
if first read reaches end-of-file, return 1 so caller can open next file
Called by all processors.
------------------------------------------------------------------------- */
int ReaderADIOS::read_time(bigint &ntimestep)
{
char str[1024];
adios2::StepStatus status =
internal->fh.BeginStep(adios2::StepMode::Read, internal->timeout);
switch (status) {
case adios2::StepStatus::EndOfStream:
case adios2::StepStatus::NotReady:
case adios2::StepStatus::OtherError:
return 1;
default:
break;
}
internal->varNtimestep =
internal->io.InquireVariable<uint64_t>("ntimestep");
if (!internal->varNtimestep) {
snprintf(str, sizeof(str),
"Did not find 'ntimestep' variable in ADIOS file %s",
internal->fh.Name().c_str());
error->one(FLERR, str);
}
ntimestep = static_cast<bigint>(internal->varNtimestep.Max());
// std::cerr << " **** ReaderADIOS::read_time found step " << ntimestep
// << " **** " << std::endl;
return 0;
}
/* ----------------------------------------------------------------------
skip snapshot for given timestamp
Called by all processors.
------------------------------------------------------------------------- */
void ReaderADIOS::skip() { internal->fh.EndStep(); }
/* ----------------------------------------------------------------------
read remaining header info:
return natoms
box bounds, triclinic (inferred), fieldflag (1 if any fields not found),
xyz flags = from input scaleflag & wrapflag
if fieldflag set:
match Nfield fields to per-atom column labels
allocate and set fieldindex = which column each field maps to
fieldtype = X,VX,IZ etc
fieldlabel = user-specified label or NULL if use fieldtype default
xyz flag = scaledflag if has fieldlabel name, else set by x,xs,xu,xsu
with ADIOS, called by all processes (each determines its own block of atoms)
------------------------------------------------------------------------- */
bigint ReaderADIOS::read_header(double box[3][3], int &boxinfo, int &triclinic,
int fieldinfo, int nfield, int *fieldtype,
char **fieldlabel, int scaleflag, int wrapflag,
int &fieldflag, int &xflag, int &yflag,
int &zflag)
{
char str[1024];
nid = 0;
// signal that we have no box info at all so far.
internal->varNatoms = internal->io.InquireVariable<uint64_t>("natoms");
if (!internal->varNatoms) {
snprintf(str, sizeof(str),
"Did not find 'natoms' variable in ADIOS file %s",
internal->fh.Name().c_str());
error->one(FLERR, str);
}
/* nAtoms: divide the atoms evenly among the processes; the first 'rem' ranks get one extra atom */
nAtomsTotal = internal->varNatoms.Max();
uint64_t rem = nAtomsTotal % comm->nprocs;
nAtoms = nAtomsTotal / comm->nprocs;
atomOffset = comm->me * nAtoms;
if (comm->me < rem) {
++nAtoms;
atomOffset += comm->me;
} else {
atomOffset += rem;
}
/* triclinic */
adios2::Attribute<int32_t> attTriclinic =
internal->io.InquireAttribute<int32_t>("triclinic");
if (!attTriclinic) {
snprintf(str, sizeof(str),
"Did not find 'triclinic' attribute in ADIOS file %s",
internal->fh.Name().c_str());
error->one(FLERR, str);
}
triclinic = attTriclinic.Data()[0];
/* read Box */
adios2::Variable<double> varBoxxlo =
internal->io.InquireVariable<double>("boxxlo");
adios2::Variable<double> varBoxxhi =
internal->io.InquireVariable<double>("boxxhi");
adios2::Variable<double> varBoxylo =
internal->io.InquireVariable<double>("boxylo");
adios2::Variable<double> varBoxyhi =
internal->io.InquireVariable<double>("boxyhi");
adios2::Variable<double> varBoxzlo =
internal->io.InquireVariable<double>("boxzlo");
adios2::Variable<double> varBoxzhi =
internal->io.InquireVariable<double>("boxzhi");
box[0][0] = varBoxxlo.Max();
box[0][1] = varBoxxhi.Max();
box[0][2] = 0.0;
box[1][0] = varBoxylo.Max();
box[1][1] = varBoxyhi.Max();
box[1][2] = 0.0;
box[2][0] = varBoxzlo.Max();
box[2][1] = varBoxzhi.Max();
box[2][2] = 0.0;
if (triclinic) {
adios2::Variable<double> varBoxxy =
internal->io.InquireVariable<double>("boxxy");
adios2::Variable<double> varBoxxz =
internal->io.InquireVariable<double>("boxxz");
adios2::Variable<double> varBoxyz =
internal->io.InquireVariable<double>("boxyz");
box[0][2] = varBoxxy.Max();
box[1][2] = varBoxxz.Max();
box[2][2] = varBoxyz.Max();
}
boxinfo = 1;
// if no field info requested, just return
if (!fieldinfo)
return nAtoms;
memory->create(fieldindex, nfield, "read_dump:fieldindex");
/* Columns */
adios2::Attribute<std::string> attColumns =
internal->io.InquireAttribute<std::string>("columns");
std::vector<std::string> labelVector = attColumns.Data();
int nwords = labelVector.size();
std::map<std::string, int> labels;
for (int i = 0; i < nwords; ++i) {
labels.emplace(labelVector[i], i);
}
int s_index, u_index, su_index;
xflag = UNSET;
yflag = UNSET;
zflag = UNSET;
// copy fieldtype list for supported fields
for (int i = 0; i < nfield; i++) {
if (fieldlabel[i]) {
fieldindex[i] = find_label(fieldlabel[i], labels);
if (fieldtype[i] == X)
xflag = 2 * scaleflag + wrapflag + 1;
else if (fieldtype[i] == Y)
yflag = 2 * scaleflag + wrapflag + 1;
else if (fieldtype[i] == Z)
zflag = 2 * scaleflag + wrapflag + 1;
}
else if (fieldtype[i] == ID)
fieldindex[i] = find_label("id", labels);
else if (fieldtype[i] == TYPE)
fieldindex[i] = find_label("type", labels);
else if (fieldtype[i] == X) {
fieldindex[i] = find_label("x", labels);
xflag = NOSCALE_WRAP;
if (fieldindex[i] < 0) {
fieldindex[i] = nwords;
s_index = find_label("xs", labels);
u_index = find_label("xu", labels);
su_index = find_label("xsu", labels);
if (s_index >= 0 && s_index < fieldindex[i]) {
fieldindex[i] = s_index;
xflag = SCALE_WRAP;
}
if (u_index >= 0 && u_index < fieldindex[i]) {
fieldindex[i] = u_index;
xflag = NOSCALE_NOWRAP;
}
if (su_index >= 0 && su_index < fieldindex[i]) {
fieldindex[i] = su_index;
xflag = SCALE_NOWRAP;
}
}
if (fieldindex[i] == nwords)
fieldindex[i] = -1;
} else if (fieldtype[i] == Y) {
fieldindex[i] = find_label("y", labels);
yflag = NOSCALE_WRAP;
if (fieldindex[i] < 0) {
fieldindex[i] = nwords;
s_index = find_label("ys", labels);
u_index = find_label("yu", labels);
su_index = find_label("ysu", labels);
if (s_index >= 0 && s_index < fieldindex[i]) {
fieldindex[i] = s_index;
yflag = SCALE_WRAP;
}
if (u_index >= 0 && u_index < fieldindex[i]) {
fieldindex[i] = u_index;
yflag = NOSCALE_NOWRAP;
}
if (su_index >= 0 && su_index < fieldindex[i]) {
fieldindex[i] = su_index;
yflag = SCALE_NOWRAP;
}
}
if (fieldindex[i] == nwords)
fieldindex[i] = -1;
} else if (fieldtype[i] == Z) {
fieldindex[i] = find_label("z", labels);
zflag = NOSCALE_WRAP;
if (fieldindex[i] < 0) {
fieldindex[i] = nwords;
s_index = find_label("zs", labels);
u_index = find_label("zu", labels);
su_index = find_label("zsu", labels);
if (s_index >= 0 && s_index < fieldindex[i]) {
fieldindex[i] = s_index;
zflag = SCALE_WRAP;
}
if (u_index >= 0 && u_index < fieldindex[i]) {
fieldindex[i] = u_index;
zflag = NOSCALE_NOWRAP;
}
if (su_index >= 0 && su_index < fieldindex[i]) {
fieldindex[i] = su_index;
zflag = SCALE_NOWRAP;
}
}
if (fieldindex[i] == nwords)
fieldindex[i] = -1;
} else if (fieldtype[i] == VX)
fieldindex[i] = find_label("vx", labels);
else if (fieldtype[i] == VY)
fieldindex[i] = find_label("vy", labels);
else if (fieldtype[i] == VZ)
fieldindex[i] = find_label("vz", labels);
else if (fieldtype[i] == FX)
fieldindex[i] = find_label("fx", labels);
else if (fieldtype[i] == FY)
fieldindex[i] = find_label("fy", labels);
else if (fieldtype[i] == FZ)
fieldindex[i] = find_label("fz", labels);
else if (fieldtype[i] == Q)
fieldindex[i] = find_label("q", labels);
else if (fieldtype[i] == IX)
fieldindex[i] = find_label("ix", labels);
else if (fieldtype[i] == IY)
fieldindex[i] = find_label("iy", labels);
else if (fieldtype[i] == IZ)
fieldindex[i] = find_label("iz", labels);
}
// set fieldflag = -1 if any unfound fields
fieldflag = 0;
for (int i = 0; i < nfield; i++)
if (fieldindex[i] < 0)
fieldflag = -1;
return nAtoms;
}
/* ----------------------------------------------------------------------
read N atom lines from dump file
stores appropriate values in fields array
return 0 if success, 1 if error
with ADIOS, called by all processes; each reads its own block of atoms
------------------------------------------------------------------------- */
void ReaderADIOS::read_atoms(int n, int nfield, double **fields)
{
char str[1024];
/* Read Atoms */
/* This is the first (and last) read of array data, so we
* call EndStep() here instead of PerformGets()
*/
adios2::Variable<double> varAtoms =
internal->io.InquireVariable<double>("atoms");
if (n != nAtoms) {
snprintf(
str, sizeof(str),
"ReaderADIOS::read_atoms() expects 'n=%d' equal to the number of "
"atoms (=%" PRIu64 ") for process %d in ADIOS file %s.",
n, nAtoms, comm->me, internal->fh.Name().c_str());
error->one(FLERR, str);
}
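// each process selects and reads only its own contiguous block of rows,
// i.e. rows [atomOffset, atomOffset + nAtoms) of the global atoms table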
size_t ncols = varAtoms.Count()[1];
varAtoms.SetSelection({{atomOffset, 0}, {nAtoms, ncols}});
std::vector<double> table;
internal->fh.Get<double>(varAtoms, table);
// EndStep or PerformGets required to make the read happen
internal->fh.EndStep();
size_t idx;
for (int i = 0; i < nAtoms; i++) {
idx = i * ncols;
for (int m = 0; m < nfield; m++) {
fields[i][m] = table[idx + fieldindex[m]];
}
}
}
/* ----------------------------------------------------------------------
match label to any of N labels
return index of match or -1 if no match
------------------------------------------------------------------------- */
int ReaderADIOS::find_label(const std::string &label,
const std::map<std::string, int> &labels)
{
std::map<std::string, int>::const_iterator it = labels.find(label);
if (it != labels.end()) {
return it->second;
}
return -1;
}

View File

@ -0,0 +1,72 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
Contributed by Norbert Podhorszki (Oak Ridge National Laboratory)
------------------------------------------------------------------------- */
#ifdef READER_CLASS
ReaderStyle(adios, ReaderADIOS)
#else
#ifndef LMP_READER_ADIOS_H
#define LMP_READER_ADIOS_H
#include "reader.h"
#include <map>
#include <string>
#include <vector>
namespace LAMMPS_NS
{
class ReadADIOSInternal;
class ReaderADIOS : public Reader
{
public:
ReaderADIOS(class LAMMPS *);
virtual ~ReaderADIOS();
virtual void settings(int, char **);
virtual int read_time(bigint &);
virtual void skip();
virtual bigint read_header(double[3][3], int &, int &, int, int, int *,
char **, int, int, int &, int &, int &, int &);
virtual void read_atoms(int, int, double **);
virtual void open_file(const char *);
virtual void close_file();
private:
int *fieldindex; // mapping of input fields to dump
uint64_t nAtomsTotal; // current number of atoms in entire dump step
uint64_t nAtoms; // current number of atoms for this process
// (Sum(nAtoms)=nAtomsTotal)
uint64_t atomOffset; // starting atom position for this process to read
bigint nstep; // current (time) step number
bigint nid; // current atom id.
int me;
ReadADIOSInternal *internal;
int find_label(const std::string &label,
const std::map<std::string, int> &labels);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -74,6 +74,8 @@ ReadDump::ReadDump(LAMMPS *lmp) : Pointers(lmp)
readers = NULL;
nsnapatoms = NULL;
clustercomm = MPI_COMM_NULL;
filereader = 0;
parallel = 0;
}
/* ---------------------------------------------------------------------- */
@ -263,6 +265,12 @@ void ReadDump::setup_reader(int narg, char **arg)
else error->all(FLERR,utils::check_packages_for_style("reader",readerstyle,lmp).c_str());
if (utils::strmatch(readerstyle,"^adios")) {
// everyone is a reader with adios
parallel = 1;
filereader = 1;
}
// pass any arguments to readers
if (narg > 0 && filereader)
@ -284,7 +292,7 @@ bigint ReadDump::seek(bigint nrequest, int exact)
// proc 0 finds the timestep in its first reader
if (me == 0) {
if (me == 0 || parallel) {
// exit file loop when dump timestep >= nrequest
// or files exhausted
@ -317,10 +325,12 @@ bigint ReadDump::seek(bigint nrequest, int exact)
if (exact && ntimestep != nrequest) ntimestep = -1;
}
// proc 0 broadcasts timestep and currentfile to all procs
if (!parallel) {
// proc 0 broadcasts timestep and currentfile to all procs
MPI_Bcast(&ntimestep,1,MPI_LMP_BIGINT,0,world);
MPI_Bcast(&currentfile,1,MPI_INT,0,world);
MPI_Bcast(&ntimestep,1,MPI_LMP_BIGINT,0,world);
MPI_Bcast(&currentfile,1,MPI_INT,0,world);
}
// if ntimestep < 0:
// all filereader procs close all their files and return
@ -379,7 +389,7 @@ bigint ReadDump::next(bigint ncurrent, bigint nlast, int nevery, int nskip)
// proc 0 finds the timestep in its first reader
if (me == 0) {
if (me == 0 || parallel) {
// exit file loop when dump timestep matches all criteria
// or files exhausted
@ -425,10 +435,12 @@ bigint ReadDump::next(bigint ncurrent, bigint nlast, int nevery, int nskip)
if (ntimestep > nlast) ntimestep = -1;
}
// proc 0 broadcasts timestep and currentfile to all procs
if (!parallel) {
// proc 0 broadcasts timestep and currentfile to all procs
MPI_Bcast(&ntimestep,1,MPI_LMP_BIGINT,0,world);
MPI_Bcast(&currentfile,1,MPI_INT,0,world);
MPI_Bcast(&ntimestep,1,MPI_LMP_BIGINT,0,world);
MPI_Bcast(&currentfile,1,MPI_INT,0,world);
}
// if ntimestep < 0:
// all filereader procs close all their files and return
@ -489,10 +501,12 @@ void ReadDump::header(int fieldinfo)
xflag,yflag,zflag);
}
MPI_Bcast(nsnapatoms,nreader,MPI_LMP_BIGINT,0,clustercomm);
MPI_Bcast(&boxinfo,1,MPI_INT,0,clustercomm);
MPI_Bcast(&triclinic_snap,1,MPI_INT,0,clustercomm);
MPI_Bcast(&box[0][0],9,MPI_DOUBLE,0,clustercomm);
if (!parallel) {
MPI_Bcast(nsnapatoms,nreader,MPI_LMP_BIGINT,0,clustercomm);
MPI_Bcast(&boxinfo,1,MPI_INT,0,clustercomm);
MPI_Bcast(&triclinic_snap,1,MPI_INT,0,clustercomm);
MPI_Bcast(&box[0][0],9,MPI_DOUBLE,0,clustercomm);
}
// local copy of snapshot box parameters
// used in xfield,yfield,zfield when converting dump atom to absolute coords
@ -715,7 +729,7 @@ void ReadDump::read_atoms()
// each reading proc reads one file and splits data across cluster
// cluster can be all procs or a subset
if (!multiproc || multiproc_nfile < nprocs) {
if (!parallel && (!multiproc || multiproc_nfile < nprocs)) {
nsnap = nsnapatoms[0];
if (filereader) {
@ -787,7 +801,7 @@ void ReadDump::read_atoms()
// every proc is a filereader, reads one or more files
// each proc keeps all data it reads, no communication required
} else if (multiproc_nfile >= nprocs) {
} else if (multiproc_nfile >= nprocs || parallel) {
bigint sum = 0;
for (int i = 0; i < nreader; i++)
sum += nsnapatoms[i];
@ -805,7 +819,12 @@ void ReadDump::read_atoms()
nsnap = nsnapatoms[i];
ntotal = 0;
while (ntotal < nsnap) {
nread = MIN(CHUNK,nsnap-ntotal);
if (parallel) {
// read the whole thing at once
nread = nsnap-ntotal;
} else {
nread = MIN(CHUNK,nsnap-ntotal);
}
readers[i]->read_atoms(nread,nfield,&fields[nnew+ntotal]);
ntotal += nread;
}

View File

@ -58,6 +58,7 @@ private:
int firstfile; // index of 1st dump file my cluster reads
// (0 to multiproc_nfile-1)
int filereader; // 1 if this proc reads from a dump file(s)
int parallel; // 1 if parallel reading (e.g. via ADIOS2)
int dimension; // same as in Domain
int triclinic;